diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..cc79e32c
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,38 @@
+FROM registry.cn-hangzhou.aliyuncs.com/prince/alpine-golang:1.11.5 as builder
+MAINTAINER prince <8923052@qq.com>
+ARG VERSION=1.1.7
+RUN set -xe; \
+    apk update; \
+    apk add --no-cache --virtual .build-deps \
+    git; \
+    cd /go/src/; \
+    git clone https://github.com/sjqzhang/go-fastdfs.git; \
+    cd go-fastdfs; \
+    go get; \
+    CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o fileserver; \
+    ls -lh .;
+FROM registry.cn-hangzhou.aliyuncs.com/prince/alpine-bash
+
+COPY --from=builder /go/src/go-fastdfs/fileserver /
+
+ENV INSTALL_DIR /usr/local/go-fastdfs
+
+ENV PATH $PATH:$INSTALL_DIR/
+
+ENV GO_FASTDFS_DIR $INSTALL_DIR/data
+
+RUN set -xe; \
+    mkdir -p $GO_FASTDFS_DIR; \
+    mkdir -p $GO_FASTDFS_DIR/conf; \
+    mkdir -p $GO_FASTDFS_DIR/data; \
+    mkdir -p $GO_FASTDFS_DIR/files; \
+    mkdir -p $GO_FASTDFS_DIR/log; \
+    mkdir -p $INSTALL_DIR; \
+    mv /fileserver $INSTALL_DIR/; \
+    chmod +x $INSTALL_DIR/fileserver;
+
+WORKDIR $INSTALL_DIR
+
+VOLUME $GO_FASTDFS_DIR
+
+CMD fileserver ${OPTS}
\ No newline at end of file
diff --git a/README-en.md b/README-en.md
new file mode 100644
index 00000000..ffa510a9
--- /dev/null
+++ b/README-en.md
@@ -0,0 +1,540 @@
+
+# [中文](README-en.md) [English](README.md)
+
+![logo](doc/logo.png)
+
+
+# go-fastdfs是一个基于http协议的分布式文件系统,它基于大道至简的设计理念,一切从简设计,使得它的运维及扩展变得更加简单,它具有高性能、高可靠、无中心、免维护等优点。
+
+### 大家担心的是这么简单的文件系统,靠不靠谱,可不可以用于生产环境?答案是肯定的,正因为简单所以高效,因为简单所以稳定。如果你担心功能,那就跑单元测试;如果担心性能,那就跑压力测试。项目都自带了,跑一跑更放心^_^。
+
+注意:使用前请认真阅读完本文,特别是[wiki](https://github.com/sjqzhang/go-fastdfs/wiki)
+
+- 支持curl命令上传
+- 支持浏览器上传
+- 支持HTTP下载
+- 支持多机自动同步
+- 支持断点下载
+- 支持配置自动生成
+- 支持小文件自动合并(减少inode占用)
+- 支持秒传
+- 支持跨域访问
+- 支持一键迁移
+- 支持并行体验
+- 支持断点续传([tus](https://tus.io/))
+- 支持docker部署
+- 支持自监控告警
+- 支持图片缩放
+- 支持google认证码
+- 支持自定义认证
+- 支持集群文件信息查看
+- 使用通用HTTP协议
+- 无需专用客户端(支持wget,curl等工具)
+- 类fastdfs
+- 高性能(使用leveldb作为kv库)
+- 高可靠(设计极其简单,使用成熟组件)
+- 无中心设计(所有节点都可以同时读写)
+
+# 优点
+
+- 无依赖(单一文件)
+- 自动同步
+- 失败自动修复
+- 按天分目录方便维护
+- 支持不同的场景
+- 文件自动去重
+- 支持目录自定义
+- 支持保留原文件名
+- 支持自动生成唯一文件名
+- 支持浏览器上传
+- 支持查看集群文件信息
+- 支持集群监控邮件告警
+- 支持小文件自动合并(减少inode占用)
+- 支持秒传
+- 支持图片缩放
+- 支持google认证码
+- 支持自定义认证
+- 支持跨域访问
+- 极低资源开销
+- 支持断点续传([tus](https://tus.io/))
+- 支持docker部署
+- 支持一键迁移(从其他系统文件系统迁移过来)
+- 支持并行体验(与现有的文件系统并行体验,确认OK再一键迁移)
+- 支持token下载 token=md5(file_md5+timestamp)
+- 运维简单,只有一个角色(不像fastdfs有三个角色Tracker Server,Storage Server,Client),配置自动生成
+- 每个节点对等(简化运维)
+- 所有节点都可以同时读写
+
+
+
+# 启动服务器(已编译,[下载](https://github.com/sjqzhang/fastdfs/releases)极速体验,只需一分钟)
+一键安装:(请将以下命令复制到linux console中执行)
+```shell
+wget --no-check-certificate https://github.com/sjqzhang/go-fastdfs/releases/download/v1.2.6/fileserver -O fileserver && chmod +x fileserver && ./fileserver
+```
+(注意:下载时要注意链接的版本号,windows下直接运行fileserver.exe,执行文件在这里[下载](https://github.com/sjqzhang/fastdfs/releases))
+
+# 命令上传
+
+`curl -F file=@http-index-fs http://10.1.xx.60:8080/upload`
+
+
+# WEB上传(浏览器打开)
+
+`http://yourserver ip:8080/upload.html` 注意:不要使用127.0.0.1上传
+
+# 代码上传(选项参阅浏览器上传)
+
+## python版本:
+```python
+import requests
+url = 'http://10.1.5.9:8080/upload'
+files = {'file': open('report.xls', 'rb')}
+options={'output':'json','path':'','scene':''} #参阅浏览器上传的选项
+r = requests.post(url, data=options, files=files)
+print(r.text)
+```
+## golang版本
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/astaxie/beego/httplib"
+)
+
+func main() {
+	var obj interface{}
+	req := httplib.Post("http://10.1.5.9:8080/upload")
+	req.PostFile("file", "filename") // 注意不是全路径
+	req.Param("output", "json")
+	req.Param("scene", "")
+	req.Param("path", "")
+	req.ToJSON(&obj)
+	fmt.Print(obj)
+}
+```
+## java版本
+依赖(这里使用了hutool工具包,更简便)
+```xml
+<dependency>
+    <groupId>cn.hutool</groupId>
+    <artifactId>hutool-all</artifactId>
+    <version>4.4.3</version>
+</dependency>
+```
+上传代码
+```java
+public static void main(String[] args) {
+    //文件地址
+    File file = new File("D:\\git\\2.jpg");
+    //声明参数集合
+    HashMap<String, Object> paramMap = new HashMap<>();
+    //文件
+    paramMap.put("file", file);
+    //输出
+    paramMap.put("output","json");
+    //自定义路径
+    paramMap.put("path","image");
+    //场景
+    paramMap.put("scene","image");
+    //上传
+    String result= HttpUtil.post("http://xxxxx:xxxx/upload", paramMap);
+    //输出json结果
+    System.out.println(result);
+}
+```
+[更多语言请参考](doc/upload.md)
+
+# 断点续传示例
+
+## golang版本
+```go
+package main
+
+import (
+	"os"
+	"fmt"
+	"github.com/eventials/go-tus"
+)
+
+func main() {
+	f, err := os.Open("100m")
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+	// create the tus client.
+	client, err := tus.NewClient("http://10.1.5.9:8080/big/upload/", nil)
+	fmt.Println(err)
+	// create an upload from a file.
+	upload, err := tus.NewUploadFromFile(f)
+	fmt.Println(err)
+	// create the uploader.
+	uploader, err := client.CreateUpload(upload)
+	fmt.Println(err)
+	// start the uploading process.
+	fmt.Println(uploader.Upload())
+}
+```
+[更多客户端请参考](https://github.com/tus)
+
+
+
+部署图
+![部署图](doc/go-fastdfs-deploy.png)
+
+通用文件认证时序图
+![通用文件认证时序图](doc/authentication2.png)
+
+文件google认证时序图
+![文件认证时序图](doc/authentication.png)
+
+# 有问题请[点击反馈](https://github.com/sjqzhang/go-fastdfs/issues/new)
+
+# 重要说明
+## 在issue中有很多实际使用的问题及回答(很多已关闭,请查看已关闭的issue)
+
+## 项目从v1.1.8开始进入稳定状态
+
+# 更新说明
+## 从低版本升级到高版本,可能存在配置项变动的情况,一定要注意使用新版本时的配置项。如何获得新版本的配置项及说明?先备份旧的配置项(文件名不能为cfg.json),再运行新的版本,配置项就会自动生成,然后再修改相应的配置项。
+
+- v1.1.9 增加文件自动迁移功能,支持同名文件重复覆盖选项。
+
+ # Q&A
+
+- 最佳实践?
+```
+一、如果是海量存储,不要开启文件token认证功能,减少性能开销。
+二、尽量用标准上传,上传后业务保存path,在业务使用时再拼接上域名(方便迁移扩展等)。
+三、如果使用断点续传,上传后一定要用文件id置换成path存储(如何置换看QA/API文档),为后面访问减少性能开销。
+四、尽量使用物理服务器部署,因为主要压力或性能来自于IO。
+五、线上业务尽量使用nginx+gofastdfs部署架构(均衡算法使用ip_hash),以满足后面的功能扩展性(nginx+lua)。
+六、线上环境最好不要使用容器部署,容器适用于测试和功能验证。
+总结:业务保存文件的path,减少后期访问路径转换带来的开销;文件访问权限由业务来完成,这样性能最好,通用性强(可直接用于其它web服务器)。
+
+重要提醒:如果开启小文件合并功能,后期是无法删除小文件的。
+上传结果说明
+请使用md5,path,scene字段,其它是为了兼容老的线上系统添加的,以后有可能去掉。
+
+```
+
+- 有API文档么?
+[API文档](doc/api.md)
+
+- 有管理后台么?
+```
+https://github.com/perfree/go-fastdfs-web
+```
+- 断点上传有使用说明么?
+```
+https://github.com/tus
+```
+
+- 在微信讨论群中大家都问到go-fastdfs性能怎样?
+```
+由于问的人太多,在这里统一回答。
+go-fastdfs的文件定位与其它分布式系统不同,它的寻址是直接定位,不经过任何组件,所以时间复杂度近似为O(1)[文件路径定位],
+基本没有性能损耗。项目中也附有压测脚本,大家可以自行进行压测,群里就不要太多讨论问题了,
+人多每次回复同样的问题,大家也会觉得这群无聊。
+```
+
+
+
+- 已经使用fastdfs存储的文件可以迁移到go fastdfs下么(其它迁移情况可按类似的方式处理,极速体验也用类似的方式)?
+```
+答案是可以的,你担心的问题是路径改变,go fastdfs为你考虑了这一点。
+步骤:
+  一、下载最新版的go-fastdfs
+  二、将原来的fastdfs文件目录复制到go-fastdfs的files目录下(如果文件很多,可以逆向过来,将fileserver复制过去,但要保留fileserver的目录结构)
+  三、将配置enable_migrate设为true
+  注意:迁移过程中会扫描整个files目录下的所有文件,
+  速度较慢,迁移完成后请将enable_migrate设为false
+
+说明:go-fastdfs的目录是不能变动的,与同步机制相关。很多同学在群里问,我的files目录能不能自定义,答案是否定的。
+至于能不能用软链接的方式本人没有测试过,可以自行测试。
+
+```
+
+- 什么是集群,如何用Nginx管理多集群?
+```
+1、在go-fastdfs中,一个集群就是一个group。
+2、请参阅部署图
+注意:配置中的 support_group_manage 参数设为true时,所有的url中都自动添加组信息。
+例如:http://10.1.5.9:8080/group/status
+默认:http://10.1.5.9:8080/status
+区别:多了group,对应配置中的 group 参数,这样主要是为了解决一个Nginx反向代理多个group(集群)的问题。
+具体请参阅部署图
+
+```
+
+
+- 如何搭建集群?
+```
+一、先下载已编译的可执行文件(用最新版本)
+二、运行可执行文件(生成配置)
+三、修改配置
+    peers:增加对端的http地址
+    检查:
+    host:自动生成是否正确
+    peer_id:集群内是否唯一
+四、重新运行服务器
+五、验证服务是否OK
+```
+
+
+- 适合海量存储吗?
+```
+答案:适合海量存储
+```
+
+- 如何上传文件夹?
+```
+ DIR=log && ls $DIR |xargs -n 1 -I {} curl -s -F path=$DIR -F file=@$DIR/{} http://10.1.50.90:8080/upload
+ 上面命令的log为要上传的目录,如果用代码上传,就是简单的循环上传(参考下方示例)。
+```
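+下面是一个用Go循环上传目录的极简示意(假设性示例,非项目自带代码:目录名、服务地址与上文curl示例保持一致,httplib为beego的httplib包):
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+
+	"github.com/astaxie/beego/httplib"
+)
+
+func main() {
+	dir := "log" // 要上传的目录
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		panic(err)
+	}
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+		req := httplib.Post("http://10.1.50.90:8080/upload")
+		req.PostFile("file", dir+"/"+f.Name())
+		req.Param("path", dir) // 与curl示例一样,用目录名作为自定义路径
+		req.Param("output", "json")
+		result, err := req.String()
+		if err != nil {
+			fmt.Println(f.Name(), err)
+			continue
+		}
+		fmt.Println(result)
+	}
+}
+```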
+- 如何缩放图片?
+```
+在下载url中加入width和height参数
+例如:http://127.0.0.1:8080/group1/haystack/5/124,0,27344,.jpg?download=0&width=100&height=100
+特别说明:如果要等比例缩放,请将width或height设为0
+```
+
+- 如何在浏览器中直接显示图片?
+```
+在下载url中加入download=0参数
+例如:http://127.0.0.1:8080/group1/haystack/5/124,0,27344,.jpg?download=0
+```
+
+
+- 如何实现自定义认证上传下载?
+```
+一、使用1.2.6版本以后的go-fastdfs
+二、设置auth_url参数(由应用提供)
+三、应用实现验证权限接口(即第二步的url),参数为 auth_token,返回 ok 表示认证通过,其它为不通过(服务端示例见下方)
+四、认证通过后,可以上传或下载
+```
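+下面是auth_url验证接口的一个极简示意(假设性示例:端口与token校验逻辑均为演示用,返回 ok 即认证通过):
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// 假设cfg.json中配置 "auth_url": "http://10.1.5.9:9090/auth"
+func main() {
+	http.HandleFunc("/auth", func(w http.ResponseWriter, r *http.Request) {
+		token := r.FormValue("auth_token") // 普通上传以http参数auth_token传入
+		if token == "demo-token" {         // 这里替换成真实的校验逻辑(查库、验签等)
+			fmt.Fprint(w, "ok") // 返回ok表示认证通过
+			return
+		}
+		fmt.Fprint(w, "fail") // 其它返回值均视为不通过
+	})
+	http.ListenAndServe(":9090", nil)
+}
+```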
+- 还需要安装nginx么?
+```
+go-fastdfs本身是一个高性能的web服务器,在开发或测试时,可以不用安装nginx;
+但go-fastdfs的功能单一,如需要缓存、重定向或其它扩展,nginx都有成熟的组件,
+所以建议线上还是加一层nginx,再借助nginx+lua解决扩展性问题。
+```
+
+- 能动态加载配置么?
+```
+答案:是可以的,但要更新到最新版本
+步骤:
+1)修改 conf/cfg.json 文件
+2)访问 http://10.1.xx.60:8080/reload
+3)注意:每个节点都需要进行同样的操作
+```
+
+
+- 如何查看集群文件信息?
+```
+http://10.1.xx.60:8080/stat
+
+如果出现文件统计出错怎么办?
+请删除 data目录下的 stat.json文件,重启服务,让系统自动重新计算文件数。
+
+或者调用
+http://10.1.xx.60:8080/repair_stat
+
+```
+- 可靠性怎样,能用于生产环境么?
+```
+本项目已大规模用于生产环境,如担心不能满足需求,
+可以在使用前对其各项特性进行压力测试,有任何问题可以直接提issue
+```
+
+- 如何后台运行程序?
+```
+请使用control对程序进行后台运行,具体操作如下:
+  一、 chmod +x control
+  二、 确保control与fileserver在同一个目录
+  三、 ./control start|stop|status #对程序进行启动、停止、查看状态等。
+
+```
+
+
+- 能不能在一台机器部署多个服务端?
+```
+不能。在设计之初就已考虑到集群的高可用问题,为了保证集群的真正可用,必须为不同的ip,ip不能用127.0.0.1
+错误 "peers": ["http://127.0.0.1:8080","http://127.0.0.1:8081","http://127.0.0.1:8082"]
+正确 "peers": ["http://10.0.0.3:8080","http://10.0.0.4:8080","http://10.0.0.5:8080"]
+```
+- 文件不同步了怎么办?
+```
+正常情况下,集群会每小时自动同步修复文件。(性能较差,在海量情况下建议关闭自动修复)
+那异常情况下怎么办?
+答案:手动同步(最好在低峰执行)
+http://172.16.70.123:7080/sync?date=20190117&force=1 (说明:要在文件多的服务器上执行,相当于推送到别的服务器)
+参数说明:date 表示同步哪一天的数据;force=1 表示强制同步当天所有文件(性能差),force=0 表示只同步失败的文件
+
+不同步的情况:
+1)原来运行N台,现在突然加入一台变成N+1台
+2)原来运行N台,某一台机器出现问题,变成N-1台
+
+如果出现多天数据不一致怎么办?能一次同步所有吗?
+答案是可以:(最好在低峰执行)
+http://172.16.70.123:7080/repair?force=1
+
+```
+
+- 文件不同步会影响访问吗?
+```
+答案:不会影响,会在访问不到时,自动修复不同步的文件。
+```
+
+- 如何查看系统状态及说明?
+```
+http://172.16.70.123:7080/status
+注意:(Fs.Peers是不带本机的,如果带有可能出问题)
+本机为 Fs.Local
+sts["Fs.ErrorSetSize"] = this.errorset.Cardinality() 这个会导致内存增加
+
+```
+
+
+- 如何编译(go1.9.2+)?
+```
+git clone https://github.com/sjqzhang/go-fastdfs.git
+cd go-fastdfs
+mv vendor src
+pwd=`pwd`
+GOPATH=$pwd go build -o fileserver fileserver.go
+```
+
+- 如何跑单元测试(尽量在linux下进行)?
+```
+
+git clone https://github.com/sjqzhang/go-fastdfs.git
+cd go-fastdfs
+mv vendor src
+pwd=`pwd`
+GOPATH=$pwd go test -v fileserver.go fileserver_test.go
+
+```
+
+
+
+- 如何压测?
+```
+步骤:
+一、创建files文件夹
+二、将gen_file.py复制到files文件夹中,通过python gen_file.py 生成大量文件
+三、将benchmark.py放到files目录外(即与files目录同一级),通过python benchmark.py进行压测(注意对benchmark.py中的ip进行修改)
+先用gen_file.py产生大量文件(注意如果要生成大文件,自己在内容中乘上一个大的数即可)
+例如:
+# -*- coding: utf-8 -*-
+import os
+j=0
+for i in range(0,1000000):
+    if i%1000==0:
+        j=i
+        os.system('mkdir %s'%(i))
+    with open('%s/%s.txt'%(j,i),'w+') as f:
+        f.write(str(i)*1024)
+接着用benchmark.py进行压测
+也可以多机同时进行压测,所有节点都是可以同时读写的
+```
+
+- 代码为什么写在一个文件中?
+```
+一、目前的代码还非常简单,没必要弄得太复杂。
+二、个人理解模块化不是分开多个文件就表示模块化,大家可以用IDE去看一下代码结构,其实是已经模块化的。
+```
+
+- 支持断点下载?
+```
+答案:支持
+curl、wget示例如下
+wget -c http://10.1.5.9:8080/group1/default/20190128/16/10/2G
+curl -C - http://10.1.5.9:8080/group1/default/20190128/16/10/2G
+```
+
+- Docker如何部署?
+```
+步骤:
+方式一、
+  一、构建镜像
+  docker build . -t fastdfs
+  二、运行容器(使用环境变量 GO_FASTDFS_DIR 指向存储目录。)
+  docker run --name fastdfs -v /data/fastdfs_data:/data -e GO_FASTDFS_DIR=/data fastdfs
+方式二、
+  一、拉取镜像
+  docker pull sjqzhang/go-fastdfs
+  二、运行容器
+  docker run --name fastdfs -v /data/fastdfs_data:/data -e GO_FASTDFS_DIR=/data sjqzhang/go-fastdfs
+
+```
+
+- 大文件如何分块上传或断点续传?
+```
+一般的分块上传都要客户端支持,而语言的多样性使客户端难以维护,但分块上传的功能又有必要,为此提供一个简单的实现思路。
+方案一、
+借助linux split cat 实现分割与合并,具体查看 split 与 cat 帮助。
+分割: split -b 1M filename #按每块1M分割
+合并: cat x* > filename #合并
+方案二、
+借助hjsplit
+http://www.hjsplit.org/
+具体自行实现
+方案三、
+建议用go实现hjsplit的分割合并功能,这样具有跨平台特性。(未实现,等你来....)
+方案四、
+使用内置的断点续传功能(使用protocol for resumable uploads协议,[详情](https://tus.io/))
+  注意:方案四只能指定一个上传服务器,不支持同时写,并且上传的url有变化
+  原上传url: http://10.1.5.9:8080//upload
+  断点上传url: http://10.1.5.9:8080//big/upload/
+  上传完成,再通过秒传接口,获取文件信息
+```
+
+- 如何秒传文件?
+```
+通过http get的方式访问上传接口(Go调用示例见下方)
+http://10.0.5.9:8080/upload?md5=filesum&output=json
+参数说明:
+md5=sum(file) 文件的摘要算法要与文件服务器的算法一致(算法支持md5|sha1),如果是断点续传,可以使用文件的id,也就是上传后返回的id
+output=json|text 返回的格式
+
+```
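+秒传接口的Go调用示意(假设性示例:md5值需换成文件的实际摘要,返回内容以实际版本为准):
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+func main() {
+	// 摘要算法需与服务端file_sum_arithmetic配置一致(md5|sha1)
+	resp, err := http.Get("http://10.0.5.9:8080/upload?md5=filesum&output=json")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	body, _ := ioutil.ReadAll(resp.Body)
+	fmt.Println(string(body)) // 文件已存在时返回文件信息,否则为空或错误信息
+}
+```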
+- 集群如何规划及如何进行扩容?
+```
+建议在前期规划时,尽量采购大容量的机器作为存储服务器,如果要两个副本就用两台组成一个集群,如果要三个副本就用三台组成一个集群。(注意每台服务器最好配置保持一样,并且使用raid5磁盘阵列)
+
+如果要提高可用性,只要在现有的集群peers中加入新的机器,再对集群进行修复即可。
+修复办法 http://172.16.70.123:7080/repair?force=1 (建议低峰变更)
+
+如何扩容?
+为简单可靠起见,直接搭建一个新集群即可(搭建就是启动./fileserver进程,设置一下peers的IP地址,三五分钟的事)
+issue中chengyuansen同学向我提议增加扩容特性,我觉得这对代码逻辑及运维都增加复杂度,暂时没有加入这特性。
+
+```
+
+
+- 访问限制问题
+```
+出于安全考虑,管理API只能在集群内部调用或者用127.0.0.1调用。
+```
+
+
+
+
+- 有问题请[点击反馈](https://github.com/sjqzhang/go-fastdfs/issues/new)
+## 有问题请加群
+![二维码](doc/wechat.jpg)
+
+#### 进群请改昵称,昵称格式:城市-公司-昵称。如果你喜欢这个项目,请关注(star)此项目,关注是对项目的肯定,也是作者创新的动力。
+
+#### [捐赠](doc/pay.png)
diff --git a/README.md b/README.md
index 77e39a33..711f3f2e 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,514 @@
-# FileServer
-file Upload/Download server
+# [中文](README-en.md) [English](README.md)
+
+![logo](doc/logo.png)
+
+
+# go-fastdfs is a distributed file system based on the HTTP protocol. It follows the philosophy that simplicity is best: everything is kept simple by design, which makes operation and scaling easy. It is high-performance, highly reliable, decentralized, and maintenance-free.
+
+### Can such a simple file system be trusted in production? The answer is yes: it is efficient because it is simple, and stable because it is simple. If you are worried about features, run the unit tests; if you are worried about performance, run the stress tests. Both ship with the project, so run them and rest easy ^_^.
+
+Note: please read this document carefully before using, especially the [wiki](https://github.com/sjqzhang/go-fastdfs/wiki)
+
+- Upload via curl
+- Upload via browser
+- HTTP download
+- Automatic multi-machine synchronization
+- Resumable download
+- Automatic configuration generation
+- Automatic merging of small files (reduces inode usage)
+- Instant upload of existing files (秒传)
+- Cross-origin access
+- One-click migration
+- Side-by-side trial runs
+- Resumable upload ([tus](https://tus.io/))
+- Docker deployment
+- Self-monitoring alarms
+- Image scaling
+- Google authenticator codes
+- Custom authentication
+- Cluster file information viewing
+- Uses the plain HTTP protocol
+- No dedicated client needed (wget, curl, etc. work)
+- fastdfs-like design
+- High performance (uses leveldb as the kv store)
+- High reliability (extremely simple design, built on mature components)
+- Decentralized design (all nodes can read and write at the same time)
+
+# Advantages
+
+- No dependencies (single binary)
+- Automatic synchronization
+- Automatic repair on failure
+- Files organized into per-day directories for easy maintenance
+- Supports different scenes
+- Automatic file deduplication
+- Custom directories
+- Keeping the original file name
+- Automatically generated unique file names
+- Browser upload
+- Viewing cluster file information
+- Cluster monitoring with email alarms
+- Automatic merging of small files (reduces inode usage)
+- Instant upload of existing files (秒传)
+- Image scaling
+- Google authenticator codes
+- Custom authentication
+- Cross-origin access
+- Very low resource overhead
+- Resumable upload ([tus](https://tus.io/))
+- Docker deployment
+- One-click migration (from other file systems)
+- Side-by-side trial runs (run alongside an existing file system, confirm it is OK, then migrate with one click)
+- Token-based download, token=md5(file_md5+timestamp) (see the sketch after this list)
+- Easy operation and maintenance: only one role (unlike fastdfs with its three roles of Tracker Server, Storage Server, and Client), and the configuration is generated automatically
+- All nodes are peers (simplifies operation and maintenance)
+- All nodes can read and write simultaneously
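+A minimal Go sketch of how the business side could compute such a download token (the token and timestamp query-parameter names are assumptions; check the wiki for your version):
+```go
+package main
+
+import (
+	"crypto/md5"
+	"fmt"
+	"time"
+)
+
+// token = md5(file_md5 + timestamp)
+func main() {
+	fileMd5 := "d41d8cd98f00b204e9800998ecf8427e" // the md5 returned at upload time
+	ts := fmt.Sprintf("%d", time.Now().Unix())
+	token := fmt.Sprintf("%x", md5.Sum([]byte(fileMd5+ts)))
+	url := fmt.Sprintf("http://10.1.5.9:8080/group1/default/somefile?token=%s&timestamp=%s", token, ts)
+	fmt.Println(url)
+}
+```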
+
+
+# Start the server (pre-compiled; [download](https://github.com/sjqzhang/fastdfs/releases) for a quick trial)
+```
+./fileserver
+```
+
+
+# Command-line upload
+
+`curl -F file=@http-index-fs http://10.1.xx.60:8080/upload`
+
+
+# Web upload (open in a browser)
+
+`http://yourserver-ip:8080/upload.html` Note: do not upload via 127.0.0.1
+
+# Upload from code (for the options, see the browser upload page)
+## python
+```python
+import requests
+url = 'http://127.0.0.1:8080/upload'
+files = {'file': open('report.xls', 'rb')}
+options={'output':'json','path':'','scene':''} #See the browser upload page for the options
+r = requests.post(url, data=options, files=files)
+print(r.text)
+```
+## golang
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/astaxie/beego/httplib"
+)
+
+func main() {
+	var obj interface{}
+	req := httplib.Post("http://10.1.5.9:8080/upload")
+	req.PostFile("file", "path/to/file")
+	req.Param("output", "json")
+	req.Param("scene", "")
+	req.Param("path", "")
+	req.ToJSON(&obj)
+	fmt.Print(obj)
+}
+```
+
+## java
+
+```xml
+<dependency>
+    <groupId>cn.hutool</groupId>
+    <artifactId>hutool-all</artifactId>
+    <version>4.4.3</version>
+</dependency>
+```
+
+```java
+public static void main(String[] args) {
+    File file = new File("D:\\git\\2.jpg");
+    HashMap<String, Object> paramMap = new HashMap<>();
+    paramMap.put("file", file);
+    paramMap.put("output","json");
+    paramMap.put("path","image");
+    paramMap.put("scene","image");
+    String result= HttpUtil.post("http://xxxxx:xxxx/upload", paramMap);
+    System.out.println(result);
+}
+```
+
+# Resumable upload example
+## golang
+```go
+package main
+
+import (
+	"os"
+	"fmt"
+	"github.com/eventials/go-tus"
+)
+
+func main() {
+	f, err := os.Open("100m")
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+	// create the tus client.
+	client, err := tus.NewClient("http://10.1.5.9:8080/big/upload/", nil)
+	fmt.Println(err)
+	// create an upload from a file.
+	upload, err := tus.NewUploadFromFile(f)
+	fmt.Println(err)
+	// create the uploader.
+	uploader, err := client.CreateUpload(upload)
+	fmt.Println(err)
+	// start the uploading process.
+	fmt.Println(uploader.Upload())
+}
+```
+[More languages](doc/upload.md)
+
+![deploy](doc/go-fastdfs-deploy.png)
+
+Universal file authentication sequence diagram
+![Universal file authentication sequence diagram](doc/authentication2.png)
+
+File google authentication sequence diagram
+![File google authentication sequence diagram](doc/authentication.png)
+
+# Please click [Feedback](https://github.com/sjqzhang/go-fastdfs/issues/new)
+
+
+ # Q&A
+
+- Best practices?
+```
+First, for mass storage, do not enable the file token authentication feature; it saves performance overhead.
+Second, prefer the standard upload; have the business store the returned path and join it with the domain name at access time (this makes migration and scaling easier).
+Third, if you use resumable upload, exchange the file id for a path after uploading and store the path (see the Q&A/API documentation for how); this reduces overhead on later access.
+Fourth, prefer physical servers for deployment, because the main pressure and performance cost come from IO.
+Fifth, for online business use an nginx+gofastdfs architecture (with the ip_hash balancing algorithm) to keep future extensibility (nginx+lua).
+Sixth, avoid container deployment in production; containers are suitable for testing and functional verification.
+Summary: have the business store the file path (this avoids later path conversion) and enforce file access permissions in the business layer; this gives the best performance and strong portability (other web servers can serve the files directly).
+
+Important reminder: if the small-file merge feature is enabled, the merged small files cannot be deleted later.
+Upload result description
+Please use the md5, path, and scene fields; the others exist for compatibility with older online systems and may be removed in the future.
+
+```
+
+- Is there an API document?
+[API documentation](doc/api.md)
+
+- Is there an admin web UI?
+```
+https://github.com/perfree/go-fastdfs-web
+```
+- Are there instructions for resumable upload?
+```
+https://github.com/tus
+```
+
+- In the WeChat discussion group everyone asks about go-fastdfs performance?
+```
+Since so many people ask, here is one answer for all.
+File addressing in go-fastdfs differs from other distributed systems: files are located directly, without going through any intermediate component, so lookup is roughly O(1) [file path location]
+and there is basically no performance loss. The project also ships with a stress-test script, so you can benchmark it yourself; please do not keep re-asking this in the group,
+since answering the same question over and over bores everyone.
+```
+
+
+
+- Can files already stored with fastdfs be migrated to go-fastdfs (other migrations can be handled in a similar manner, as can quick trials)?
+```
+The answer is yes. The usual concern is path changes; go-fastdfs takes care of this for you.
+Steps:
+First, download the latest version of go-fastdfs
+Second, copy the original fastdfs file directory into go-fastdfs's files directory (if there are a lot of files, you can do the reverse: copy fileserver over, keeping fileserver's directory structure)
+Third, set enable_migrate to true
+Note: all files under the files directory are scanned during migration,
+which is slow; set enable_migrate back to false once the migration completes
+
+Note: the go-fastdfs directory layout cannot be changed; it is tied to the synchronization mechanism. Many people ask in the group whether the files directory can be customized; the answer is no.
+As for whether a soft link works, I have not tested it; feel free to test it yourself.
+
+```
+- What is a cluster, and how are multiple clusters managed with Nginx?
+```
+1. In go-fastdfs, a cluster is a group.
+2. Please refer to the deployment diagram.
+Note: when the support_group_manage parameter in the configuration is set to true, group information is automatically added to all urls.
+For example: http://10.1.5.9:8080/group/status
+Default: http://10.1.5.9:8080/status
+The difference: the extra group segment, corresponding to the group parameter in the configuration; this mainly lets one Nginx reverse-proxy multiple groups (clusters).
+Please refer to the deployment diagram for details.
+
+```
+
+
+- How to build a cluster?
+```
+First, download the compiled executable (use the latest version)
+Second, run the executable (this generates the configuration)
+Third, edit the configuration
+    peers: add the peers' http addresses
+    check:
+    host: is the auto-generated value correct?
+    peer_id: is it unique within the cluster?
+Fourth, restart the server
+Fifth, verify the service is OK
+```
+
+
+- Is it suitable for mass storage?
+```
+Answer: yes, it is suitable for mass storage
+```
+
+- How to upload a folder?
+```
+ DIR=log && ls $DIR |xargs -n 1 -I {} curl -s -F path=$DIR -F file=@$DIR/{} http://10.1.50.90:8080/upload
+ The log in the command above is the directory to upload; from code it is just a simple loop over the files.
+```
+
+- How to scale images?
+```
+Add width and height parameters to the download url
+For example: http://127.0.0.1:8080/group1/haystack/5/124,0,27344,.jpg?download=0&width=100&height=100
+Special note: for proportional scaling, set width or height to 0.
+```
+
+- How to display images directly in the browser?
+```
+Add the download=0 parameter to the download url.
+For example: http://127.0.0.1:8080/group1/haystack/5/124,0,27344,.jpg?download=0
+```
+
+
+- How to implement custom authentication for upload and download?
+```
+First, use go-fastdfs version 1.2.6 or later
+Second, set the auth_url parameter (an endpoint provided by your application)
+Third, have the application implement the authentication interface (the url from the second step); it receives the parameter auth_token and returns ok when authentication passes; any other response means it failed
+Fourth, once authenticated, uploads and downloads can proceed
+```
+
+
+- Do I still need to install nginx?
+```
+go-fastdfs is itself a high-performance web server, so nginx is not needed for development or testing.
+But go-fastdfs is single-purpose; for caching, redirects, or other extensions, nginx has mature components,
+so it is recommended to add an nginx layer in production and use nginx+lua for extensibility.
+```
+
+- Can the configuration be reloaded dynamically?
+```
+Answer: yes, but update to the latest version first
+Steps:
+1) Edit the conf/cfg.json file
+2) Visit http://10.1.xx.60:8080/reload
+3) Note: every node needs the same treatment (see the sketch below)
+```
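+A minimal Go sketch for step 3, hitting /reload on every node once cfg.json has been edited everywhere (the peer addresses are placeholders; see doc/api.md for the action parameter):
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	peers := []string{"http://10.0.0.3:8080", "http://10.0.0.4:8080", "http://10.0.0.5:8080"}
+	for _, p := range peers {
+		resp, err := http.Get(p + "/reload") // with support_group_manage, prefix the group
+		if err != nil {
+			fmt.Println(p, err)
+			continue
+		}
+		resp.Body.Close()
+		fmt.Println(p, resp.Status)
+	}
+}
+```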
+
+
+- How to view cluster file information?
+```
+http://10.1.xx.60:8080/stat
+
+What if the file statistics are wrong?
+Delete the stat.json file in the data directory and restart the service, so the file counts are recalculated automatically.
+
+Or call
+http://10.1.xx.60:8080/repair_stat
+
+```
+- How reliable is it; can it be used in a production environment?
+```
+This project is already used at scale in production. If you are worried it will not meet your needs,
+stress-test its features before use; any problem can be raised directly as an issue.
+```
+
+- How to run the program in the background?
+```
+Use control to run the program in the background, as follows:
+  First, chmod +x control
+  Second, make sure control and fileserver are in the same directory
+  Third, ./control start|stop|status #start, stop, or check the status of the program
+
+```
+
+
+- Can I run multiple server instances on one machine?
+```
+No. Cluster high availability was considered from the start of the design; to keep the cluster truly available, the peers must be on different ips, and 127.0.0.1 cannot be used.
+Wrong "peers": ["http://127.0.0.1:8080","http://127.0.0.1:8081","http://127.0.0.1:8082"]
+Correct "peers": ["http://10.0.0.3:8080","http://10.0.0.4:8080","http://10.0.0.5:8080"]
+```
+- What should I do if files are out of sync?
+```
+Under normal circumstances, the cluster automatically syncs and repairs files every hour. (Performance is poor; with massive data it is recommended to turn automatic repair off.)
+What about abnormal situations?
+Answer: manual sync (preferably at low peak)
+http://172.16.70.123:7080/sync?date=20190117&force=1 (note: run it on the server holding more files; this effectively pushes to the other servers)
+Parameters: date is the day to synchronize; force=1 forces syncing all of that day's files (poor performance), force=0 syncs only the failed files
+
+Out-of-sync situations:
+1) N machines were running, and one suddenly joins, making N+1
+2) N machines were running, and one has a problem, making N-1
+
+What if data is inconsistent across several days? Can everything be synced at once?
+The answer is yes: (preferably at low peak)
+http://172.16.70.123:7080/repair?force=1
+
+```
+
+- Do out-of-sync files affect access?
+```
+Answer: no. When a file cannot be accessed locally, the out-of-sync file is repaired automatically.
+```
+
+- How do I check the system status and its meaning?
+```
+http://172.16.70.123:7080/status
+Note: Fs.Peers should not include this machine; if it does, something may be wrong.
+This machine is Fs.Local
+sts["Fs.ErrorSetSize"] = this.errorset.Cardinality() This can cause memory growth
+
+```
+
+
+- How to compile (go1.9.2+)?
+```
+git clone https://github.com/sjqzhang/go-fastdfs.git
+cd go-fastdfs
+mv vendor src
+pwd=`pwd`
+GOPATH=$pwd go build -o fileserver fileserver.go
+```
+
+- How to run the unit tests (preferably under linux)?
+```
+
+git clone https://github.com/sjqzhang/go-fastdfs.git
+cd go-fastdfs
+mv vendor src
+pwd=`pwd`
+GOPATH=$pwd go test -v fileserver.go fileserver_test.go
+
+```
+
+
+
+- How to stress test?
+```
+Steps:
+First, create a files folder
+Second, copy gen_file.py into the files folder and generate a large number of files with python gen_file.py
+Third, put benchmark.py outside the files directory (at the same level as it) and run python benchmark.py for the stress test (remember to change the ip in benchmark.py)
+First use gen_file.py to generate a large number of files (to generate big files, just multiply the content by a large number)
+E.g.:
+# -*- coding: utf-8 -*-
+import os
+j=0
+for i in range(0,1000000):
+    if i%1000==0:
+        j=i
+        os.system('mkdir %s'%(i))
+    with open('%s/%s.txt'%(j,i),'w+') as f:
+        f.write(str(i)*1024)
+Then use benchmark.py for the stress test
+Multiple machines can also run the stress test simultaneously; all nodes can read and write at the same time
+```
+
+- Why is the code written in one file?
+```
+First, the current code is still very simple; there is no need to make it complicated.
+Second, splitting code across multiple files is not what makes it modular; look at the code structure in an IDE and you will see it is in fact already modular.
+```
+
+- Are resumable downloads supported?
+```
+Answer: yes
+With curl and wget:
+wget -c http://10.1.5.9:8080/group1/default/20190128/16/10/2G
+curl -C - http://10.1.5.9:8080/group1/default/20190128/16/10/2G
+```
+
+- How is Docker deployed?
+```
+Steps:
+Method one,
+  First, build the image
+  docker build . -t fastdfs
+  Second, run the container (use the environment variable GO_FASTDFS_DIR to point at the storage directory.)
+  docker run --name fastdfs -v /data/fastdfs_data:/data -e GO_FASTDFS_DIR=/data fastdfs
+Method two,
+  First, pull the image
+  docker pull sjqzhang/go-fastdfs
+  Second, run the container
+  docker run --name fastdfs -v /data/fastdfs_data:/data -e GO_FASTDFS_DIR=/data sjqzhang/go-fastdfs
+
+```
+
+- How to upload large files in chunks, or resume uploads?
+```
+Chunked upload generally needs client support, and with so many languages the clients are hard to maintain; yet chunked upload is necessary, so here are some simple approaches.
+Option one,
+Use linux split and cat to split and merge; see the split and cat help.
+Split: split -b 1M filename #1M per chunk
+Merge: cat x* > filename #merge
+Option two,
+Use hjsplit
+http://www.hjsplit.org/
+and implement it yourself
+Option three,
+Implement hjsplit-style split/merge in go, which is cross-platform. (Not implemented yet; waiting for you... see the sketch below)
+Option four,
+Use the built-in resumable upload (the protocol for resumable uploads, [details](https://tus.io/))
+  Note: option four can only target one upload server, does not support simultaneous writes, and the upload url differs.
+  Original upload url: http://10.1.5.9:8080//upload
+  Resumable upload url: http://10.1.5.9:8080//big/upload/
+  After the upload completes, fetch the file info through the instant-upload interface.
+```
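+As a starting point for option three, here is a minimal Go sketch of the splitting half (the chunk naming is an assumption; merging is the reverse: concatenate the parts in order):
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+// split cuts name into fixed-size chunks, like split -b 1M.
+func split(name string, chunkSize int64) error {
+	in, err := os.Open(name)
+	if err != nil {
+		return err
+	}
+	defer in.Close()
+	for i := 0; ; i++ {
+		out, err := os.Create(fmt.Sprintf("%s.part%04d", name, i))
+		if err != nil {
+			return err
+		}
+		n, err := io.CopyN(out, in, chunkSize)
+		out.Close()
+		if err == io.EOF {
+			if n == 0 {
+				os.Remove(out.Name()) // the read hit EOF immediately; drop the empty part
+			}
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+	}
+}
+
+func main() {
+	if err := split("100m", 1<<20); err != nil {
+		panic(err)
+	}
+}
+```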
+
+- How to instant-upload (秒传) files?
+```
+Access the upload interface with an http GET
+http://10.0.5.9:8080/upload?md5=filesum&output=json
+Parameters:
+md5=sum(file) The file digest algorithm must match the server's (md5|sha1 are supported); for resumable uploads you can use the file id, i.e. the id returned after the upload.
+output=json|text The response format
+
+```
+
+- How is the cluster planned, and how is it expanded?
+```
+In early planning, prefer to buy large-capacity machines as storage servers. For two replicas, two machines form a cluster; for three replicas,
+three machines form a cluster. (Keep each server's configuration the same, and use raid5 disk arrays.)
+
+To improve availability, just add the new machine to the current cluster's peers and then repair the cluster.
+Repair method: http://172.16.70.123:7080/repair?force=1 (recommended at low peak)
+
+How to expand?
+For simplicity and reliability, just build a new cluster (building means starting the ./fileserver process and setting the peers' IP addresses; a matter of minutes).
+In the issues, chengyuansen suggested adding an in-place capacity-expansion feature; I feel it adds complexity to both the code logic and operations, so I have not added it for now.
+
+```
+
+
+- Access restrictions
+```
+For security reasons, the management API can only be called inside the cluster or via 127.0.0.1.
+```
+
+
+
+
+- If you have any questions, please click [Feedback](https://github.com/sjqzhang/go-fastdfs/issues/new)
+## Have questions?
+![QR code](doc/wechat.jpg)
+
+
+#### [Donation](doc/pay.png)
\ No newline at end of file
diff --git a/benchmark.py b/benchmark.py
new file mode 100644
index 00000000..a4c59d20
--- /dev/null
+++ b/benchmark.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+# Python 2 stress-test script: upload every file under ./files concurrently.
+from gevent import monkey
+monkey.patch_all()  # patch sockets before requests is imported
+
+import commands
+
+import gevent
+import requests
+
+from Queue import Queue, Empty
+
+q = Queue()
+
+out = commands.getoutput('find ./files -type f')
+
+for line in out.split("\n"):
+    q.put(line)
+
+
+def task():
+    while True:
+        try:
+            name = q.get(block=False)
+        except Empty:  # queue drained, stop this worker
+            break
+        url = 'http://10.1.5.20:8080/upload'
+        files = {'file': open(name, 'rb')}
+        options = {'output': 'json', 'path': '', 'scene': ''}
+        try:
+            r = requests.post(url, data=options, files=files)
+        except Exception as er:
+            print(er)
+
+th = []
+for i in range(200):
+    th.append(gevent.spawn(task))
+gevent.joinall(th)
+
diff --git a/control b/control
new file mode 100644
index 00000000..71ca19d3
--- /dev/null
+++ b/control
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+WORKSPACE=$(cd $(dirname $0)/; pwd)
+cd $WORKSPACE
+
+mkdir -p log conf
+
+app=fileserver
+conf=conf/cfg.json
+pidfile=conf/app.pid
+logfile=log/app.log
+
+function check_pid() {
+    if [ -f $pidfile ];then
+        pid=`cat $pidfile`
+        if [ -n "$pid" ]; then
+            running=`ps -p $pid|grep -v "PID TTY" |wc -l`
+            return $running
+        fi
+    fi
+    return 0
+}
+
+function start() {
+    check_pid
+    running=$?
+    if [ $running -gt 0 ];then
+        echo -n "$app now is running already, pid="
+        cat $pidfile
+        return 1
+    fi
+
+    nohup ./$app &> $logfile &
+    echo $! > $pidfile
+    echo "$app started..., pid=$!"
+}
+
+function stop() {
+    pid=`cat $pidfile`
+    kill $pid
+    echo "$app stopped..."
+}
+
+function restart() {
+    stop
+    sleep 1
+    start
+}
+
+function status() {
+    check_pid
+    running=$?
+    if [ $running -gt 0 ];then
+        echo -n "$app now is running, pid="
+        cat $pidfile
+    else
+        echo "$app is stopped"
+    fi
+}
+
+function tailf() {
+    tail -f $logfile
+}
+
+function build() {
+    go build -o $app
+    if [ $? -ne 0 ]; then
+        exit 1
+    fi
+    ./$app -v | grep -v "config"
+}
+
+function pack() {
+    build
+    git log -1 --pretty=%h > gitversion
+    version=`./$app -v|grep -v config`
+    file_list="control cfg.example.json $app"
+    tar zcf $app-$version.tar.gz gitversion $file_list
+}
+
+function packbin() {
+    build
+    git log -1 --pretty=%h > gitversion
+    version=`./$app -v|grep -v config`
+    tar zcvf $app-bin-$version.tar.gz $app gitversion
+}
+
+function help() {
+    echo "$0 start|stop|restart|status|tail"
+}
+
+if [ "$1" == "" ]; then
+    help
+elif [ "$1" == "stop" ];then
+    stop
+elif [ "$1" == "start" ];then
+    start
+elif [ "$1" == "restart" ];then
+    restart
+elif [ "$1" == "status" ];then
+    status
+elif [ "$1" == "tail" ];then
+    tailf
+else
+    help
+fi
diff --git a/doc/api.md b/doc/api.md
new file mode 100644
index 00000000..1d38a19b
--- /dev/null
+++ b/doc/api.md
@@ -0,0 +1,93 @@
+## API通用说明
+```
+一、统一使用POST请求
+二、返回格式统一为json
+  格式如下
+ {
+  "status":"ok",
+  "message":"",
+  "data":{}
+ }
+三、url中的group只有在support_group_manage设置为true才有。
+ 例如:
+ http://10.1.5.9:8080/group/reload
+ 默认:
+ http://10.1.5.9:8080/reload
+ 说明:url中的group为cfg.json中的group参数值。
+
+```
+
+## 配置管理API
+```
+http://10.1.5.9:8080/group/reload
+
+参数:
+action: set(修改参数),get(获取参数),reload(重新加载参数)
+cfg:json参数,与 action=set 配合完成参数设置
+
+```
+
+## 文件统计信息API
+```
+http://10.1.50.90:8080/group/stat
+
+```
+
+## 文件上传API
+```
+http://10.1.50.90:8080/group/upload
+参数:
+file:上传的文件
+scene:场景
+output:输出
+path:自定义路径
+具体请参阅示例代码
+```
+
+## 文件秒传
+```
+http://10.1.50.90:8080/group/upload
+参数:
+md5:文件的摘要
+摘要算法要与cfg.json中配置的一样
+```
+
+
+## 文件删除
+```
+http://10.1.50.90:8080/group/delete
+参数:
+md5:文件的摘要(md5|sha1) 视配置定
+path:文件路径
+md5与path二选一
+说明:md5或path都是上传文件时返回的信息,要以json方式返回才能看到(参阅浏览器上传)
+```
+
+## 文件信息
+```
+http://10.1.50.90:8080/group/get_file_info
+参数:
+md5:文件的摘要(md5|sha1) 视配置定
+path:文件路径
+md5与path二选一
+说明:md5或path都是上传文件时返回的信息,要以json方式返回才能看到(参阅浏览器上传)
+```
+
+
+## 修复统计信息
+```
+http://10.1.50.90:8080/group/repair_stat
+```
+
+## 同步失败修复
+```
+http://10.1.50.90:8080/group/repair
+参数:
+force:是否强行修复(0|1)
+```
+
+## 从文件目录中修复元数据(性能较差)
+```
+http://10.1.50.90:8080/group/repair_fileinfo
+
+```
\ No newline at end of file
diff --git a/doc/authentication.png b/doc/authentication.png
new file mode 100644
index 00000000..3c00f6db
Binary files /dev/null and b/doc/authentication.png differ
diff --git a/doc/authentication2.png b/doc/authentication2.png
new file mode 100644
index 00000000..cdcf4060
Binary files /dev/null and b/doc/authentication2.png differ
diff --git a/doc/coverage.html b/doc/coverage.html
new file mode 100644
index 00000000..0bbec178
--- /dev/null
+++ b/doc/coverage.html
@@ -0,0 +1,3917 @@
+[doc/coverage.html: generated Go test-coverage report (~3900 lines of HTML/JS), omitted here; legend: not tracked / not covered / covered]
diff --git a/doc/go-fastdfs-deploy.png b/doc/go-fastdfs-deploy.png
new file mode 100644
index 00000000..38deda46
Binary files /dev/null and b/doc/go-fastdfs-deploy.png differ
diff --git a/doc/logo.png b/doc/logo.png
new file mode 100644
index 00000000..0bb5622e
Binary files /dev/null and b/doc/logo.png differ
diff --git a/doc/pay.png b/doc/pay.png
new file mode 100644
index 00000000..961f5089
Binary files /dev/null and b/doc/pay.png differ
diff --git a/doc/upload.md b/doc/upload.md
new file mode 100644
index 00000000..3aa19734
--- /dev/null
+++ b/doc/upload.md
@@ -0,0 +1,63 @@
+## java版本
+依赖(这里使用了hutool工具包,更简便)
+```xml
+<dependency>
+    <groupId>cn.hutool</groupId>
+    <artifactId>hutool-all</artifactId>
+    <version>4.4.3</version>
+</dependency>
+```
+上传代码
+```java
+public static void main(String[] args) {
+    //文件地址
+    File file = new File("D:\\git\\2.jpg");
+    //声明参数集合
+    HashMap<String, Object> paramMap = new HashMap<>();
+    //文件
+    paramMap.put("file", file);
+    //输出
+    paramMap.put("output","json");
+    //自定义路径
+    paramMap.put("path","image");
+    //场景
+    paramMap.put("scene","image");
+    //上传
+    String result= HttpUtil.post("http://xxxxx:xxxx/upload", paramMap);
+    //输出json结果
+    System.out.println(result);
+}
+```
+# 断点续传示例
+
+## golang版本
+```go
+package main
+
+import (
+	"os"
+	"fmt"
+	"github.com/eventials/go-tus"
+)
+
+func main() {
+	f, err := os.Open("100m")
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+	// create the tus client.
+	client, err := tus.NewClient("http://10.1.5.9:8080/big/upload/", nil)
+	fmt.Println(err)
+	// create an upload from a file.
+	upload, err := tus.NewUploadFromFile(f)
+	fmt.Println(err)
+	// create the uploader.
+	uploader, err := client.CreateUpload(upload)
+	fmt.Println(err)
+	// start the uploading process.
+	fmt.Println(uploader.Upload())
+}
+```
diff --git a/doc/wechat.jpg b/doc/wechat.jpg
new file mode 100644
index 00000000..0fa4d08e
Binary files /dev/null and b/doc/wechat.jpg differ
diff --git a/fileserver.go b/fileserver.go
index a4828fd6..64b65a33 100644
--- a/fileserver.go
+++ b/fileserver.go
@@ -1,33 +1,96 @@
 package main
 
 import (
+	"bufio"
+	"bytes"
 	"crypto/md5"
 	"crypto/rand"
+	"crypto/sha1"
 	"encoding/base64"
+	"errors"
 	"flag"
 	"fmt"
+	"github.com/astaxie/beego/httplib"
+	"github.com/deckarep/golang-set"
+	_ "github.com/eventials/go-tus"
+	"github.com/json-iterator/go"
+	"github.com/nfnt/resize"
+	"github.com/sjqzhang/googleAuthenticator"
+	log "github.com/sjqzhang/seelog"
+	"github.com/sjqzhang/tusd"
+	"github.com/sjqzhang/tusd/filestore"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/util"
+	"image"
+	"image/jpeg"
+	"image/png"
 	"io"
+	"io/ioutil"
+	slog "log"
+	random "math/rand"
+	"mime/multipart"
+	"net"
 	"net/http"
+	_ "net/http/pprof"
+	"net/smtp"
+	"net/url"
 	"os"
+	"os/signal"
+	"path"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"runtime"
+	"runtime/debug"
+	"strconv"
 	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
 	"time"
-
-	log "github.com/sjqzhang/seelog"
+	"unsafe"
 )
 
 var staticHandler http.Handler
-var util = &Common{}
-var server = &Server{}
-var bind = "0.0.0.0:8080"
-
-const (
-	STORE_DIR = "files"
-
-	logConfigStr = `
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+var server *Server
+var logacc log.LoggerInterface
+var FOLDERS = []string{DATA_DIR, STORE_DIR, CONF_DIR, STATIC_DIR}
+var CONST_QUEUE_SIZE = 10000
+var (
+	FileName string
+	ptr      unsafe.Pointer
+	DOCKER_DIR     = ""
+	STORE_DIR      = STORE_DIR_NAME
+	CONF_DIR       = CONF_DIR_NAME
+	LOG_DIR        = LOG_DIR_NAME
+	DATA_DIR       = DATA_DIR_NAME
+	STATIC_DIR     = STATIC_DIR_NAME
+	LARGE_DIR_NAME = "haystack"
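+	// LARGE_DIR below is the on-disk "haystack" store that backs the
+	// small-file merge feature (see enable_merge_small_file in cfg.json).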
LARGE_DIR = STORE_DIR + "/haystack" + CONST_LEVELDB_FILE_NAME = DATA_DIR + "/fileserver.db" + CONST_LOG_LEVELDB_FILE_NAME = DATA_DIR + "/log.db" + CONST_STAT_FILE_NAME = DATA_DIR + "/stat.json" + CONST_CONF_FILE_NAME = CONF_DIR + "/cfg.json" + CONST_SEARCH_FILE_NAME = DATA_DIR + "/search.txt" + logConfigStr = ` + + + + + + + + + + +` + logAccessConfigStr = ` - + @@ -37,147 +100,4062 @@ const ( ` ) -type Common struct { +const ( + STORE_DIR_NAME = "files" + LOG_DIR_NAME = "log" + DATA_DIR_NAME = "data" + CONF_DIR_NAME = "conf" + STATIC_DIR_NAME = "static" + CONST_STAT_FILE_COUNT_KEY = "fileCount" + CONST_BIG_UPLOAD_PATH_SUFFIX = "/big/upload/" + CONST_STAT_FILE_TOTAL_SIZE_KEY = "totalSize" + CONST_Md5_ERROR_FILE_NAME = "errors.md5" + CONST_Md5_QUEUE_FILE_NAME = "queue.md5" + CONST_FILE_Md5_FILE_NAME = "files.md5" + CONST_REMOME_Md5_FILE_NAME = "removes.md5" + CONST_SMALL_FILE_SIZE = 1024 * 1024 + CONST_MESSAGE_CLUSTER_IP = "Can only be called by the cluster ip or 127.0.0.1 or admin_ips(cfg.json),current ip:%s" + cfgJson = `{ + "绑定端号": "端口", + "addr": ":8080", + "PeerID": "集群内唯一,请使用0-9的单字符,默认自动生成", + "peer_id": "%s", + "本主机地址": "本机http地址,默认自动生成(注意端口必须与addr中的端口一致),必段为内网,自动生成不为内网请自行修改,下同", + "host": "%s", + "集群": "集群列表,注意为了高可用,IP必须不能是同一个,同一不会自动备份,且不能为127.0.0.1,且必须为内网IP,默认自动生成", + "peers": ["%s"], + "组号": "用于区别不同的集群(上传或下载)与support_group_upload配合使用,带在下载路径中", + "group": "group1", + "是否合并小文件": "默认不合并,合并可以解决inode不够用的情况(当前对于小于1M文件)进行合并", + "enable_merge_small_file": false, + "重试同步失败文件的时间": "单位秒", + "refresh_interval": 1800, + "是否自动重命名": "默认不自动重命名,使用原文件名", + "rename_file": false, + "是否支持web上传,方便调试": "默认支持web上传", + "enable_web_upload": true, + "是否支持非日期路径": "默认支持非日期路径,也即支持自定义路径,需要上传文件时指定path", + "enable_custom_path": true, + "下载域名": "用于外网下载文件的域名,不包含http://", + "download_domain": "", + "场景列表": "当设定后,用户指的场景必项在列表中,默认不做限制(注意:如果想开启场景认功能,格式如下:'场景名:googleauth_secret' 如 default:N7IET373HB2C5M6D ", + "scenes": [], + "默认场景": "默认default", + "default_scene": "default", + "是否显示目录": "默认显示,方便调试用,上线时请关闭", + "show_dir": true, + "邮件配置": "", + "mail": { + "user": "abc@163.com", + "password": "abc", + "host": "smtp.163.com:25" + }, + "告警接收邮件列表": "接收人数组", + "alram_receivers": [], + "告警接收URL": "方法post,参数:subjet,message", + "alarm_url": "", + "下载是否需带token": "真假", + "download_use_token": false, + "下载token过期时间": "单位秒", + "download_token_expire": 600, + "是否自动修复": "在超过1亿文件时出现性能问题,取消此选项,请手动按天同步,请查看FAQ", + "auto_repair": true, + "文件去重算法md5可能存在冲突,默认md5": "sha1|md5", + "file_sum_arithmetic": "md5", + "是否支持按组(集群)管理,主要用途是Nginx支持多集群": "默认不支持,不支持时路径为http://10.1.5.4:8080/action,支持时为http://10.1.5.4:8080/group(配置中的group参数)/action,action为动作名,如status,delete,sync等", + "support_group_manage": false, + "管理ip列表": "用于管理集的ip白名单,", + "admin_ips": ["127.0.0.1"], + "是否启用迁移": "默认不启用", + "enable_migrate": false, + "文件是否去重": "默认去重", + "enable_distinct_file": true, + "是否开启跨站访问": "默认开启", + "enable_cross_origin": true, + "是否开启Google认证,实现安全的上传、下载": "默认不开启", + "enable_google_auth": false, + "认证url": "当url不为空时生效,注意:普通上传中使用http参数 auth_token 作为认证参数, 在断点续传中通过HTTP头Upload-Metadata中的auth_token作为认证参数,认证流程参考认证架构图", + "auth_url": "", + "下载是否认证": "默认不认证(注意此选项是在auth_url不为空的情况下生效)", + "enable_download_auth": false, + "默认是否下载": "默认下载", + "default_download": true, + "本机是否只读": "默认可读可写", + "read_only": false, + "是否开启断点续传": "默认开启", + "enable_tus": true } + ` +) +type Common struct { +} type Server struct { + ldb *leveldb.DB + logDB *leveldb.DB + util *Common + statMap *CommonMap + sumMap *CommonMap //map[string]mapset.Set + queueToPeers chan FileInfo + queueFromPeers chan 
FileInfo + queueFileLog chan *FileLog + lockMap *CommonMap + sceneMap *CommonMap + searchMap *CommonMap + curDate string + host string +} +type FileInfo struct { + Name string `json:"name"` + ReName string `json:"rename"` + Path string `json:"path"` + Md5 string `json:"md5"` + Size int64 `json:"size"` + Peers []string `json:"peers"` + Scene string `json:"scene"` + TimeStamp int64 `json:"timeStamp"` + OffSet int64 `json:"offset"` +} +type FileLog struct { + FileInfo *FileInfo + FileName string +} +type JsonResult struct { + Message string `json:"message"` + Status string `json:"status"` + Data interface{} `json:"data"` +} +type FileResult struct { + Url string `json:"url"` + Md5 string `json:"md5"` + Path string `json:"path"` + Domain string `json:"domain"` + Scene string `json:"scene"` + Size int64 `json:"size"` + ModTime int64 `json:"mtime"` + //Just for Compatibility + Scenes string `json:"scenes"` + Retmsg string `json:"retmsg"` + Retcode int `json:"retcode"` + Src string `json:"src"` +} +type Mail struct { + User string `json:"user"` + Password string `json:"password"` + Host string `json:"host"` +} +type StatDateFileInfo struct { + Date string `json:"date"` + TotalSize int64 `json:"totalSize"` + FileCount int64 `json:"fileCount"` +} +type GloablConfig struct { + Addr string `json:"addr"` + Peers []string `json:"peers"` + Group string `json:"group"` + RenameFile bool `json:"rename_file"` + ShowDir bool `json:"show_dir"` + RefreshInterval int `json:"refresh_interval"` + EnableWebUpload bool `json:"enable_web_upload"` + DownloadDomain string `json:"download_domain"` + EnableCustomPath bool `json:"enable_custom_path"` + Scenes []string `json:"scenes"` + AlramReceivers []string `json:"alram_receivers"` + DefaultScene string `json:"default_scene"` + Mail Mail `json:"mail"` + AlarmUrl string `json:"alarm_url"` + DownloadUseToken bool `json:"download_use_token"` + DownloadTokenExpire int `json:"download_token_expire"` + QueueSize int `json:"queue_size"` + AutoRepair bool `json:"auto_repair"` + Host string `json:"host"` + FileSumArithmetic string `json:"file_sum_arithmetic"` + PeerId string `json:"peer_id"` + SupportGroupManage bool `json:"support_group_manage"` + AdminIps []string `json:"admin_ips"` + EnableMergeSmallFile bool `json:"enable_merge_small_file"` + EnableMigrate bool `json:"enable_migrate"` + EnableDistinctFile bool `json:"enable_distinct_file"` + ReadOnly bool `json:"read_only"` + EnableCrossOrigin bool `json:"enable_cross_origin"` + EnableGoogleAuth bool `json:"enable_google_auth"` + AuthUrl string `json:"auth_url"` + EnableDownloadAuth bool `json:"enable_download_auth"` + DefaultDownload bool `json:"default_download"` + EnableTus bool `json:"enable_tus"` +} +type FileInfoResult struct { + Name string `json:"name"` + Md5 string `json:"md5"` + Path string `json:"path"` + Size int64 `json:"size"` + ModTime time.Time `json:"mtime"` + IsDir bool `json:"is_dir"` +} +type Tuple struct { + Key string + Val interface{} } -func (this *Common) GetUUID() string { +func NewServer() *Server { + var ( + server *Server + err error + ) + server = &Server{ + util: &Common{}, + statMap: NewCommonMap(0), + lockMap: NewCommonMap(0), + sceneMap: NewCommonMap(0), + searchMap: NewCommonMap(0), + queueToPeers: make(chan FileInfo, CONST_QUEUE_SIZE), + queueFromPeers: make(chan FileInfo, CONST_QUEUE_SIZE), + queueFileLog: make(chan *FileLog, CONST_QUEUE_SIZE), + sumMap: NewCommonMap(365 * 3), + } + defaultTransport := &http.Transport{ + DisableKeepAlives: true, + Dial: 
httplib.TimeoutDialer(time.Second*6, time.Second*60), + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + } + settins := httplib.BeegoHTTPSettings{ + UserAgent: "Go-FastDFS", + ConnectTimeout: 10 * time.Second, + ReadWriteTimeout: 10 * time.Second, + Gzip: true, + DumpBody: true, + Transport: defaultTransport, + } + httplib.SetDefaultSetting(settins) + server.statMap.Put(CONST_STAT_FILE_COUNT_KEY, int64(0)) + server.statMap.Put(CONST_STAT_FILE_TOTAL_SIZE_KEY, int64(0)) + server.statMap.Put(server.util.GetToDay()+"_"+CONST_STAT_FILE_COUNT_KEY, int64(0)) + server.statMap.Put(server.util.GetToDay()+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, int64(0)) + server.curDate = server.util.GetToDay() + opts := &opt.Options{ + CompactionTableSize: 1024 * 1024 * 20, + WriteBuffer: 1024 * 1024 * 20, + } + server.ldb, err = leveldb.OpenFile(CONST_LEVELDB_FILE_NAME, opts) + if err != nil { + fmt.Println(err) + log.Error(err) + panic(err) + } + server.logDB, err = leveldb.OpenFile(CONST_LOG_LEVELDB_FILE_NAME, opts) + if err != nil { + fmt.Println(err) + log.Error(err) + panic(err) + + } + return server +} + +type CommonMap struct { + sync.RWMutex + m map[string]interface{} +} + +func NewCommonMap(size int) *CommonMap { + if size > 0 { + return &CommonMap{m: make(map[string]interface{}, size)} + } else { + return &CommonMap{m: make(map[string]interface{})} + } +} +func (s *CommonMap) GetValue(k string) (interface{}, bool) { + s.RLock() + defer s.RUnlock() + v, ok := s.m[k] + return v, ok +} +func (s *CommonMap) Put(k string, v interface{}) { + s.Lock() + defer s.Unlock() + s.m[k] = v +} +func (s *CommonMap) Iter() <-chan Tuple { // reduce memory + ch := make(chan Tuple) + go func() { + s.RLock() + for k, v := range s.m { + ch <- Tuple{Key: k, Val: v} + } + close(ch) + s.RUnlock() + }() + return ch +} +func (s *CommonMap) LockKey(k string) { + s.Lock() + if v, ok := s.m[k]; ok { + s.m[k+"_lock_"] = true + s.Unlock() + switch v.(type) { + case *sync.Mutex: + v.(*sync.Mutex).Lock() + default: + log.Warn(fmt.Sprintf("LockKey %s", k)) + } + } else { + s.m[k] = &sync.Mutex{} + v = s.m[k] + s.m[k+"_lock_"] = true + v.(*sync.Mutex).Lock() + s.Unlock() + } +} +func (s *CommonMap) UnLockKey(k string) { + s.Lock() + if v, ok := s.m[k]; ok { + switch v.(type) { + case *sync.Mutex: + v.(*sync.Mutex).Unlock() + default: + log.Warn(fmt.Sprintf("UnLockKey %s", k)) + } + delete(s.m, k+"_lock_") // memory leak + delete(s.m, k) // memory leak + } + s.Unlock() +} +func (s *CommonMap) IsLock(k string) bool { + s.Lock() + if v, ok := s.m[k+"_lock_"]; ok { + s.Unlock() + return v.(bool) + } + s.Unlock() + return false +} +func (s *CommonMap) Keys() []string { + s.Lock() + keys := make([]string, len(s.m)) + defer s.Unlock() + for k, _ := range s.m { + keys = append(keys, k) + } + return keys +} +func (s *CommonMap) Clear() { + s.Lock() + defer s.Unlock() + s.m = make(map[string]interface{}) +} +func (s *CommonMap) Remove(key string) { + s.Lock() + defer s.Unlock() + if _, ok := s.m[key]; ok { + delete(s.m, key) + } +} +func (s *CommonMap) AddUniq(key string) { + s.Lock() + defer s.Unlock() + if _, ok := s.m[key]; !ok { + s.m[key] = nil + } +} +func (s *CommonMap) AddCount(key string, count int) { + s.Lock() + defer s.Unlock() + if _v, ok := s.m[key]; ok { + v := _v.(int) + v = v + count + s.m[key] = v + } else { + s.m[key] = 1 + } +} +func (s *CommonMap) AddCountInt64(key string, count int64) { + s.Lock() + defer s.Unlock() + if _v, ok := s.m[key]; ok { + v := _v.(int64) + v = v + count + s.m[key] = v + } else { + s.m[key] = count + } +} 
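+// Usage sketch (illustrative only, not project code): CommonMap doubles as a
+// per-key mutex registry and a counter map, e.g.
+//
+//	server.lockMap.LockKey(path)   // serialize writers touching one file
+//	defer server.lockMap.UnLockKey(path)
+//	server.statMap.AddCountInt64(CONST_STAT_FILE_COUNT_KEY, 1)
+//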
+func (s *CommonMap) Add(key string) { + s.Lock() + defer s.Unlock() + if _v, ok := s.m[key]; ok { + v := _v.(int) + v = v + 1 + s.m[key] = v + } else { + s.m[key] = 1 + } +} +func (s *CommonMap) Zero() { + s.Lock() + defer s.Unlock() + for k := range s.m { + s.m[k] = 0 + } +} +func (s *CommonMap) Contains(i ...interface{}) bool { + s.Lock() + defer s.Unlock() + for _, val := range i { + if _, ok := s.m[val.(string)]; !ok { + return false + } + } + return true +} +func (s *CommonMap) Get() map[string]interface{} { + s.Lock() + defer s.Unlock() + m := make(map[string]interface{}) + for k, v := range s.m { + m[k] = v + } + return m +} +func Config() *GloablConfig { + return (*GloablConfig)(atomic.LoadPointer(&ptr)) +} +func ParseConfig(filePath string) { + var ( + data []byte + ) + if filePath == "" { + data = []byte(strings.TrimSpace(cfgJson)) + } else { + file, err := os.Open(filePath) + if err != nil { + panic(fmt.Sprintln("open file path:", filePath, "error:", err)) + } + defer file.Close() + FileName = filePath + data, err = ioutil.ReadAll(file) + if err != nil { + panic(fmt.Sprintln("file path:", filePath, " read all error:", err)) + } + } + var c GloablConfig + if err := json.Unmarshal(data, &c); err != nil { + panic(fmt.Sprintln("file path:", filePath, "json unmarshal error:", err)) + } + log.Info(c) + atomic.StorePointer(&ptr, unsafe.Pointer(&c)) + log.Info("config parse success") +} +func (this *Common) GetUUID() string { b := make([]byte, 48) if _, err := io.ReadFull(rand.Reader, b); err != nil { return "" } id := this.MD5(base64.URLEncoding.EncodeToString(b)) return fmt.Sprintf("%s-%s-%s-%s-%s", id[0:8], id[8:12], id[12:16], id[16:20], id[20:]) - } - +func (this *Common) CopyFile(src, dst string) (int64, error) { + sourceFileStat, err := os.Stat(src) + if err != nil { + return 0, err + } + if !sourceFileStat.Mode().IsRegular() { + return 0, fmt.Errorf("%s is not a regular file", src) + } + source, err := os.Open(src) + if err != nil { + return 0, err + } + defer source.Close() + destination, err := os.Create(dst) + if err != nil { + return 0, err + } + defer destination.Close() + nBytes, err := io.Copy(destination, source) + return nBytes, err +} +func (this *Common) RandInt(min, max int) int { + return func(min, max int) int { + r := random.New(random.NewSource(time.Now().UnixNano())) + if min >= max { + return max + } + return r.Intn(max-min) + min + }(min, max) +} +func (this *Common) GetToDay() string { + return time.Now().Format("20060102") +} +func (this *Common) UrlEncode(v interface{}) string { + switch v.(type) { + case string: + m := make(map[string]string) + m["name"] = v.(string) + return strings.Replace(this.UrlEncodeFromMap(m), "name=", "", 1) + case map[string]string: + return this.UrlEncodeFromMap(v.(map[string]string)) + default: + return fmt.Sprintf("%v", v) + } +} +func (this *Common) UrlEncodeFromMap(m map[string]string) string { + vv := url.Values{} + for k, v := range m { + vv.Add(k, v) + } + return vv.Encode() +} +func (this *Common) UrlDecodeToMap(body string) (map[string]string, error) { + var ( + err error + m map[string]string + v url.Values + ) + m = make(map[string]string) + if v, err = url.ParseQuery(body); err != nil { + return m, err + } + for _k, _v := range v { + if len(_v) > 0 { + m[_k] = _v[0] + } + } + return m, nil +} +func (this *Common) GetDayFromTimeStamp(timeStamp int64) string { + return time.Unix(timeStamp, 0).Format("20060102") +} +func (this *Common) StrToMapSet(str string, sep string) mapset.Set { + result := mapset.NewSet() + for _, 
v := range strings.Split(str, sep) { + result.Add(v) + } + return result +} +func (this *Common) MapSetToStr(set mapset.Set, sep string) string { + var ( + ret []string + ) + for v := range set.Iter() { + ret = append(ret, v.(string)) + } + return strings.Join(ret, sep) +} +func (this *Common) GetPulicIP() string { + var ( + err error + conn net.Conn + ) + if conn, err = net.Dial("udp", "8.8.8.8:80"); err != nil { + return "127.0.0.1" + } + defer conn.Close() + localAddr := conn.LocalAddr().String() + idx := strings.LastIndex(localAddr, ":") + return localAddr[0:idx] +} func (this *Common) MD5(str string) string { - md := md5.New() md.Write([]byte(str)) return fmt.Sprintf("%x", md.Sum(nil)) } - +func (this *Common) GetFileMd5(file *os.File) string { + file.Seek(0, 0) + md5h := md5.New() + io.Copy(md5h, file) + sum := fmt.Sprintf("%x", md5h.Sum(nil)) + return sum +} +func (this *Common) GetFileSum(file *os.File, alg string) string { + alg = strings.ToLower(alg) + if alg == "sha1" { + return this.GetFileSha1Sum(file) + } else { + return this.GetFileMd5(file) + } +} +func (this *Common) GetFileSumByName(filepath string, alg string) (string, error) { + var ( + err error + file *os.File + ) + file, err = os.Open(filepath) + if err != nil { + return "", err + } + defer file.Close() + alg = strings.ToLower(alg) + if alg == "sha1" { + return this.GetFileSha1Sum(file), nil + } else { + return this.GetFileMd5(file), nil + } +} +func (this *Common) GetFileSha1Sum(file *os.File) string { + file.Seek(0, 0) + md5h := sha1.New() + io.Copy(md5h, file) + sum := fmt.Sprintf("%x", md5h.Sum(nil)) + return sum +} +func (this *Common) WriteFileByOffSet(filepath string, offset int64, data []byte) (error) { + var ( + err error + file *os.File + count int + ) + file, err = os.OpenFile(filepath, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return err + } + defer file.Close() + count, err = file.WriteAt(data, offset) + if err != nil { + return err + } + if count != len(data) { + return errors.New(fmt.Sprintf("write %s error", filepath)) + } + return nil +} +func (this *Common) ReadFileByOffSet(filepath string, offset int64, length int) ([]byte, error) { + var ( + err error + file *os.File + result []byte + count int + ) + file, err = os.Open(filepath) + if err != nil { + return nil, err + } + defer file.Close() + result = make([]byte, length) + count, err = file.ReadAt(result, offset) + if err != nil { + return nil, err + } + if count != length { + return nil, errors.New("read error") + } + return result, nil +} +func (this *Common) Contains(obj interface{}, arrayobj interface{}) bool { + targetValue := reflect.ValueOf(arrayobj) + switch reflect.TypeOf(arrayobj).Kind() { + case reflect.Slice, reflect.Array: + for i := 0; i < targetValue.Len(); i++ { + if targetValue.Index(i).Interface() == obj { + return true + } + } + case reflect.Map: + if targetValue.MapIndex(reflect.ValueOf(obj)).IsValid() { + return true + } + } + return false +} func (this *Common) FileExists(fileName string) bool { _, err := os.Stat(fileName) return err == nil } - -func (this *Server) Download(w http.ResponseWriter, r *http.Request) { - log.Info("download:" + r.RequestURI) - staticHandler.ServeHTTP(w, r) +func (this *Common) WriteFile(path string, data string) bool { + if err := ioutil.WriteFile(path, []byte(data), 0775); err == nil { + return true + } else { + return false + } } - -func (this *Server) Upload(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - name := r.PostFormValue("name") - md5sum := 
r.PostFormValue("md5") - file, header, err := r.FormFile("file") +func (this *Common) WriteBinFile(path string, data []byte) bool { + if err := ioutil.WriteFile(path, data, 0775); err == nil { + return true + } else { + return false + } +} +func (this *Common) IsExist(filename string) bool { + _, err := os.Stat(filename) + return err == nil || os.IsExist(err) +} +func (this *Common) Match(matcher string, content string) []string { + var result []string + if reg, err := regexp.Compile(matcher); err == nil { + result = reg.FindAllString(content, -1) + } + return result +} +func (this *Common) ReadBinFile(path string) ([]byte, error) { + if this.IsExist(path) { + fi, err := os.Open(path) if err != nil { - log.Error(err) - fmt.Printf("FromFileErr") - http.Redirect(w, r, "/", http.StatusMovedPermanently) - return + return nil, err } - - if name == "" { - name = header.Filename + defer fi.Close() + return ioutil.ReadAll(fi) + } else { + return nil, errors.New("not found") + } +} +func (this *Common) RemoveEmptyDir(pathname string) { + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("postFileToPeer") + log.Error(re) + log.Error(string(buffer)) } - - folder := time.Now().Format("2006-01-02") - - folder = fmt.Sprintf(STORE_DIR+"/%s", folder) - - if !util.FileExists(folder) { - os.Mkdir(folder, 0777) + }() + handlefunc := func(file_path string, f os.FileInfo, err error) error { + if f.IsDir() { + files, _ := ioutil.ReadDir(file_path) + if len(files) == 0 && file_path != pathname { + os.Remove(file_path) + } } - - outPath := fmt.Sprintf(folder+"/%s", name) - - log.Info(fmt.Sprintf("upload: %s", outPath)) - - outFile, err := os.Create(outPath) - if err != nil { - log.Error(err) - w.Write([]byte("fail," + err.Error())) + return nil + } + fi, _ := os.Stat(pathname) + if fi.IsDir() { + filepath.Walk(pathname, handlefunc) + } +} +func (this *Common) JsonEncodePretty(o interface{}) string { + resp := "" + switch o.(type) { + case map[string]interface{}: + if data, err := json.Marshal(o); err == nil { + resp = string(data) } - - io.Copy(outFile, file) - if md5sum != "" { - outFile.Seek(0, 0) - md5h := md5.New() - io.Copy(md5h, outFile) - sum := fmt.Sprintf("%x", md5h.Sum(nil)) - if sum != md5sum { - outFile.Close() - w.Write([]byte("fail,md5sum error")) - os.Remove(outPath) - return - + case map[string]string: + if data, err := json.Marshal(o); err == nil { + resp = string(data) + } + case []interface{}: + if data, err := json.Marshal(o); err == nil { + resp = string(data) + } + case []string: + if data, err := json.Marshal(o); err == nil { + resp = string(data) + } + case string: + resp = o.(string) + default: + if data, err := json.Marshal(o); err == nil { + resp = string(data) + } + } + var v interface{} + if ok := json.Unmarshal([]byte(resp), &v); ok == nil { + if buf, ok := json.MarshalIndent(v, "", " "); ok == nil { + resp = string(buf) + } + } + return resp +} +func (this *Common) GetClientIp(r *http.Request) string { + client_ip := "" + headers := []string{"X_Forwarded_For", "X-Forwarded-For", "X-Real-Ip", + "X_Real_Ip", "Remote_Addr", "Remote-Addr"} + for _, v := range headers { + if _v, ok := r.Header[v]; ok { + if len(_v) > 0 { + client_ip = _v[0] + break } } - defer outFile.Close() - outFile.Sync() - - download_url := fmt.Sprintf("http://%s/%s", r.Host, outPath) - w.Write([]byte(download_url)) - - } else { - w.Write([]byte("fail,please use post method")) + } + if client_ip == "" { + clients := strings.Split(r.RemoteAddr, ":") + client_ip = clients[0] + } + 
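+	// NOTE: the address is taken from proxy headers verbatim, so IsPeer and
+	// the AdminIps check that build on this value implicitly trust the proxy
+	// chain; only when no header is present does it fall back to the
+	// connection's RemoteAddr above.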
return client_ip +} +func (this *Server) BackUpMetaDataByDate(date string) { + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("BackUpMetaDataByDate") + log.Error(re) + log.Error(string(buffer)) + } + }() + var ( + err error + keyPrefix string + msg string + name string + fileInfo FileInfo + logFileName string + fileLog *os.File + fileMeta *os.File + metaFileName string + fi os.FileInfo + ) + logFileName = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME + this.lockMap.LockKey(logFileName) + defer this.lockMap.UnLockKey(logFileName) + metaFileName = DATA_DIR + "/" + date + "/" + "meta.data" + os.MkdirAll(DATA_DIR+"/"+date, 0775) + if this.util.IsExist(logFileName) { + os.Remove(logFileName) + } + if this.util.IsExist(metaFileName) { + os.Remove(metaFileName) + } + fileLog, err = os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + log.Error(err) return } - - useragent := r.Header.Get("User-Agent") - - if useragent != "" && (strings.Contains(useragent, "curl") || strings.Contains(useragent, "wget")) { - - } else { - http.Redirect(w, r, "/", http.StatusMovedPermanently) + defer fileLog.Close() + fileMeta, err = os.OpenFile(metaFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) + if err != nil { + log.Error(err) + return + } + defer fileMeta.Close() + keyPrefix = "%s_%s_" + keyPrefix = fmt.Sprintf(keyPrefix, date, CONST_FILE_Md5_FILE_NAME) + iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil) + defer iter.Release() + for iter.Next() { + if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil { + continue + } + name = fileInfo.Name + if fileInfo.ReName != "" { + name = fileInfo.ReName + } + msg = fmt.Sprintf("%s\t%s\n", fileInfo.Md5, string(iter.Value())) + if _, err = fileMeta.WriteString(msg); err != nil { + log.Error(err) + } + msg = fmt.Sprintf("%s\t%s\n", this.util.MD5(fileInfo.Path+"/"+name), string(iter.Value())) + if _, err = fileMeta.WriteString(msg); err != nil { + log.Error(err) + } + msg = fmt.Sprintf("%s|%d|%d|%s\n", fileInfo.Md5, fileInfo.Size, fileInfo.TimeStamp, fileInfo.Path+"/"+name) + if _, err = fileLog.WriteString(msg); err != nil { + log.Error(err) + } + } + if fi, err = fileLog.Stat(); err != nil { + log.Error(err) + } else if (fi.Size() == 0) { + fileLog.Close() + os.Remove(logFileName) + } + if fi, err = fileMeta.Stat(); err != nil { + log.Error(err) + } else if (fi.Size() == 0) { + fileMeta.Close() + os.Remove(metaFileName) } } +func (this *Server) RepairFileInfoFromFile() { + var ( + pathPrefix string + err error + fi os.FileInfo + ) + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("RepairFileInfoFromFile") + log.Error(re) + log.Error(string(buffer)) + } + }() + if this.lockMap.IsLock("RepairFileInfoFromFile") { + log.Warn("Lock RepairFileInfoFromFile") + return + } + this.lockMap.LockKey("RepairFileInfoFromFile") + defer this.lockMap.UnLockKey("RepairFileInfoFromFile") + handlefunc := func(file_path string, f os.FileInfo, err error) error { + var ( + files []os.FileInfo + fi os.FileInfo + fileInfo FileInfo + sum string + pathMd5 string + ) + if f.IsDir() { + files, err = ioutil.ReadDir(file_path) -func (this *Server) Index(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, - ` - - - Uploader - - -
- - `) + if err != nil { + return err + } + for _, fi = range files { + if fi.IsDir() || fi.Size() == 0 { + continue + } + file_path = strings.Replace(file_path, "\\", "/", -1) + if DOCKER_DIR != "" { + file_path = strings.Replace(file_path, DOCKER_DIR, "", 1) + } + if pathPrefix != "" { + file_path = strings.Replace(file_path, pathPrefix, STORE_DIR_NAME, 1) + } + if strings.HasPrefix(file_path, STORE_DIR_NAME+"/"+LARGE_DIR_NAME) { + log.Info(fmt.Sprintf("ignore small file file %s", file_path+"/"+fi.Name())) + continue + } + pathMd5 = this.util.MD5(file_path + "/" + fi.Name()) + //if finfo, _ := this.GetFileInfoFromLevelDB(pathMd5); finfo != nil && finfo.Md5 != "" { + // log.Info(fmt.Sprintf("exist ignore file %s", file_path+"/"+fi.Name())) + // continue + //} + //sum, err = this.util.GetFileSumByName(file_path+"/"+fi.Name(), Config().FileSumArithmetic) + sum = pathMd5 + if err != nil { + log.Error(err) + continue + } + fileInfo = FileInfo{ + Size: fi.Size(), + Name: fi.Name(), + Path: file_path, + Md5: sum, + TimeStamp: fi.ModTime().Unix(), + Peers: []string{this.host}, + OffSet: -2, + } + //log.Info(fileInfo) + log.Info(file_path, "/", fi.Name()) + this.AppendToQueue(&fileInfo) + //this.postFileToPeer(&fileInfo) + this.SaveFileInfoToLevelDB(fileInfo.Md5, &fileInfo, this.ldb) + //this.SaveFileMd5Log(&fileInfo, CONST_FILE_Md5_FILE_NAME) + } + } + return nil + } + pathname := STORE_DIR + pathPrefix, err = os.Readlink(pathname) + if err == nil { //link + pathname = pathPrefix + } + fi, err = os.Stat(pathname) + if err != nil { + log.Error(err) + } + if fi.IsDir() { + filepath.Walk(pathname, handlefunc) + } + log.Info("RepairFileInfoFromFile is finish.") } - -func init() { - flag.StringVar(&bind, "b", bind, "bind") - staticHandler = http.StripPrefix("/"+STORE_DIR, http.FileServer(http.Dir(STORE_DIR))) +func (this *Server) RepairStatByDate(date string) StatDateFileInfo { + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("RepairStatByDate") + log.Error(re) + log.Error(string(buffer)) + } + }() + var ( + err error + keyPrefix string + fileInfo FileInfo + fileCount int64 + fileSize int64 + stat StatDateFileInfo + ) + keyPrefix = "%s_%s_" + keyPrefix = fmt.Sprintf(keyPrefix, date, CONST_FILE_Md5_FILE_NAME) + iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil) + defer iter.Release() + for iter.Next() { + if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil { + continue + } + fileCount = fileCount + 1 + fileSize = fileSize + fileInfo.Size + } + this.statMap.Put(date+"_"+CONST_STAT_FILE_COUNT_KEY, fileCount) + this.statMap.Put(date+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, fileSize) + this.SaveStat() + stat.Date = date + stat.FileCount = fileCount + stat.TotalSize = fileSize + return stat } - -func main() { - - flag.Parse() - - if logger, err := log.LoggerFromConfigAsBytes([]byte(logConfigStr)); err != nil { - panic(err) - +func (this *Server) GetFilePathByInfo(fileInfo *FileInfo) string { + var ( + fn string + ) + fn = fileInfo.Name + if fileInfo.ReName != "" { + fn = fileInfo.ReName + } + return DOCKER_DIR + fileInfo.Path + "/" + fn +} +func (this *Server) CheckFileExistByInfo(md5s string, fileInfo *FileInfo) bool { + var ( + err error + fullpath string + fi os.FileInfo + info *FileInfo + ) + if fileInfo == nil { + return false + } + if fileInfo.OffSet >= 0 { //small file + if info, err = this.GetFileInfoFromLevelDB(fileInfo.Md5); err == nil && info.Md5 == fileInfo.Md5 { + return true + } else { + return false + } + } + fullpath = 
this.GetFilePathByInfo(fileInfo) + if fi, err = os.Stat(fullpath); err != nil { + return false + } + if fi.Size() == fileInfo.Size { + return true } else { - log.ReplaceLogger(logger) + return false } - - if !util.FileExists(STORE_DIR) { - os.Mkdir(STORE_DIR, 0777) +} +func (this *Server) ParseSmallFile(filename string) (string, int64, int, error) { + var ( + err error + offset int64 + length int + ) + err = errors.New("unvalid small file") + if len(filename) < 3 { + return filename, -1, -1, err } - - http.HandleFunc("/", server.Index) - http.HandleFunc("/upload", server.Upload) - http.HandleFunc("/"+STORE_DIR+"/", server.Download) - fmt.Printf(fmt.Sprintf("Listen:%s\n", bind)) - panic(http.ListenAndServe(bind, nil)) + if strings.Contains(filename, "/") { + filename = filename[strings.LastIndex(filename, "/")+1:] + } + pos := strings.Split(filename, ",") + if len(pos) < 3 { + return filename, -1, -1, err + } + offset, err = strconv.ParseInt(pos[1], 10, 64) + if err != nil { + return filename, -1, -1, err + } + if length, err = strconv.Atoi(pos[2]); err != nil { + return filename, offset, -1, err + } + if length > CONST_SMALL_FILE_SIZE || offset < 0 { + err = errors.New("invalid filesize or offset") + return filename, -1, -1, err + } + return pos[0], offset, length, nil +} +func (this *Server) DownloadFromPeer(peer string, fileInfo *FileInfo) { + var ( + err error + filename string + fpath string + fi os.FileInfo + sum string + data []byte + downloadUrl string + ) + if Config().ReadOnly { + log.Warn("ReadOnly", fileInfo) + return + } + filename = fileInfo.Name + if fileInfo.ReName != "" { + filename = fileInfo.ReName + } + if this.CheckFileExistByInfo(fileInfo.Md5, fileInfo) && Config().EnableDistinctFile { + return + } + if !Config().EnableDistinctFile && this.util.FileExists(this.GetFilePathByInfo(fileInfo)) { + return + } + if _, err = os.Stat(fileInfo.Path); err != nil { + os.MkdirAll(DOCKER_DIR+fileInfo.Path, 0775) + } + //fmt.Println("downloadFromPeer",fileInfo) + p := strings.Replace(fileInfo.Path, STORE_DIR_NAME+"/", "", 1) + //filename=this.util.UrlEncode(filename) + downloadUrl = peer + "/" + Config().Group + "/" + p + "/" + filename + log.Info("DownloadFromPeer: ", downloadUrl) + fpath = DOCKER_DIR + fileInfo.Path + "/" + filename + timeout := fileInfo.Size/1024/1024/8 + 30 + if fileInfo.OffSet == -2 { //migrate file + this.lockMap.LockKey(fpath) + defer this.lockMap.UnLockKey(fpath) + if fi, err = os.Stat(fpath); err == nil && fi.Size() == fileInfo.Size { //prevent double download + this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb) + //log.Info(fmt.Sprintf("file '%s' has download", fpath)) + return + } + req := httplib.Get(downloadUrl) + req.SetTimeout(time.Second*30, time.Second*time.Duration(timeout)) + if err = req.ToFile(fpath); err != nil { + log.Error(err) + return + } + //this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME) + this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb) + return + } + req := httplib.Get(downloadUrl) + req.SetTimeout(time.Second*30, time.Second*time.Duration(timeout)) + if fileInfo.OffSet >= 0 { //small file download + data, err = req.Bytes() + if err != nil { + log.Error(err) + return + } + data2 := make([]byte, len(data)+1) + data2[0] = '1' + for i, v := range data { + data2[i+1] = v + } + data = data2 + if int64(len(data)) != fileInfo.Size { + log.Warn("file size is error") + return + } + fpath = strings.Split(fpath, ",")[0] + err = this.util.WriteFileByOffSet(fpath, fileInfo.OffSet, data) + if err != nil { + log.Warn(err) 
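+			// NOTE: small files live packed inside a shared large file; the
+			// '1' prepended to the payload above marks the slot as in use,
+			// which is also why fileInfo.Size already includes that extra
+			// byte (see SaveSmallFile) and readers strip data[0] again.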
+ return + } + this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME) + return + } + if err = req.ToFile(fpath); err != nil { + log.Error(err) + return + } + if fi, err = os.Stat(fpath); err != nil { + os.Remove(fpath) + return + } + if sum, err = this.util.GetFileSumByName(fpath, Config().FileSumArithmetic); err != nil { + log.Error(err) + return + } + if fi.Size() != fileInfo.Size || sum != fileInfo.Md5 { + log.Error("file sum check error") + os.Remove(fpath) + return + } + if this.util.IsExist(fpath) { + this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME) + } +} +func (this *Server) CrossOrigin(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, X-Requested-By, If-Modified-Since, X-File-Name, X-File-Type, Cache-Control, Origin") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS, PUT, DELETE") + w.Header().Set("Access-Control-Expose-Headers", "Authorization") + //https://blog.csdn.net/yanzisu_congcong/article/details/80552155 +} +func (this *Server) SetDownloadHeader(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", "attachment") +} +func (this *Server) CheckAuth(w http.ResponseWriter, r *http.Request) bool { + var ( + err error + req *httplib.BeegoHTTPRequest + result string + ) + if err = r.ParseForm(); err != nil { + log.Error(err) + return false + } + req = httplib.Post(Config().AuthUrl) + req.SetTimeout(time.Second*10, time.Second*10) + for k, _ := range r.Form { + req.Param(k, r.FormValue(k)) + } + if result, err = req.String(); err != nil { + return false + } + if result != "ok" { + return false + } + return true +} +func (this *Server) NotPermit(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(403) +} + +func (this *Server) GetFilePathFromRequest(w http.ResponseWriter, r *http.Request) (string, string) { + var ( + err error + fullpath string + smallPath string + ) + fullpath = r.RequestURI[len(Config().Group)+2 : len(r.RequestURI)] + fullpath = strings.Split(fullpath, "?")[0] // just path + fullpath = DOCKER_DIR + STORE_DIR_NAME + "/" + fullpath + if strings.HasPrefix(r.RequestURI, "/"+Config().Group+"/"+LARGE_DIR_NAME+"/") { + smallPath = fullpath //notice order + fullpath = strings.Split(fullpath, ",")[0] + } + if fullpath, err = url.PathUnescape(fullpath); err != nil { + log.Error(err) + } + return fullpath, smallPath +} +func (this *Server) CheckDownloadAuth(w http.ResponseWriter, r *http.Request) (bool, error) { + var ( + err error + maxTimestamp int64 + minTimestamp int64 + ts int64 + token string + timestamp string + fullpath string + smallPath string + pathMd5 string + fileInfo *FileInfo + scene string + secret interface{} + code string + ok bool + ) + CheckToken := func(token string, md5sum string, timestamp string) bool { + if this.util.MD5(md5sum+timestamp) != token { + return false + } + return true + } + if Config().EnableDownloadAuth && Config().AuthUrl != "" && !this.IsPeer(r) && !this.CheckAuth(w, r) { + return false, errors.New("auth fail") + } + if Config().DownloadUseToken && !this.IsPeer(r) { + token = r.FormValue("token") + timestamp = r.FormValue("timestamp") + if token == "" || timestamp == "" { + return false, errors.New("unvalid request") + } + maxTimestamp = time.Now().Add(time.Second * + time.Duration(Config().DownloadTokenExpire)).Unix() + minTimestamp = 
time.Now().Add(-time.Second * + time.Duration(Config().DownloadTokenExpire)).Unix() + if ts, err = strconv.ParseInt(timestamp, 10, 64); err != nil { + return false, errors.New("unvalid timestamp") + } + if ts > maxTimestamp || ts < minTimestamp { + return false, errors.New("timestamp expire") + } + fullpath, smallPath = this.GetFilePathFromRequest(w, r) + if smallPath != "" { + pathMd5 = this.util.MD5(smallPath) + } else { + pathMd5 = this.util.MD5(fullpath) + } + if fileInfo, err = this.GetFileInfoFromLevelDB(pathMd5); err != nil { + // TODO + } else { + ok := CheckToken(token, fileInfo.Md5, timestamp) + if !ok { + return ok, errors.New("unvalid token") + } + return ok, nil + } + } + if Config().EnableGoogleAuth && !this.IsPeer(r) { + fullpath = r.RequestURI[len(Config().Group)+2 : len(r.RequestURI)] + fullpath = strings.Split(fullpath, "?")[0] // just path + scene = strings.Split(fullpath, "/")[0] + code = r.FormValue("code") + if secret, ok = this.sceneMap.GetValue(scene); ok { + if !this.VerifyGoogleCode(secret.(string), code, int64(Config().DownloadTokenExpire/30)) { + return false, errors.New("invalid google code") + } + } + } + return true, nil +} + +func (this *Server) GetSmallFileByURI(w http.ResponseWriter, r *http.Request) ([]byte, bool, error) { + var ( + err error + data []byte + offset int64 + length int + fullpath string + info os.FileInfo + ) + fullpath, _ = this.GetFilePathFromRequest(w, r) + if _, offset, length, err = this.ParseSmallFile(r.RequestURI); err != nil { + return nil, false, err + } + if info, err = os.Stat(fullpath); err != nil { + return nil, false, err + } + if info.Size() < offset+int64(length) { + return nil, true, errors.New("noFound") + } else { + data, err = this.util.ReadFileByOffSet(fullpath, offset, length) + if err != nil { + return nil, false, err + } + return data, false, err + } +} +func (this *Server) DownloadSmallFileByURI(w http.ResponseWriter, r *http.Request) (bool, error) { + var ( + err error + data []byte + isDownload bool + imgWidth int + imgHeight int + width string + height string + notFound bool + ) + r.ParseForm() + isDownload = true + if r.FormValue("download") == "" { + isDownload = Config().DefaultDownload + } + if r.FormValue("download") == "0" { + isDownload = false + } + width = r.FormValue("width") + height = r.FormValue("height") + if width != "" { + imgWidth, err = strconv.Atoi(width) + if err != nil { + log.Error(err) + } + } + if height != "" { + imgHeight, err = strconv.Atoi(height) + if err != nil { + log.Error(err) + } + } + data, notFound, err = this.GetSmallFileByURI(w, r) + _ = notFound + if data != nil && string(data[0]) == "1" { + if isDownload { + this.SetDownloadHeader(w, r) + } + if (imgWidth != 0 || imgHeight != 0) { + this.ResizeImageByBytes(w, data[1:], uint(imgWidth), uint(imgHeight)) + return true, nil + } + w.Write(data[1:]) + return true, nil + } + return false, errors.New("not found") +} +func (this *Server) DownloadNormalFileByURI(w http.ResponseWriter, r *http.Request) (bool, error) { + var ( + err error + isDownload bool + imgWidth int + imgHeight int + width string + height string + ) + r.ParseForm() + isDownload = true + if r.FormValue("download") == "" { + isDownload = Config().DefaultDownload + } + if r.FormValue("download") == "0" { + isDownload = false + } + width = r.FormValue("width") + height = r.FormValue("height") + if width != "" { + imgWidth, err = strconv.Atoi(width) + if err != nil { + log.Error(err) + } + } + if height != "" { + imgHeight, err = strconv.Atoi(height) + if err != nil { 
+ log.Error(err) + } + } + fmt.Println(isDownload) + if isDownload { + this.SetDownloadHeader(w, r) + } + fullpath, _ := this.GetFilePathFromRequest(w, r) + if (imgWidth != 0 || imgHeight != 0) { + this.ResizeImage(w, fullpath, uint(imgWidth), uint(imgHeight)) + return true, nil + } + staticHandler.ServeHTTP(w, r) + return true, nil +} +func (this *Server) DownloadNotFound(w http.ResponseWriter, r *http.Request) { + var ( + err error + fullpath string + smallPath string + isDownload bool + pathMd5 string + peer string + fileInfo *FileInfo + ) + fullpath, smallPath = this.GetFilePathFromRequest(w, r) + isDownload = true + if r.FormValue("download") == "" { + isDownload = Config().DefaultDownload + } + if r.FormValue("download") == "0" { + isDownload = false + } + if smallPath != "" { + pathMd5 = this.util.MD5(smallPath) + } else { + pathMd5 = this.util.MD5(fullpath) + } + for _, peer = range Config().Peers { + if fileInfo, err = this.checkPeerFileExist(peer, pathMd5); err != nil { + log.Error(err) + continue + } + if fileInfo.Md5 != "" { + go this.DownloadFromPeer(peer, fileInfo) + //http.Redirect(w, r, peer+r.RequestURI, 302) + if isDownload { + this.SetDownloadHeader(w, r) + } + this.DownloadFileToResponse(peer+r.RequestURI, w, r) + return + } + } + w.WriteHeader(404) + return +} +func (this *Server) Download(w http.ResponseWriter, r *http.Request) { + var ( + err error + ok bool + fullpath string + smallPath string + fi os.FileInfo + ) + if ok, err = this.CheckDownloadAuth(w, r); !ok { + log.Error(err) + this.NotPermit(w, r) + return + } + + if Config().EnableCrossOrigin { + this.CrossOrigin(w, r) + } + fullpath, smallPath = this.GetFilePathFromRequest(w, r) + if smallPath == "" { + if fi, err = os.Stat(fullpath); err != nil { + this.DownloadNotFound(w, r) + return + } + if !Config().ShowDir && fi.IsDir() { + w.Write([]byte("list dir deny")) + return + } + //staticHandler.ServeHTTP(w, r) + this.DownloadNormalFileByURI(w, r) + return + } + if smallPath != "" { + if ok, err = this.DownloadSmallFileByURI(w, r); !ok { + this.DownloadNotFound(w, r) + return + } + return + } + +} +func (this *Server) DownloadFileToResponse(url string, w http.ResponseWriter, r *http.Request) { + var ( + err error + req *httplib.BeegoHTTPRequest + resp *http.Response + ) + req = httplib.Get(url) + req.SetTimeout(time.Second*20, time.Second*600) + resp, err = req.DoRequest() + if err != nil { + log.Error(err) + } + defer resp.Body.Close() + _, err = io.Copy(w, resp.Body) + if err != nil { + log.Error(err) + } +} +func (this *Server) ResizeImageByBytes(w http.ResponseWriter, data []byte, width, height uint) { + var ( + img image.Image + err error + imgType string + ) + reader := bytes.NewReader(data) + img, imgType, err = image.Decode(reader) + if err != nil { + log.Error(err) + return + } + img = resize.Resize(width, height, img, resize.Lanczos3) + if imgType == "jpg" || imgType == "jpeg" { + jpeg.Encode(w, img, nil) + } else if imgType == "png" { + png.Encode(w, img) + } else { + w.Write(data) + } +} +func (this *Server) ResizeImage(w http.ResponseWriter, fullpath string, width, height uint) { + var ( + img image.Image + err error + imgType string + file *os.File + ) + file, err = os.Open(fullpath) + if err != nil { + log.Error(err) + return + } + img, imgType, err = image.Decode(file) + if err != nil { + log.Error(err) + return + } + file.Close() + img = resize.Resize(width, height, img, resize.Lanczos3) + if imgType == "jpg" || imgType == "jpeg" { + jpeg.Encode(w, img, nil) + } else if imgType == "png" { + 
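+		// NOTE: image.Decode reports registered format names ("jpeg", "png"),
+		// so the extra "jpg" comparison above can never match; anything that
+		// decodes under another name is streamed back unmodified below.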
png.Encode(w, img) + } else { + file.Seek(0, 0) + io.Copy(w, file) + } +} +func (this *Server) GetServerURI(r *http.Request) string { + return fmt.Sprintf("http://%s/", r.Host) +} +func (this *Server) CheckFileAndSendToPeer(date string, filename string, isForceUpload bool) { + var ( + md5set mapset.Set + err error + md5s []interface{} + ) + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("CheckFileAndSendToPeer") + log.Error(re) + log.Error(string(buffer)) + } + }() + if md5set, err = this.GetMd5sByDate(date, filename); err != nil { + log.Error(err) + return + } + md5s = md5set.ToSlice() + for _, md := range md5s { + if md == nil { + continue + } + if fileInfo, _ := this.GetFileInfoFromLevelDB(md.(string)); fileInfo != nil && fileInfo.Md5 != "" { + if isForceUpload { + fileInfo.Peers = []string{} + } + if len(fileInfo.Peers) > len(Config().Peers) { + continue + } + if !this.util.Contains(this.host, fileInfo.Peers) { + fileInfo.Peers = append(fileInfo.Peers, this.host) // peer is null + } + if filename == CONST_Md5_QUEUE_FILE_NAME { + this.AppendToDownloadQueue(fileInfo) + } else { + this.AppendToQueue(fileInfo) + } + } + } +} +func (this *Server) postFileToPeer(fileInfo *FileInfo) { + var ( + err error + peer string + filename string + info *FileInfo + postURL string + result string + fi os.FileInfo + i int + data []byte + fpath string + ) + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("postFileToPeer") + log.Error(re) + log.Error(string(buffer)) + } + }() + //fmt.Println("postFile",fileInfo) + for i, peer = range Config().Peers { + _ = i + if fileInfo.Peers == nil { + fileInfo.Peers = []string{} + } + if this.util.Contains(peer, fileInfo.Peers) { + continue + } + filename = fileInfo.Name + if fileInfo.ReName != "" { + filename = fileInfo.ReName + if fileInfo.OffSet != -1 { + filename = strings.Split(fileInfo.ReName, ",")[0] + } + } + fpath = DOCKER_DIR + fileInfo.Path + "/" + filename + if !this.util.FileExists(fpath) { + log.Warn(fmt.Sprintf("file '%s' not found", fpath)) + continue + } else { + if fileInfo.Size == 0 { + if fi, err = os.Stat(fpath); err != nil { + log.Error(err) + } else { + fileInfo.Size = fi.Size() + } + } + } + if fileInfo.OffSet != -2 { //not migrate file should check + if info, err = this.checkPeerFileExist(peer, fileInfo.Md5); info.Md5 != "" { + fileInfo.Peers = append(fileInfo.Peers, peer) + if _, err = this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil { + log.Error(err) + } + continue + } + } + postURL = fmt.Sprintf("%s%s", peer, this.getRequestURI("syncfile_info")) + b := httplib.Post(postURL) + b.SetTimeout(time.Second*30, time.Second*30) + if data, err = json.Marshal(fileInfo); err != nil { + log.Error(err) + return + } + b.Param("fileInfo", string(data)) + result, err = b.String() + if !strings.HasPrefix(result, "http://") || err != nil { + this.SaveFileMd5Log(fileInfo, CONST_Md5_ERROR_FILE_NAME) + } + if strings.HasPrefix(result, "http://") { + log.Info(result) + if !this.util.Contains(peer, fileInfo.Peers) { + fileInfo.Peers = append(fileInfo.Peers, peer) + if _, err = this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil { + log.Error(err) + } + } + } + if err != nil { + log.Error(err) + } + } +} +func (this *Server) SaveFileMd5Log(fileInfo *FileInfo, filename string) { + var ( + info FileInfo + ) + for len(this.queueFileLog)+len(this.queueFileLog)/10 > CONST_QUEUE_SIZE { + time.Sleep(time.Second * 1) + } + info = *fileInfo + 
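+	// NOTE: the FileInfo is copied before being queued so the caller can keep
+	// using its own struct; ConsumerLog drains queueFileLog in a goroutine
+	// and applies saveFileMd5Log asynchronously, while the loop above applies
+	// backpressure once the channel nears CONST_QUEUE_SIZE.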
this.queueFileLog <- &FileLog{FileInfo: &info, FileName: filename} +} +func (this *Server) saveFileMd5Log(fileInfo *FileInfo, filename string) { + var ( + err error + outname string + logDate string + ok bool + fullpath string + md5Path string + logKey string + ) + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("saveFileMd5Log") + log.Error(re) + log.Error(string(buffer)) + } + }() + if fileInfo == nil || fileInfo.Md5 == "" || filename == "" { + log.Warn("saveFileMd5Log", fileInfo, filename) + return + } + logDate = this.util.GetDayFromTimeStamp(fileInfo.TimeStamp) + outname = fileInfo.Name + if fileInfo.ReName != "" { + outname = fileInfo.ReName + } + fullpath = fileInfo.Path + "/" + outname + logKey = fmt.Sprintf("%s_%s_%s", logDate, filename, fileInfo.Md5) + if filename == CONST_FILE_Md5_FILE_NAME { + //this.searchMap.Put(fileInfo.Md5, fileInfo.Name) + if ok, err = this.IsExistFromLevelDB(fileInfo.Md5, this.ldb); !ok { + this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_COUNT_KEY, 1) + this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, fileInfo.Size) + this.SaveStat() + } + if _, err = this.SaveFileInfoToLevelDB(logKey, fileInfo, this.logDB); err != nil { + log.Error(err) + } + if _, err := this.SaveFileInfoToLevelDB(fileInfo.Md5, fileInfo, this.ldb); err != nil { + log.Error("saveToLevelDB", err, fileInfo) + } + if _, err = this.SaveFileInfoToLevelDB(this.util.MD5(fullpath), fileInfo, this.ldb); err != nil { + log.Error("saveToLevelDB", err, fileInfo) + } + return + } + if filename == CONST_REMOME_Md5_FILE_NAME { + //this.searchMap.Remove(fileInfo.Md5) + if ok, err = this.IsExistFromLevelDB(fileInfo.Md5, this.ldb); ok { + this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_COUNT_KEY, -1) + this.statMap.AddCountInt64(logDate+"_"+CONST_STAT_FILE_TOTAL_SIZE_KEY, -fileInfo.Size) + this.SaveStat() + } + this.RemoveKeyFromLevelDB(logKey, this.logDB) + md5Path = this.util.MD5(fullpath) + if err := this.RemoveKeyFromLevelDB(fileInfo.Md5, this.ldb); err != nil { + log.Error("RemoveKeyFromLevelDB", err, fileInfo) + } + if err = this.RemoveKeyFromLevelDB(md5Path, this.ldb); err != nil { + log.Error("RemoveKeyFromLevelDB", err, fileInfo) + } + return + } + this.SaveFileInfoToLevelDB(logKey, fileInfo, this.logDB) +} +func (this *Server) checkPeerFileExist(peer string, md5sum string) (*FileInfo, error) { + var ( + err error + fileInfo FileInfo + ) + req := httplib.Post(fmt.Sprintf("%s%s?md5=%s", peer, this.getRequestURI("check_file_exist"), md5sum)) + req.SetTimeout(time.Second*5, time.Second*10) + if err = req.ToJSON(&fileInfo); err != nil { + return &FileInfo{}, err + } + if fileInfo.Md5 == "" { + return &fileInfo, errors.New("not found") + } + return &fileInfo, nil +} +func (this *Server) CheckFileExist(w http.ResponseWriter, r *http.Request) { + var ( + data []byte + err error + fileInfo *FileInfo + fpath string + ) + r.ParseForm() + md5sum := "" + md5sum = r.FormValue("md5") + if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); fileInfo != nil { + if fileInfo.OffSet != -1 { + if data, err = json.Marshal(fileInfo); err != nil { + log.Error(err) + } + w.Write(data) + return + } + fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name + if fileInfo.ReName != "" { + fpath = DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName + } + if this.util.IsExist(fpath) { + if data, err = json.Marshal(fileInfo); err == nil { + w.Write(data) + return + } else { + log.Error(err) + } + } else { + if fileInfo.OffSet == -1 { + 
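+				// NOTE: OffSet == -1 marks a normal standalone file, so a
+				// missing file on disk means the index entry is stale; packed
+				// small files (OffSet >= 0) are never dropped here because
+				// their bytes live inside the shared large file.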
this.RemoveKeyFromLevelDB(md5sum, this.ldb) // when file delete,delete from leveldb + } + } + } + data, _ = json.Marshal(FileInfo{}) + w.Write(data) + return +} +func (this *Server) Sync(w http.ResponseWriter, r *http.Request) { + var ( + result JsonResult + ) + r.ParseForm() + result.Status = "fail" + if !this.IsPeer(r) { + result.Message = "client must be in cluster" + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + date := "" + force := "" + inner := "" + isForceUpload := false + force = r.FormValue("force") + date = r.FormValue("date") + inner = r.FormValue("inner") + if force == "1" { + isForceUpload = true + } + if inner != "1" { + for _, peer := range Config().Peers { + req := httplib.Post(peer + this.getRequestURI("sync")) + req.Param("force", force) + req.Param("inner", "1") + req.Param("date", date) + if _, err := req.String(); err != nil { + log.Error(err) + } + } + } + if date == "" { + result.Message = "require paramete date &force , ?date=20181230" + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + date = strings.Replace(date, ".", "", -1) + if isForceUpload { + go this.CheckFileAndSendToPeer(date, CONST_FILE_Md5_FILE_NAME, isForceUpload) + } else { + go this.CheckFileAndSendToPeer(date, CONST_Md5_ERROR_FILE_NAME, isForceUpload) + } + result.Status = "ok" + result.Message = "job is running" + w.Write([]byte(this.util.JsonEncodePretty(result))) +} +func (this *Server) IsExistFromLevelDB(key string, db *leveldb.DB) (bool, error) { + return db.Has([]byte(key), nil) +} +func (this *Server) GetFileInfoFromLevelDB(key string) (*FileInfo, error) { + var ( + err error + data []byte + fileInfo FileInfo + ) + if data, err = this.ldb.Get([]byte(key), nil); err != nil { + return nil, err + } + if err = json.Unmarshal(data, &fileInfo); err != nil { + return nil, err + } + return &fileInfo, nil +} +func (this *Server) SaveStat() { + SaveStatFunc := func() { + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("SaveStatFunc") + log.Error(re) + log.Error(string(buffer)) + } + }() + stat := this.statMap.Get() + if v, ok := stat[CONST_STAT_FILE_COUNT_KEY]; ok { + switch v.(type) { + case int64, int32, int, float64, float32: + if v.(int64) >= 0 { + if data, err := json.Marshal(stat); err != nil { + log.Error(err) + } else { + this.util.WriteBinFile(CONST_STAT_FILE_NAME, data) + } + } + } + } + } + SaveStatFunc() +} +func (this *Server) RemoveKeyFromLevelDB(key string, db *leveldb.DB) (error) { + var ( + err error + ) + err = db.Delete([]byte(key), nil) + return err +} +func (this *Server) SaveFileInfoToLevelDB(key string, fileInfo *FileInfo, db *leveldb.DB) (*FileInfo, error) { + var ( + err error + data []byte + ) + if fileInfo == nil || db == nil { + return nil, errors.New("fileInfo is null or db is null") + } + if data, err = json.Marshal(fileInfo); err != nil { + return fileInfo, err + } + if err = db.Put([]byte(key), data, nil); err != nil { + return fileInfo, err + } + return fileInfo, nil +} +func (this *Server) IsPeer(r *http.Request) bool { + var ( + ip string + peer string + bflag bool + ) + //return true + ip = this.util.GetClientIp(r) + if ip == "127.0.0.1" || ip == this.util.GetPulicIP() { + return true + } + if this.util.Contains(ip, Config().AdminIps) { + return true + } + ip = "http://" + ip + bflag = false + for _, peer = range Config().Peers { + if strings.HasPrefix(peer, ip) { + bflag = true + break + } + } + return bflag +} +func (this *Server) ReceiveMd5s(w http.ResponseWriter, r *http.Request) { + var 
( + err error + md5str string + fileInfo *FileInfo + md5s []string + ) + if !this.IsPeer(r) { + log.Warn(fmt.Sprintf("ReceiveMd5s %s", this.util.GetClientIp(r))) + w.Write([]byte(this.GetClusterNotPermitMessage(r))) + return + } + r.ParseForm() + md5str = r.FormValue("md5s") + md5s = strings.Split(md5str, ",") + AppendFunc := func(md5s []string) { + for _, m := range md5s { + if m != "" { + if fileInfo, err = this.GetFileInfoFromLevelDB(m); err != nil { + log.Error(err) + continue + } + this.AppendToQueue(fileInfo) + } + } + } + go AppendFunc(md5s) +} +func (this *Server) GetClusterNotPermitMessage(r *http.Request) string { + var ( + message string + ) + message = fmt.Sprintf(CONST_MESSAGE_CLUSTER_IP, this.util.GetClientIp(r)) + return message +} +func (this *Server) GetMd5sForWeb(w http.ResponseWriter, r *http.Request) { + var ( + date string + err error + result mapset.Set + lines []string + md5s []interface{} + ) + if !this.IsPeer(r) { + w.Write([]byte(this.GetClusterNotPermitMessage(r))) + return + } + date = r.FormValue("date") + if result, err = this.GetMd5sByDate(date, CONST_FILE_Md5_FILE_NAME); err != nil { + log.Error(err) + return + } + md5s = result.ToSlice() + for _, line := range md5s { + if line != nil && line != "" { + lines = append(lines, line.(string)) + } + } + w.Write([]byte( strings.Join(lines, ",") )) +} +func (this *Server) GetMd5File(w http.ResponseWriter, r *http.Request) { + var ( + date string + fpath string + data []byte + err error + ) + if !this.IsPeer(r) { + return + } + fpath = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME + if !this.util.FileExists(fpath) { + w.WriteHeader(404) + return + } + if data, err = ioutil.ReadFile(fpath); err != nil { + w.WriteHeader(500) + return + } + w.Write(data) +} +func (this *Server) GetMd5sMapByDate(date string, filename string) (*CommonMap, error) { + var ( + err error + result *CommonMap + fpath string + content string + lines []string + line string + cols []string + data []byte + ) + result = &CommonMap{m: make(map[string]interface{})} + if filename == "" { + fpath = DATA_DIR + "/" + date + "/" + CONST_FILE_Md5_FILE_NAME + } else { + fpath = DATA_DIR + "/" + date + "/" + filename + } + if !this.util.FileExists(fpath) { + return result, errors.New(fmt.Sprintf("fpath %s not found", fpath)) + } + if data, err = ioutil.ReadFile(fpath); err != nil { + return result, err + } + content = string(data) + lines = strings.Split(content, "\n") + for _, line = range lines { + cols = strings.Split(line, "|") + if len(cols) > 2 { + if _, err = strconv.ParseInt(cols[1], 10, 64); err != nil { + continue + } + result.Add(cols[0]) + } + } + return result, nil +} +func (this *Server) GetMd5sByDate(date string, filename string) (mapset.Set, error) { + var ( + keyPrefix string + md5set mapset.Set + keys []string + ) + md5set = mapset.NewSet() + keyPrefix = "%s_%s_" + keyPrefix = fmt.Sprintf(keyPrefix, date, filename) + iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil) + for iter.Next() { + keys = strings.Split(string(iter.Key()), "_") + if len(keys) >= 3 { + md5set.Add(keys[2]) + } + } + iter.Release() + return md5set, nil +} +func (this *Server) SyncFileInfo(w http.ResponseWriter, r *http.Request) { + var ( + err error + fileInfo FileInfo + fileInfoStr string + filename string + ) + r.ParseForm() + if !this.IsPeer(r) { + return + } + fileInfoStr = r.FormValue("fileInfo") + if err = json.Unmarshal([]byte(fileInfoStr), &fileInfo); err != nil { + w.Write([]byte(this.GetClusterNotPermitMessage(r))) + 
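+		// NOTE: the sender (postFileToPeer) only treats a response starting
+		// with "http://" as success, so answering with the cluster-permission
+		// message here makes it record the md5 in the error log for retry.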
log.Error(err) + return + } + if fileInfo.OffSet == -2 { // optimize migrate + this.SaveFileInfoToLevelDB(fileInfo.Md5, &fileInfo, this.ldb) + } else { + this.SaveFileMd5Log(&fileInfo, CONST_Md5_QUEUE_FILE_NAME) + } + this.AppendToDownloadQueue(&fileInfo) + filename = fileInfo.Name + if fileInfo.ReName != "" { + filename = fileInfo.ReName + } + p := strings.Replace(fileInfo.Path, STORE_DIR+"/", "", 1) + downloadUrl := fmt.Sprintf("http://%s/%s", r.Host, Config().Group+"/"+p+"/"+filename) + log.Info("SyncFileInfo: ", downloadUrl) + w.Write([]byte(downloadUrl)) +} +func (this *Server) CheckScene(scene string) (bool, error) { + var ( + scenes []string + ) + if len(Config().Scenes) == 0 { + return true, nil + } + for _, s := range Config().Scenes { + scenes = append(scenes, strings.Split(s, ":")[0]) + } + if !this.util.Contains(scene, scenes) { + return false, errors.New("not valid scene") + } + return true, nil +} +func (this *Server) GetFileInfo(w http.ResponseWriter, r *http.Request) { + var ( + fpath string + md5sum string + fileInfo *FileInfo + err error + result JsonResult + ) + md5sum = r.FormValue("md5") + fpath = r.FormValue("path") + result.Status = "fail" + if !this.IsPeer(r) { + w.Write([]byte(this.GetClusterNotPermitMessage(r))) + return + } + md5sum = r.FormValue("md5") + if fpath != "" { + fpath = strings.Replace(fpath, "/"+Config().Group+"/", STORE_DIR_NAME+"/", 1) + md5sum = this.util.MD5(fpath) + } + if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); err != nil { + log.Error(err) + result.Message = err.Error() + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + result.Status = "ok" + result.Data = fileInfo + w.Write([]byte(this.util.JsonEncodePretty(result))) + return +} +func (this *Server) RemoveFile(w http.ResponseWriter, r *http.Request) { + var ( + err error + md5sum string + fileInfo *FileInfo + fpath string + delUrl string + result JsonResult + inner string + name string + ) + _ = delUrl + _ = inner + r.ParseForm() + md5sum = r.FormValue("md5") + fpath = r.FormValue("path") + inner = r.FormValue("inner") + result.Status = "fail" + if !this.IsPeer(r) { + w.Write([]byte(this.GetClusterNotPermitMessage(r))) + return + } + if fpath != "" && md5sum == "" { + fpath = strings.Replace(fpath, "/"+Config().Group+"/", STORE_DIR_NAME+"/", 1) + md5sum = this.util.MD5(fpath) + } + if inner != "1" { + for _, peer := range Config().Peers { + delFile := func(peer string, md5sum string, fileInfo *FileInfo) { + delUrl = fmt.Sprintf("%s%s", peer, this.getRequestURI("delete")) + req := httplib.Post(delUrl) + req.Param("md5", md5sum) + req.Param("inner", "1") + req.SetTimeout(time.Second*5, time.Second*10) + if _, err = req.String(); err != nil { + log.Error(err) + } + } + go delFile(peer, md5sum, fileInfo) + } + } + if len(md5sum) < 32 { + result.Message = "md5 unvalid" + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + if fileInfo, err = this.GetFileInfoFromLevelDB(md5sum); err != nil { + result.Message = err.Error() + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + if fileInfo.OffSet != -1 { + result.Message = "small file delete not support" + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + name = fileInfo.Name + if fileInfo.ReName != "" { + name = fileInfo.ReName + } + fpath = fileInfo.Path + "/" + name + if fileInfo.Path != "" && this.util.FileExists(DOCKER_DIR+fpath) { + this.SaveFileMd5Log(fileInfo, CONST_REMOME_Md5_FILE_NAME) + if err = os.Remove(DOCKER_DIR + fpath); err != nil { + result.Message = 
err.Error() + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } else { + result.Message = "remove success" + result.Status = "ok" + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + } + result.Message = "fail remove" + w.Write([]byte(this.util.JsonEncodePretty(result))) +} +func (this *Server) getRequestURI(action string) string { + var ( + uri string + ) + if Config().SupportGroupManage { + uri = "/" + Config().Group + "/" + action + } else { + uri = "/" + action + } + return uri +} +func (this *Server) BuildFileResult(fileInfo *FileInfo, r *http.Request) FileResult { + var ( + outname string + fileResult FileResult + p string + downloadUrl string + domain string + ) + if Config().DownloadDomain != "" { + domain = fmt.Sprintf("http://%s", Config().DownloadDomain) + } else { + domain = fmt.Sprintf("http://%s", r.Host) + } + outname = fileInfo.Name + if fileInfo.ReName != "" { + outname = fileInfo.ReName + } + p = strings.Replace(fileInfo.Path, STORE_DIR_NAME+"/", "", 1) + p = Config().Group + "/" + p + "/" + outname + downloadUrl = fmt.Sprintf("http://%s/%s", r.Host, p) + if Config().DownloadDomain != "" { + downloadUrl = fmt.Sprintf("http://%s/%s", Config().DownloadDomain, p) + } + fileResult.Url = downloadUrl + fileResult.Md5 = fileInfo.Md5 + fileResult.Path = "/" + p + fileResult.Domain = domain + fileResult.Scene = fileInfo.Scene + fileResult.Size = fileInfo.Size + fileResult.ModTime = fileInfo.TimeStamp + // Just for Compatibility + fileResult.Src = fileResult.Path + fileResult.Scenes = fileInfo.Scene + return fileResult +} +func (this *Server) SaveUploadFile(file multipart.File, header *multipart.FileHeader, fileInfo *FileInfo, r *http.Request) (*FileInfo, error) { + var ( + err error + outFile *os.File + folder string + fi os.FileInfo + ) + defer file.Close() + fileInfo.Name = header.Filename + if Config().RenameFile { + fileInfo.ReName = this.util.MD5(this.util.GetUUID()) + path.Ext(fileInfo.Name) + } + folder = time.Now().Format("20060102/15/04") + if Config().PeerId != "" { + folder = fmt.Sprintf(folder+"/%s", Config().PeerId) + } + if fileInfo.Scene != "" { + folder = fmt.Sprintf(STORE_DIR+"/%s/%s", fileInfo.Scene, folder) + } else { + folder = fmt.Sprintf(STORE_DIR+"/%s", folder) + } + if fileInfo.Path != "" { + if strings.HasPrefix(fileInfo.Path, STORE_DIR) { + folder = fileInfo.Path + } else { + folder = STORE_DIR + "/" + fileInfo.Path + } + } + if !this.util.FileExists(folder) { + os.MkdirAll(folder, 0775) + } + outPath := fmt.Sprintf(folder+"/%s", fileInfo.Name) + if Config().RenameFile { + outPath = fmt.Sprintf(folder+"/%s", fileInfo.ReName) + } + if this.util.FileExists(outPath) && Config().EnableDistinctFile { + for i := 0; i < 10000; i++ { + outPath = fmt.Sprintf(folder+"/%d_%s", i, header.Filename) + fileInfo.Name = fmt.Sprintf("%d_%s", i, header.Filename) + if !this.util.FileExists(outPath) { + break + } + } + } + log.Info(fmt.Sprintf("upload: %s", outPath)) + if outFile, err = os.Create(outPath); err != nil { + return fileInfo, err + } + defer outFile.Close() + if err != nil { + log.Error(err) + return fileInfo, errors.New("(error)fail," + err.Error()) + } + if _, err = io.Copy(outFile, file); err != nil { + log.Error(err) + return fileInfo, errors.New("(error)fail," + err.Error()) + } + if fi, err = outFile.Stat(); err != nil { + log.Error(err) + } else { + fileInfo.Size = fi.Size() + } + if fi.Size() != header.Size { + return fileInfo, errors.New("(error)file uncomplete") + } + v := this.util.GetFileSum(outFile, 
Config().FileSumArithmetic) + fileInfo.Md5 = v + //fileInfo.Path = folder //strings.Replace( folder,DOCKER_DIR,"",1) + fileInfo.Path = strings.Replace(folder, DOCKER_DIR, "", 1) + fileInfo.Peers = append(fileInfo.Peers, this.host) + //fmt.Println("upload",fileInfo) + return fileInfo, nil +} +func (this *Server) Upload(w http.ResponseWriter, r *http.Request) { + var ( + err error + ok bool + // pathname string + md5sum string + fileInfo FileInfo + uploadFile multipart.File + uploadHeader *multipart.FileHeader + scene string + output string + fileResult FileResult + data []byte + code string + secret interface{} + ) + output = r.FormValue("output") + if Config().EnableCrossOrigin { + this.CrossOrigin(w, r) + } + if Config().AuthUrl != "" { + if !this.CheckAuth(w, r) { + log.Warn("auth fail", r.Form) + this.NotPermit(w, r) + w.Write([]byte("auth fail")) + return + } + } + if r.Method == "POST" { + md5sum = r.FormValue("md5") + output = r.FormValue("output") + if Config().ReadOnly { + w.Write([]byte( "(error) readonly")) + return + } + if Config().EnableCustomPath { + fileInfo.Path = r.FormValue("path") + fileInfo.Path = strings.Trim(fileInfo.Path, "/") + } + scene = r.FormValue("scene") + code = r.FormValue("code") + if scene == "" { + //Just for Compatibility + scene = r.FormValue("scenes") + } + if Config().EnableGoogleAuth && scene != "" { + if secret, ok = this.sceneMap.GetValue(scene); ok { + if !this.VerifyGoogleCode(secret.(string), code, int64(Config().DownloadTokenExpire/30)) { + this.NotPermit(w, r) + w.Write([]byte("invalid request,error google code")) + return + } + } + } + fileInfo.Md5 = md5sum + fileInfo.OffSet = -1 + if uploadFile, uploadHeader, err = r.FormFile("file"); err != nil { + log.Error(err) + w.Write([]byte(err.Error())) + return + } + fileInfo.Peers = []string{} + fileInfo.TimeStamp = time.Now().Unix() + if scene == "" { + scene = Config().DefaultScene + } + if output == "" { + output = "text" + } + if !this.util.Contains(output, []string{"json", "text"}) { + w.Write([]byte("output just support json or text")) + return + } + fileInfo.Scene = scene + if _, err = this.CheckScene(scene); err != nil { + w.Write([]byte(err.Error())) + return + } + if err != nil { + log.Error(err) + http.Redirect(w, r, "/", http.StatusMovedPermanently) + return + } + if _, err = this.SaveUploadFile(uploadFile, uploadHeader, &fileInfo, r); err != nil { + w.Write([]byte(err.Error())) + return + } + if Config().EnableDistinctFile { + if v, _ := this.GetFileInfoFromLevelDB(fileInfo.Md5); v != nil && v.Md5 != "" { + fileResult = this.BuildFileResult(v, r) + if Config().RenameFile { + os.Remove(DOCKER_DIR + fileInfo.Path + "/" + fileInfo.ReName) + } else { + os.Remove(DOCKER_DIR + fileInfo.Path + "/" + fileInfo.Name) + } + if output == "json" { + if data, err = json.Marshal(fileResult); err != nil { + log.Error(err) + w.Write([]byte(err.Error())) + } + w.Write(data) + } else { + w.Write([]byte(fileResult.Url)) + } + return + } + } + if fileInfo.Md5 == "" { + log.Warn(" fileInfo.Md5 is null") + return + } + if md5sum != "" && fileInfo.Md5 != md5sum { + log.Warn(" fileInfo.Md5 and md5sum !=") + return + } + if Config().EnableMergeSmallFile && fileInfo.Size < CONST_SMALL_FILE_SIZE { + if err = this.SaveSmallFile(&fileInfo); err != nil { + log.Error(err) + return + } + } + this.saveFileMd5Log(&fileInfo, CONST_FILE_Md5_FILE_NAME) //maybe slow + go this.postFileToPeer(&fileInfo) + if fileInfo.Size <= 0 { + log.Error("file size is zero") + return + } + fileResult = this.BuildFileResult(&fileInfo, r) + 
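+		// NOTE: output=json returns the full FileResult (url, md5, path,
+		// scene, size, ...) while the default text mode writes just the bare
+		// download URL to the response body.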
if output == "json" { + if data, err = json.Marshal(fileResult); err != nil { + log.Error(err) + w.Write([]byte(err.Error())) + } + w.Write(data) + } else { + w.Write([]byte(fileResult.Url)) + } + return + } else { + md5sum = r.FormValue("md5") + output = r.FormValue("output") + if md5sum == "" { + w.Write([]byte("(error) if you want to upload fast md5 is require" + + ",and if you want to upload file,you must use post method ")) + return + } + if v, _ := this.GetFileInfoFromLevelDB(md5sum); v != nil && v.Md5 != "" { + fileResult = this.BuildFileResult(v, r) + } + if output == "json" { + if data, err = json.Marshal(fileResult); err != nil { + log.Error(err) + w.Write([]byte(err.Error())) + } + w.Write(data) + } else { + w.Write([]byte(fileResult.Url)) + } + } +} +func (this *Server) SaveSmallFile(fileInfo *FileInfo) (error) { + var ( + err error + filename string + fpath string + srcFile *os.File + desFile *os.File + largeDir string + destPath string + reName string + fileExt string + ) + filename = fileInfo.Name + fileExt = path.Ext(filename) + if fileInfo.ReName != "" { + filename = fileInfo.ReName + } + fpath = DOCKER_DIR + fileInfo.Path + "/" + filename + largeDir = LARGE_DIR + "/" + Config().PeerId + if !this.util.FileExists(largeDir) { + os.MkdirAll(largeDir, 0775) + } + reName = fmt.Sprintf("%d", this.util.RandInt(100, 300)) + destPath = largeDir + "/" + reName + this.lockMap.LockKey(destPath) + defer this.lockMap.UnLockKey(destPath) + if this.util.FileExists(fpath) { + srcFile, err = os.OpenFile(fpath, os.O_CREATE|os.O_RDONLY, 06666) + if err != nil { + return err + } + defer srcFile.Close() + desFile, err = os.OpenFile(destPath, os.O_CREATE|os.O_RDWR, 06666) + if err != nil { + return err + } + defer desFile.Close() + fileInfo.OffSet, err = desFile.Seek(0, 2) + if _, err = desFile.Write([]byte("1")); err != nil { //first byte set 1 + return err + } + fileInfo.OffSet, err = desFile.Seek(0, 2) + if err != nil { + return err + } + fileInfo.OffSet = fileInfo.OffSet - 1 //minus 1 byte + fileInfo.Size = fileInfo.Size + 1 + fileInfo.ReName = fmt.Sprintf("%s,%d,%d,%s", reName, fileInfo.OffSet, fileInfo.Size, fileExt) + if _, err = io.Copy(desFile, srcFile); err != nil { + return err + } + srcFile.Close() + os.Remove(fpath) + fileInfo.Path = strings.Replace(largeDir, DOCKER_DIR, "", 1) + } + return nil +} +func (this *Server) SendToMail(to, subject, body, mailtype string) error { + host := Config().Mail.Host + user := Config().Mail.User + password := Config().Mail.Password + hp := strings.Split(host, ":") + auth := smtp.PlainAuth("", user, password, hp[0]) + var contentType string + if mailtype == "html" { + contentType = "Content-Type: text/" + mailtype + "; charset=UTF-8" + } else { + contentType = "Content-Type: text/plain" + "; charset=UTF-8" + } + msg := []byte("To: " + to + "\r\nFrom: " + user + ">\r\nSubject: " + "\r\n" + contentType + "\r\n\r\n" + body) + sendTo := strings.Split(to, ";") + err := smtp.SendMail(host, auth, user, sendTo, msg) + return err +} +func (this *Server) BenchMark(w http.ResponseWriter, r *http.Request) { + t := time.Now() + batch := new(leveldb.Batch) + for i := 0; i < 100000000; i++ { + f := FileInfo{} + f.Peers = []string{"http://192.168.0.1", "http://192.168.2.5"} + f.Path = "20190201/19/02" + s := strconv.Itoa(i) + s = this.util.MD5(s) + f.Name = s + f.Md5 = s + if data, err := json.Marshal(&f); err == nil { + batch.Put([]byte(s), data) + } + if i%10000 == 0 { + if batch.Len() > 0 { + server.ldb.Write(batch, nil) + // batch = new(leveldb.Batch) + 
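+				// NOTE: Reset() empties the batch but keeps its backing
+				// buffer, so the 10k-record flush loop avoids reallocating
+				// the batch each round (the commented-out line above is the
+				// allocation-per-flush alternative).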
batch.Reset() + } + fmt.Println(i, time.Since(t).Seconds()) + } + //fmt.Println(server.GetFileInfoFromLevelDB(s)) + } + this.util.WriteFile("time.txt", time.Since(t).String()) + fmt.Println(time.Since(t).String()) +} +func (this *Server) RepairStatWeb(w http.ResponseWriter, r *http.Request) { + var ( + result JsonResult + date string + inner string + ) + if !this.IsPeer(r) { + result.Message = this.GetClusterNotPermitMessage(r) + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + date = r.FormValue("date") + inner = r.FormValue("inner") + if ok, err := regexp.MatchString("\\d{8}", date); err != nil || !ok { + result.Message = "invalid date" + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + if date == "" || len(date) != 8 { + date = this.util.GetToDay() + } + if inner != "1" { + for _, peer := range Config().Peers { + req := httplib.Post(peer + this.getRequestURI("repair_stat")) + req.Param("inner", "1") + req.Param("date", date) + if _, err := req.String(); err != nil { + log.Error(err) + } + } + } + result.Data = this.RepairStatByDate(date) + result.Status = "ok" + w.Write([]byte(this.util.JsonEncodePretty(result))) +} +func (this *Server) Stat(w http.ResponseWriter, r *http.Request) { + var ( + result JsonResult + inner string + echart string + category []string + barCount []int64 + barSize []int64 + dataMap map[string]interface{} + ) + if !this.IsPeer(r) { + result.Message = this.GetClusterNotPermitMessage(r) + w.Write([]byte(this.util.JsonEncodePretty(result))) + return + } + r.ParseForm() + inner = r.FormValue("inner") + echart = r.FormValue("echart") + data := this.GetStat() + result.Status = "ok" + result.Data = data + if echart == "1" { + dataMap = make(map[string]interface{}, 3) + for _, v := range data { + barCount = append(barCount, v.FileCount) + barSize = append(barSize, v.TotalSize) + category = append(category, v.Date) + } + dataMap["category"] = category + dataMap["barCount"] = barCount + dataMap["barSize"] = barSize + result.Data = dataMap + } + if inner == "1" { + w.Write([]byte(this.util.JsonEncodePretty(data))) + } else { + w.Write([]byte(this.util.JsonEncodePretty(result))) + } +} +func (this *Server) GetStat() []StatDateFileInfo { + var ( + min int64 + max int64 + err error + i int64 + rows []StatDateFileInfo + total StatDateFileInfo + ) + min = 20190101 + max = 20190101 + for k := range this.statMap.Get() { + ks := strings.Split(k, "_") + if len(ks) == 2 { + if i, err = strconv.ParseInt(ks[0], 10, 64); err != nil { + continue + } + if i >= max { + max = i + } + if i < min { + min = i + } + } + } + for i := min; i <= max; i++ { + s := fmt.Sprintf("%d", i) + if v, ok := this.statMap.GetValue(s + "_" + CONST_STAT_FILE_TOTAL_SIZE_KEY); ok { + var info StatDateFileInfo + info.Date = s + switch v.(type) { + case int64: + info.TotalSize = v.(int64) + total.TotalSize = total.TotalSize + v.(int64) + } + if v, ok := this.statMap.GetValue(s + "_" + CONST_STAT_FILE_COUNT_KEY); ok { + switch v.(type) { + case int64: + info.FileCount = v.(int64) + total.FileCount = total.FileCount + v.(int64) + } + } + rows = append(rows, info) + } + } + total.Date = "all" + rows = append(rows, total) + return rows +} +func (this *Server) RegisterExit() { + c := make(chan os.Signal) + signal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + go func() { + for s := range c { + switch s { + case syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT: + this.ldb.Close() + log.Info("Exit", s) + os.Exit(1) + } + } + }() +} +func 
(this *Server) AppendToQueue(fileInfo *FileInfo) { + + for (len(this.queueToPeers) + CONST_QUEUE_SIZE/10) > CONST_QUEUE_SIZE { + time.Sleep(time.Millisecond * 50) + } + this.queueToPeers <- *fileInfo +} +func (this *Server) AppendToDownloadQueue(fileInfo *FileInfo) { + for (len(this.queueFromPeers) + CONST_QUEUE_SIZE/10) > CONST_QUEUE_SIZE { + time.Sleep(time.Millisecond * 50) + } + this.queueFromPeers <- *fileInfo +} +func (this *Server) ConsumerDownLoad() { + ConsumerFunc := func() { + for { + fileInfo := <-this.queueFromPeers + if len(fileInfo.Peers) <= 0 { + log.Warn("Peer is null", fileInfo) + continue + } + for _, peer := range fileInfo.Peers { + if strings.Contains(peer, "127.0.0.1") { + log.Warn("sync error with 127.0.0.1", fileInfo) + continue + } + if peer != this.host { + this.DownloadFromPeer(peer, &fileInfo) + break + } + } + } + } + for i := 0; i < 50; i++ { + go ConsumerFunc() + } +} +func (this *Server) ConsumerLog() { + go func() { + var ( + fileLog *FileLog + ) + for { + fileLog = <-this.queueFileLog + this.saveFileMd5Log(fileLog.FileInfo, fileLog.FileName) + } + }() +} +func (this *Server) LoadSearchDict() { + go func() { + log.Info("Load search dict ....") + f, err := os.Open(CONST_SEARCH_FILE_NAME) + if err != nil { + log.Error(err) + return + } + defer f.Close() + r := bufio.NewReader(f) + for { + line, isprefix, err := r.ReadLine() + for isprefix && err == nil { + kvs := strings.Split(string(line), "\t") + if len(kvs) == 2 { + this.searchMap.Put(kvs[0], kvs[1]) + } + } + } + log.Info("finish load search dict") + }() +} +func (this *Server) SaveSearchDict() { + var ( + err error + fp *os.File + searchDict map[string]interface{} + k string + v interface{} + ) + this.lockMap.LockKey(CONST_SEARCH_FILE_NAME) + defer this.lockMap.UnLockKey(CONST_SEARCH_FILE_NAME) + searchDict = this.searchMap.Get() + fp, err = os.OpenFile(CONST_SEARCH_FILE_NAME, os.O_RDWR, 0755) + if err != nil { + log.Error(err) + return + } + defer fp.Close() + for k, v = range searchDict { + fp.WriteString(fmt.Sprintf("%s\t%s", k, v.(string))) + } +} +func (this *Server) ConsumerPostToPeer() { + ConsumerFunc := func() { + for { + fileInfo := <-this.queueToPeers + this.postFileToPeer(&fileInfo) + } + } + for i := 0; i < 50; i++ { + go ConsumerFunc() + } +} +func (this *Server) AutoRepair(forceRepair bool) { + if this.lockMap.IsLock("AutoRepair") { + log.Warn("Lock AutoRepair") + return + } + this.lockMap.LockKey("AutoRepair") + defer this.lockMap.UnLockKey("AutoRepair") + AutoRepairFunc := func(forceRepair bool) { + var ( + dateStats []StatDateFileInfo + err error + countKey string + md5s string + localSet mapset.Set + remoteSet mapset.Set + allSet mapset.Set + tmpSet mapset.Set + fileInfo *FileInfo + ) + defer func() { + if re := recover(); re != nil { + buffer := debug.Stack() + log.Error("AutoRepair") + log.Error(re) + log.Error(string(buffer)) + } + }() + Update := func(peer string, dateStat StatDateFileInfo) { //从远端拉数据过来 + req := httplib.Get(fmt.Sprintf("%s%s?date=%s&force=%s", peer, this.getRequestURI("sync"), dateStat.Date, "1")) + req.SetTimeout(time.Second*5, time.Second*5) + if _, err = req.String(); err != nil { + log.Error(err) + } + log.Info(fmt.Sprintf("syn file from %s date %s", peer, dateStat.Date)) + } + for _, peer := range Config().Peers { + req := httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("stat"))) + req.Param("inner", "1") + req.SetTimeout(time.Second*5, time.Second*15) + if err = req.ToJSON(&dateStats); err != nil { + log.Error(err) + continue + } + for _, dateStat 
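+		// NOTE: repair is a two-way set difference per date: md5s only the
+		// peer holds are posted to its receive_md5s endpoint so it pushes
+		// those files over, while md5s only this node holds are appended to
+		// the local queue and sent out through postFileToPeer.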
+			for _, dateStat := range dateStats {
+				if dateStat.Date == "all" {
+					continue
+				}
+				countKey = dateStat.Date + "_" + CONST_STAT_FILE_COUNT_KEY
+				if v, ok := this.statMap.GetValue(countKey); ok {
+					switch v.(type) {
+					case int64:
+						if v.(int64) != dateStat.FileCount || forceRepair { // counts differ: compute the set difference
+							//TODO
+							req := httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("get_md5s_by_date")))
+							req.SetTimeout(time.Second*15, time.Second*60)
+							req.Param("date", dateStat.Date)
+							if md5s, err = req.String(); err != nil {
+								continue
+							}
+							if localSet, err = this.GetMd5sByDate(dateStat.Date, CONST_FILE_Md5_FILE_NAME); err != nil {
+								log.Error(err)
+								continue
+							}
+							remoteSet = this.util.StrToMapSet(md5s, ",")
+							allSet = localSet.Union(remoteSet)
+							md5s = this.util.MapSetToStr(allSet.Difference(localSet), ",")
+							req = httplib.Post(fmt.Sprintf("%s%s", peer, this.getRequestURI("receive_md5s")))
+							req.SetTimeout(time.Second*15, time.Second*60)
+							req.Param("md5s", md5s)
+							if _, err = req.String(); err != nil {
+								log.Error(err)
+							}
+							tmpSet = allSet.Difference(remoteSet)
+							for v := range tmpSet.Iter() {
+								if v != nil {
+									if fileInfo, err = this.GetFileInfoFromLevelDB(v.(string)); err != nil {
+										log.Error(err)
+										continue
+									}
+									this.AppendToQueue(fileInfo)
+								}
+							}
+							//Update(peer, dateStat)
+						}
+					}
+				} else {
+					Update(peer, dateStat)
+				}
+			}
+		}
+	}
+	AutoRepairFunc(forceRepair)
+}
+func (this *Server) CleanLogLevelDBByDate(date string, filename string) {
+	defer func() {
+		if re := recover(); re != nil {
+			buffer := debug.Stack()
+			log.Error("CleanLogLevelDBByDate")
+			log.Error(re)
+			log.Error(string(buffer))
+		}
+	}()
+	var (
+		err       error
+		keyPrefix string
+		keys      mapset.Set
+	)
+	keys = mapset.NewSet()
+	keyPrefix = fmt.Sprintf("%s_%s_", date, filename)
+	iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
+	for iter.Next() {
+		keys.Add(string(iter.Key()))
+	}
+	iter.Release()
+	for key := range keys.Iter() {
+		err = this.RemoveKeyFromLevelDB(key.(string), this.logDB)
+		if err != nil {
+			log.Error(err)
+		}
+	}
+}
+func (this *Server) CleanAndBackUp() {
+	Clean := func() {
+		var (
+			filenames []string
+			yesterday string
+		)
+		if this.curDate != this.util.GetToDay() {
+			filenames = []string{CONST_Md5_QUEUE_FILE_NAME, CONST_Md5_ERROR_FILE_NAME, CONST_REMOME_Md5_FILE_NAME}
+			yesterday = this.util.GetDayFromTimeStamp(time.Now().AddDate(0, 0, -1).Unix())
+			for _, filename := range filenames {
+				this.CleanLogLevelDBByDate(yesterday, filename)
+			}
+			this.BackUpMetaDataByDate(yesterday)
+			this.curDate = this.util.GetToDay()
+		}
+	}
+	go func() {
+		for {
+			time.Sleep(time.Hour * 6)
+			Clean()
+		}
+	}()
+}
+func (this *Server) LoadFileInfoByDate(date string, filename string) (mapset.Set, error) {
+	defer func() {
+		if re := recover(); re != nil {
+			buffer := debug.Stack()
+			log.Error("LoadFileInfoByDate")
+			log.Error(re)
+			log.Error(string(buffer))
+		}
+	}()
+	var (
+		err       error
+		keyPrefix string
+		fileInfos mapset.Set
+	)
+	fileInfos = mapset.NewSet()
+	keyPrefix = fmt.Sprintf("%s_%s_", date, filename)
+	iter := server.logDB.NewIterator(util.BytesPrefix([]byte(keyPrefix)), nil)
+	for iter.Next() {
+		var fileInfo FileInfo
+		if err = json.Unmarshal(iter.Value(), &fileInfo); err != nil {
+			continue
+		}
+		fileInfos.Add(&fileInfo)
+	}
+	iter.Release()
+	return fileInfos, nil
+}
+func (this *Server) LoadQueueSendToPeer() {
+	if queue, err := this.LoadFileInfoByDate(this.util.GetToDay(), CONST_Md5_QUEUE_FILE_NAME); err != nil {
+		log.Error(err)
+	} else {
+		for fileInfo := range queue.Iter() {
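+			// re-enqueue download work persisted in today's md5 queue log, so syncing resumes after a restart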
+			//this.queueFromPeers <- *fileInfo.(*FileInfo)
+			this.AppendToDownloadQueue(fileInfo.(*FileInfo))
+		}
+	}
+}
+func (this *Server) CheckClusterStatus() {
+	check := func() {
+		defer func() {
+			if re := recover(); re != nil {
+				buffer := debug.Stack()
+				log.Error("CheckClusterStatus")
+				log.Error(re)
+				log.Error(string(buffer))
+			}
+		}()
+		var (
+			status  JsonResult
+			err     error
+			subject string
+			body    string
+			req     *httplib.BeegoHTTPRequest
+		)
+		for _, peer := range Config().Peers {
+			status = JsonResult{} // reset, so a failed request is not masked by the previous peer's result
+			req = httplib.Get(fmt.Sprintf("%s%s", peer, this.getRequestURI("status")))
+			req.SetTimeout(time.Second*5, time.Second*5)
+			err = req.ToJSON(&status)
+			if err == nil && status.Status == "ok" {
+				continue
+			}
+			subject = "fastdfs server error"
+			if err != nil {
+				body = fmt.Sprintf("%s\nserver:%s\nerror:\n%s", subject, peer, err.Error())
+			} else {
+				body = fmt.Sprintf("%s\nserver:%s\n", subject, peer)
+			}
+			for _, to := range Config().AlramReceivers {
+				if err = this.SendToMail(to, subject, body, "text"); err != nil {
+					log.Error(err)
+				}
+			}
+			if Config().AlarmUrl != "" {
+				req = httplib.Post(Config().AlarmUrl)
+				req.SetTimeout(time.Second*10, time.Second*10)
+				req.Param("message", body)
+				req.Param("subject", subject)
+				if _, err = req.String(); err != nil {
+					log.Error(err)
+				}
+			}
+		}
+	}
+	go func() {
+		for {
+			time.Sleep(time.Minute * 10)
+			check()
+		}
+	}()
+}
+func (this *Server) RepairFileInfo(w http.ResponseWriter, r *http.Request) {
+	var (
+		result JsonResult
+	)
+	if !this.IsPeer(r) {
+		w.Write([]byte(this.GetClusterNotPermitMessage(r)))
+		return
+	}
+	if !Config().EnableMigrate {
+		w.Write([]byte("please set enable_migrate=true"))
+		return
+	}
+	result.Status = "ok"
+	result.Message = "repair job started, do not trigger it again, it is dangerous"
+	go this.RepairFileInfoFromFile()
+	w.Write([]byte(this.util.JsonEncodePretty(result)))
+}
+func (this *Server) Reload(w http.ResponseWriter, r *http.Request) {
+	var (
+		err     error
+		data    []byte
+		cfg     GloablConfig
+		action  string
+		cfgjson string
+		result  JsonResult
+	)
+	result.Status = "fail"
+	r.ParseForm()
+	if !this.IsPeer(r) {
+		w.Write([]byte(this.GetClusterNotPermitMessage(r)))
+		return
+	}
+	cfgjson = r.FormValue("cfg")
+	action = r.FormValue("action")
+	if action == "get" {
+		result.Data = Config()
+		result.Status = "ok"
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	if action == "set" {
+		if cfgjson == "" {
+			result.Message = "(error) parameter cfg(json) is required"
+			w.Write([]byte(this.util.JsonEncodePretty(result)))
+			return
+		}
+		if err = json.Unmarshal([]byte(cfgjson), &cfg); err != nil {
+			log.Error(err)
+			result.Message = err.Error()
+			w.Write([]byte(this.util.JsonEncodePretty(result)))
+			return
+		}
+		result.Status = "ok"
+		cfgjson = this.util.JsonEncodePretty(cfg)
+		this.util.WriteFile(CONST_CONF_FILE_NAME, cfgjson)
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	if action == "reload" {
+		if data, err = ioutil.ReadFile(CONST_CONF_FILE_NAME); err != nil {
+			result.Message = err.Error()
+			w.Write([]byte(this.util.JsonEncodePretty(result)))
+			return
+		}
+		if err = json.Unmarshal(data, &cfg); err != nil {
+			result.Message = err.Error()
+			w.Write([]byte(this.util.JsonEncodePretty(result)))
+			return
+		}
+		ParseConfig(CONST_CONF_FILE_NAME)
+		this.initComponent(true)
+		result.Status = "ok"
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	if action == "" {
+		w.Write([]byte("(error) action must be one of: set(json), get, reload"))
+	}
+}
+func (this *Server) RemoveEmptyDir(w http.ResponseWriter, r *http.Request) {
+	var (
+		result JsonResult
+	)
+	result.Status = "ok"
+	if this.IsPeer(r) {
+		go this.util.RemoveEmptyDir(DATA_DIR)
+		go this.util.RemoveEmptyDir(STORE_DIR)
+		result.Message = "clean job started, do not trigger it again"
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+	} else {
+		result.Message = this.GetClusterNotPermitMessage(r)
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+	}
+}
+func (this *Server) BackUp(w http.ResponseWriter, r *http.Request) {
+	var (
+		err    error
+		date   string
+		result JsonResult
+		inner  string
+		url    string
+	)
+	result.Status = "ok"
+	r.ParseForm()
+	date = r.FormValue("date")
+	inner = r.FormValue("inner")
+	if date == "" {
+		date = this.util.GetToDay()
+	}
+	if this.IsPeer(r) {
+		if inner != "1" {
+			for _, peer := range Config().Peers {
+				backUp := func(peer string, date string) {
+					url = fmt.Sprintf("%s%s", peer, this.getRequestURI("backup"))
+					req := httplib.Post(url)
+					req.Param("date", date)
+					req.Param("inner", "1")
+					req.SetTimeout(time.Second*5, time.Second*600)
+					if _, err = req.String(); err != nil {
+						log.Error(err)
+					}
+				}
+				go backUp(peer, date)
+			}
+		}
+		go this.BackUpMetaDataByDate(date)
+		result.Message = "backup job started..."
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+	} else {
+		result.Message = this.GetClusterNotPermitMessage(r)
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+	}
+}
+
+// Notice: performance is poor; this linear scan is meant for small capacity and low memory use.
+// For high performance, search the in-memory searchMap instead, at the cost of memory.
+func (this *Server) Search(w http.ResponseWriter, r *http.Request) {
+	var (
+		result    JsonResult
+		err       error
+		kw        string
+		count     int
+		fileInfos []FileInfo
+		md5s      []string
+	)
+	kw = r.FormValue("kw")
+	if !this.IsPeer(r) {
+		result.Message = this.GetClusterNotPermitMessage(r)
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	iter := this.ldb.NewIterator(nil, nil)
+	for iter.Next() {
+		var fileInfo FileInfo
+		value := iter.Value()
+		if err = json.Unmarshal(value, &fileInfo); err != nil {
+			log.Error(err)
+			continue
+		}
+		if strings.Contains(fileInfo.Name, kw) && !this.util.Contains(fileInfo.Md5, md5s) {
+			count = count + 1
+			fileInfos = append(fileInfos, fileInfo)
+			md5s = append(md5s, fileInfo.Md5)
+		}
+		if count >= 100 {
+			break
+		}
+	}
+	iter.Release()
+	err = iter.Error()
+	if err != nil {
+		log.Error(err)
+	}
+	//fileInfos = this.SearchDict(kw) // search the in-memory map instead for huge capacity
+	result.Status = "ok"
+	result.Data = fileInfos
+	w.Write([]byte(this.util.JsonEncodePretty(result)))
+}
+func (this *Server) SearchDict(kw string) []FileInfo {
+	var (
+		fileInfos []FileInfo
+		fileInfo  *FileInfo
+	)
+	for dict := range this.searchMap.Iter() {
+		if strings.Contains(dict.Val.(string), kw) {
+			if fileInfo, _ = this.GetFileInfoFromLevelDB(dict.Key); fileInfo != nil {
+				fileInfos = append(fileInfos, *fileInfo)
+			}
+		}
+	}
+	return fileInfos
+}
+func (this *Server) ListDir(w http.ResponseWriter, r *http.Request) {
+	var (
+		result      JsonResult
+		dir         string
+		filesInfo   []os.FileInfo
+		err         error
+		filesResult []FileInfoResult
+		tmpDir      string
+	)
+	if !this.IsPeer(r) {
+		result.Message = this.GetClusterNotPermitMessage(r)
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	dir = r.FormValue("dir")
+	//if dir == "" {
+	//	result.Message = "dir can't null"
+	//	w.Write([]byte(this.util.JsonEncodePretty(result)))
+	//	return
+	//}
+	dir = strings.Replace(dir, ".", "", -1)
+	if tmpDir, err = os.Readlink(dir); err == nil {
+		dir = tmpDir
+	}
+	filesInfo, err = ioutil.ReadDir(DOCKER_DIR + STORE_DIR_NAME + "/" + dir)
+	if err != nil {
+		log.Error(err)
+		result.Message = err.Error()
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	for _, f := range filesInfo {
+		fi := FileInfoResult{
+			Name:    f.Name(),
+			Size:    f.Size(),
+			IsDir:   f.IsDir(),
+			ModTime: f.ModTime(),
+			Path:    dir,
+			Md5:     this.util.MD5(STORE_DIR_NAME + "/" + dir + "/" + f.Name()),
+		}
+		filesResult = append(filesResult, fi)
+	}
+	result.Status = "ok"
+	result.Data = filesResult
+	w.Write([]byte(this.util.JsonEncodePretty(result)))
+	return
+}
+func (this *Server) VerifyGoogleCode(secret string, code string, discrepancy int64) bool {
+	var (
+		goauth *googleAuthenticator.GAuth
+	)
+	goauth = googleAuthenticator.NewGAuth()
+	if ok, err := goauth.VerifyCode(secret, code, discrepancy); ok {
+		return ok
+	} else {
+		log.Error(err)
+		return ok
+	}
+}
+func (this *Server) GenGoogleCode(w http.ResponseWriter, r *http.Request) {
+	var (
+		err    error
+		result JsonResult
+		secret string
+		goauth *googleAuthenticator.GAuth
+	)
+	r.ParseForm()
+	goauth = googleAuthenticator.NewGAuth()
+	secret = r.FormValue("secret")
+	result.Status = "ok"
+	result.Message = "ok"
+	if !this.IsPeer(r) {
+		result.Message = this.GetClusterNotPermitMessage(r)
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	if result.Data, err = goauth.GetCode(secret); err != nil {
+		result.Message = err.Error()
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	w.Write([]byte(this.util.JsonEncodePretty(result)))
+}
+func (this *Server) GenGoogleSecret(w http.ResponseWriter, r *http.Request) {
+	var (
+		result JsonResult
+	)
+	result.Status = "ok"
+	result.Message = "ok"
+	if !this.IsPeer(r) {
+		result.Message = this.GetClusterNotPermitMessage(r)
+		w.Write([]byte(this.util.JsonEncodePretty(result)))
+		return
+	}
+	GetSeed := func(length int) string {
+		seeds := "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
+		s := ""
+		random.Seed(time.Now().UnixNano())
+		for i := 0; i < length; i++ {
+			s += string(seeds[random.Intn(32)])
+		}
+		return s
+	}
+	result.Data = GetSeed(16)
+	w.Write([]byte(this.util.JsonEncodePretty(result)))
+}
+func (this *Server) Report(w http.ResponseWriter, r *http.Request) {
+	var (
+		reportFileName string
+		result         JsonResult
+		html           string
+	)
+	result.Status = "ok"
+	r.ParseForm()
+	if this.IsPeer(r) {
+		reportFileName = STATIC_DIR + "/report.html"
+		if this.util.IsExist(reportFileName) {
+			if data, err := this.util.ReadBinFile(reportFileName); err != nil {
+				log.Error(err)
+				result.Message = err.Error()
+				w.Write([]byte(this.util.JsonEncodePretty(result)))
+				return
+			} else {
+				html = string(data)
+				if Config().SupportGroupManage {
+					html = strings.Replace(html, "{group}", "/"+Config().Group, 1)
+				} else {
+					html = strings.Replace(html, "{group}", "", 1)
+				}
+				w.Write([]byte(html))
+				return
+			}
+		} else {
+			w.Write([]byte(fmt.Sprintf("%s is not found", reportFileName)))
+		}
+	} else {
+		w.Write([]byte(this.GetClusterNotPermitMessage(r)))
+	}
+}
+func (this *Server) Repair(w http.ResponseWriter, r *http.Request) {
+	var (
+		force       string
+		forceRepair bool
+		result      JsonResult
+	)
+	result.Status = "ok"
+	r.ParseForm()
+	force = r.FormValue("force")
+	if force == "1" {
+		forceRepair = true
+	}
+	if this.IsPeer(r) {
+		go this.AutoRepair(forceRepair)
+		result.Message = "repair job started..."
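+		// AutoRepair compares per-date file counts with every peer and queues any missing files for syncing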
+ w.Write([]byte(this.util.JsonEncodePretty(result))) + } else { + result.Message = this.GetClusterNotPermitMessage(r) + w.Write([]byte(this.util.JsonEncodePretty(result))) + } +} +func (this *Server) Status(w http.ResponseWriter, r *http.Request) { + var ( + status JsonResult + sts map[string]interface{} + today string + sumset mapset.Set + ok bool + v interface{} + ) + memStat := new(runtime.MemStats) + runtime.ReadMemStats(memStat) + today = this.util.GetToDay() + sts = make(map[string]interface{}) + sts["Fs.QueueFromPeers"] = len(this.queueFromPeers) + sts["Fs.QueueToPeers"] = len(this.queueToPeers) + sts["Fs.QueueFileLog"] = len(this.queueFileLog) + for _, k := range []string{CONST_FILE_Md5_FILE_NAME, CONST_Md5_ERROR_FILE_NAME, CONST_Md5_QUEUE_FILE_NAME} { + k2 := fmt.Sprintf("%s_%s", today, k) + if v, ok = this.sumMap.GetValue(k2); ok { + sumset = v.(mapset.Set) + if k == CONST_Md5_QUEUE_FILE_NAME { + sts["Fs.QueueSetSize"] = sumset.Cardinality() + } + if k == CONST_Md5_ERROR_FILE_NAME { + sts["Fs.ErrorSetSize"] = sumset.Cardinality() + } + if k == CONST_FILE_Md5_FILE_NAME { + sts["Fs.FileSetSize"] = sumset.Cardinality() + } + } + } + sts["Fs.AutoRepair"] = Config().AutoRepair + sts["Fs.RefreshInterval"] = Config().RefreshInterval + sts["Fs.Peers"] = Config().Peers + sts["Fs.Local"] = this.host + sts["Fs.FileStats"] = this.GetStat() + sts["Fs.ShowDir"] = Config().ShowDir + sts["Sys.NumGoroutine"] = runtime.NumGoroutine() + sts["Sys.NumCpu"] = runtime.NumCPU() + sts["Sys.Alloc"] = memStat.Alloc + sts["Sys.TotalAlloc"] = memStat.TotalAlloc + sts["Sys.HeapAlloc"] = memStat.HeapAlloc + sts["Sys.Frees"] = memStat.Frees + sts["Sys.HeapObjects"] = memStat.HeapObjects + sts["Sys.NumGC"] = memStat.NumGC + sts["Sys.GCCPUFraction"] = memStat.GCCPUFraction + sts["Sys.GCSys"] = memStat.GCSys + //sts["Sys.MemInfo"] = memStat + status.Status = "ok" + status.Data = sts + w.Write([]byte(this.util.JsonEncodePretty(status))) +} +func (this *Server) HeartBeat(w http.ResponseWriter, r *http.Request) { +} +func (this *Server) Index(w http.ResponseWriter, r *http.Request) { + var ( + uploadUrl string + uploadBigUrl string + uppy string + ) + uploadUrl = "/upload" + uploadBigUrl = CONST_BIG_UPLOAD_PATH_SUFFIX + if Config().EnableWebUpload { + if Config().SupportGroupManage { + uploadUrl = fmt.Sprintf("/%s/upload", Config().Group) + uploadBigUrl = fmt.Sprintf("/%s%s", Config().Group, CONST_BIG_UPLOAD_PATH_SUFFIX) + } + uppy = ` + + + + go-fastdfs + + + + +
Standard upload (strongly recommended)
+
+ +
+ File(file): + + Scene(scene): + + Output(output): + + Custom path(path): + + Google auth code(code): + + Custom auth(auth_token): + + +
+
+
Resumable upload (worth considering for large files)
+
+ +
+ + +
+
+		`
+		uppyFileName := STATIC_DIR + "/uppy.html"
+		if this.util.IsExist(uppyFileName) {
+			if data, err := this.util.ReadBinFile(uppyFileName); err != nil {
+				log.Error(err)
+			} else {
+				uppy = string(data)
+			}
+		} else {
+			this.util.WriteFile(uppyFileName, uppy)
+		}
+		fmt.Fprintf(w, uppy, uploadUrl, Config().DefaultScene, uploadBigUrl)
+	} else {
+		w.Write([]byte("web upload deny"))
+	}
+}
+func init() {
+	DOCKER_DIR = os.Getenv("GO_FASTDFS_DIR")
+	if DOCKER_DIR != "" {
+		if !strings.HasSuffix(DOCKER_DIR, "/") {
+			DOCKER_DIR = DOCKER_DIR + "/"
+		}
+	}
+	STORE_DIR = DOCKER_DIR + STORE_DIR_NAME
+	CONF_DIR = DOCKER_DIR + CONF_DIR_NAME
+	DATA_DIR = DOCKER_DIR + DATA_DIR_NAME
+	LOG_DIR = DOCKER_DIR + LOG_DIR_NAME
+	STATIC_DIR = DOCKER_DIR + STATIC_DIR_NAME
+	LARGE_DIR_NAME = "haystack"
+	LARGE_DIR = STORE_DIR + "/haystack"
+	CONST_LEVELDB_FILE_NAME = DATA_DIR + "/fileserver.db"
+	CONST_LOG_LEVELDB_FILE_NAME = DATA_DIR + "/log.db"
+	CONST_STAT_FILE_NAME = DATA_DIR + "/stat.json"
+	CONST_CONF_FILE_NAME = CONF_DIR + "/cfg.json"
+	CONST_SEARCH_FILE_NAME = DATA_DIR + "/search.txt"
+	FOLDERS = []string{DATA_DIR, STORE_DIR, CONF_DIR, STATIC_DIR}
+	logAccessConfigStr = strings.Replace(logAccessConfigStr, "{DOCKER_DIR}", DOCKER_DIR, -1)
+	logConfigStr = strings.Replace(logConfigStr, "{DOCKER_DIR}", DOCKER_DIR, -1)
+	for _, folder := range FOLDERS {
+		os.MkdirAll(folder, 0775)
+	}
+	server = NewServer()
+	flag.Parse()
+	peerId := fmt.Sprintf("%d", server.util.RandInt(0, 9))
+	if !server.util.FileExists(CONST_CONF_FILE_NAME) {
+		peer := "http://" + server.util.GetPulicIP() + ":8080"
+		cfg := fmt.Sprintf(cfgJson, peerId, peer, peer)
+		server.util.WriteFile(CONST_CONF_FILE_NAME, cfg)
+	}
+	if logger, err := log.LoggerFromConfigAsBytes([]byte(logConfigStr)); err != nil {
+		panic(err)
+	} else {
+		log.ReplaceLogger(logger)
+	}
+	if _logacc, err := log.LoggerFromConfigAsBytes([]byte(logAccessConfigStr)); err == nil {
+		logacc = _logacc
+		log.Info("success init log access")
+	} else {
+		log.Error(err.Error())
+	}
+	ParseConfig(CONST_CONF_FILE_NAME)
+	if Config().QueueSize == 0 {
+		Config().QueueSize = CONST_QUEUE_SIZE
+	}
+	if Config().PeerId == "" {
+		Config().PeerId = peerId
+	}
+	staticHandler = http.StripPrefix("/"+Config().Group+"/", http.FileServer(http.Dir(STORE_DIR)))
+	server.initComponent(false)
+}
+func (this *Server) test() {
+
+	testLock := func() {
+		wg := sync.WaitGroup{}
+		tt := func(i int, wg *sync.WaitGroup) {
+			//if server.lockMap.IsLock("xx") {
+			//	return
+			//}
+			//fmt.Println("timeer len",len(server.lockMap.Get()))
+			//time.Sleep(time.Nanosecond*10)
+			server.lockMap.LockKey("xx")
+			defer server.lockMap.UnLockKey("xx")
+			//time.Sleep(time.Nanosecond*1)
+			//fmt.Println("xx", i)
+			wg.Done()
+		}
+		go func() {
+			for {
+				time.Sleep(time.Second * 1)
+				fmt.Println("timeer len", len(server.lockMap.Get()), server.lockMap.Get())
+			}
+		}()
+		fmt.Println(len(server.lockMap.Get()))
+		for i := 0; i < 10000; i++ {
+			wg.Add(1)
+			go tt(i, &wg)
+		}
+		fmt.Println(len(server.lockMap.Get()))
+		fmt.Println(len(server.lockMap.Get()))
+		server.lockMap.LockKey("abc")
+		fmt.Println("lock")
+		time.Sleep(time.Second * 5)
+		server.lockMap.UnLockKey("abc")
+		server.lockMap.LockKey("abc")
+		server.lockMap.UnLockKey("abc")
+	}
+	_ = testLock
+	testFile := func() {
+		var (
+			err error
+			f   *os.File
+		)
+		f, err = os.OpenFile("tt", os.O_CREATE|os.O_RDWR, 0777)
+		if err != nil {
+			fmt.Println(err)
+		}
+		f.WriteAt([]byte("1"), 100)
+		f.Seek(0, 2)
+		f.Write([]byte("2"))
+		//fmt.Println(f.Seek(0, 2))
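+		// Seek whence values: 0 = from start, 1 = from current offset, 2 = from end of file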
+		//fmt.Println(f.Seek(3, 2))
+		//fmt.Println(f.Seek(3, 0))
+		//fmt.Println(f.Seek(3, 1))
+		//fmt.Println(f.Seek(3, 0))
+		//f.Write([]byte("1"))
+	}
+	_ = testFile
+	//testFile()
+	//testLock()
+}
+
+type hookDataStore struct {
+	tusd.DataStore
+}
+
+func (store hookDataStore) NewUpload(info tusd.FileInfo) (id string, err error) {
+	if Config().AuthUrl != "" {
+		if auth_token, ok := info.MetaData["auth_token"]; !ok {
+			msg := "token auth fail: auth_token is not in the http header Upload-Metadata; " +
+				"in uppy use uppy.setMeta({ auth_token: '9ee60e59-cb0f-4578-aaba-29b9fc2919ca' })"
+			log.Error(msg, fmt.Sprintf("current header:%v", info.MetaData))
+			return "", errors.New(msg)
+		} else {
+			req := httplib.Post(Config().AuthUrl)
+			req.Param("auth_token", auth_token)
+			req.SetTimeout(time.Second*5, time.Second*10)
+			content, err := req.String()
+			if err != nil {
+				log.Error(err)
+				return "", err
+			}
+			if strings.TrimSpace(content) != "ok" {
+				return "", errors.New("token auth fail, response: " + content)
+			}
+		}
+	}
+	return store.DataStore.NewUpload(info)
+}
+func (this *Server) initTus() {
+	var (
+		err     error
+		fileLog *os.File
+		bigDir  string
+	)
+	BIG_DIR := STORE_DIR + "/_big/" + Config().PeerId
+	os.MkdirAll(BIG_DIR, 0775)
+	os.MkdirAll(LOG_DIR, 0775)
+	store := filestore.FileStore{
+		Path: BIG_DIR,
+	}
+	if fileLog, err = os.OpenFile(LOG_DIR+"/tusd.log", os.O_CREATE|os.O_RDWR, 0666); err != nil {
+		log.Error(err)
+		panic("initTus")
+	}
+	go func() {
+		for {
+			if fi, err := fileLog.Stat(); err != nil {
+				log.Error(err)
+			} else {
+				if fi.Size() > 1024*1024*500 { // rotate when the log grows past 500MB
+					this.util.CopyFile(LOG_DIR+"/tusd.log", LOG_DIR+"/tusd.log.2")
+					fileLog.Seek(0, 0)
+					fileLog.Truncate(0)
+					fileLog.Seek(0, 2)
+				}
+			}
+			time.Sleep(time.Second * 30)
+		}
+	}()
+	l := slog.New(fileLog, "[tusd] ", slog.LstdFlags)
+	bigDir = CONST_BIG_UPLOAD_PATH_SUFFIX
+	if Config().SupportGroupManage {
+		bigDir = fmt.Sprintf("/%s%s", Config().Group, CONST_BIG_UPLOAD_PATH_SUFFIX)
+	}
+	composer := tusd.NewStoreComposer()
+	// support raw tus upload and download
+	store.GetReaderExt = func(id string) (io.Reader, error) {
+		var (
+			offset int64
+			err    error
+			length int
+			buffer []byte
+			fi     *FileInfo
+		)
+		if fi, err = this.GetFileInfoFromLevelDB(id); err != nil {
+			log.Error(err)
+			return nil, err
+		} else {
+			fp := DOCKER_DIR + fi.Path + "/" + fi.ReName
+			if this.util.FileExists(fp) {
+				log.Info(fmt.Sprintf("download:%s", fp))
+				return os.Open(fp)
+			}
+			ps := strings.Split(fp, ",")
+			if len(ps) > 2 && this.util.FileExists(ps[0]) {
+				if length, err = strconv.Atoi(ps[2]); err != nil {
+					return nil, err
+				}
+				if offset, err = strconv.ParseInt(ps[1], 10, 64); err != nil {
+					return nil, err
+				}
+				if buffer, err = this.util.ReadFileByOffSet(ps[0], offset, length); err != nil {
+					return nil, err
+				}
+				if buffer[0] == '1' {
+					bufferReader := bytes.NewBuffer(buffer[1:])
+					return bufferReader, nil
+				} else {
+					msg := "data no sync"
+					log.Error(msg)
+					return nil, errors.New(msg)
+				}
+			}
+			return nil, errors.New(fmt.Sprintf("%s not found", fp))
+		}
+	}
+	store.UseIn(composer)
+	SetupPreHooks := func(composer *tusd.StoreComposer) {
+		composer.UseCore(hookDataStore{
+			DataStore: composer.Core,
+		})
+	}
+	SetupPreHooks(composer)
+	handler, err := tusd.NewHandler(tusd.Config{
+		Logger:                  l,
+		BasePath:                bigDir,
+		StoreComposer:           composer,
+		NotifyCompleteUploads:   true,
+		RespectForwardedHeaders: true,
+	})
+	notify := func(handler *tusd.Handler) {
+		for {
+			select {
+			case info := <-handler.CompleteUploads:
+				log.Info("CompleteUploads", info)
+				name := ""
+				if v, ok := info.MetaData["filename"]; ok {
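+					// tus clients pass the original file name via the Upload-Metadata request header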
+ name = v + } + var err error + md5sum := "" + oldFullPath := BIG_DIR + "/" + info.ID + ".bin" + infoFullPath := BIG_DIR + "/" + info.ID + ".info" + if md5sum, err = this.util.GetFileSumByName(oldFullPath, Config().FileSumArithmetic); err != nil { + log.Error(err) + continue + } + ext := path.Ext(name) + filename := md5sum + ext + timeStamp := time.Now().Unix() + fpath := time.Now().Format("/20060102/15/04/") + newFullPath := STORE_DIR + "/" + Config().DefaultScene + fpath + Config().PeerId + "/" + filename + if fi, err := this.GetFileInfoFromLevelDB(md5sum); err != nil { + log.Error(err) + } else { + if fi.Md5 != "" { + if _, err := this.SaveFileInfoToLevelDB(info.ID, fi, this.ldb); err != nil { + log.Error(err) + } + log.Info(fmt.Sprintf("file is found md5:%s", fi.Md5)) + log.Info("remove file:", oldFullPath) + log.Info("remove file:", infoFullPath) + os.Remove(oldFullPath) + os.Remove(infoFullPath) + continue + } + } + fpath = STORE_DIR_NAME + "/" + Config().DefaultScene + fpath + Config().PeerId + os.MkdirAll(DOCKER_DIR+fpath, 0775) + fileInfo := &FileInfo{ + Name: name, + Path: fpath, + ReName: filename, + Size: info.Size, + TimeStamp: timeStamp, + Md5: md5sum, + Peers: []string{this.host}, + OffSet: -1, + } + if err = os.Rename(oldFullPath, newFullPath); err != nil { + log.Error(err) + continue + } + log.Info(fileInfo) + os.Remove(infoFullPath) + if _, err = this.SaveFileInfoToLevelDB(info.ID, fileInfo, this.ldb); err != nil { //assosiate file id + log.Error(err) + } + this.SaveFileMd5Log(fileInfo, CONST_FILE_Md5_FILE_NAME) + go this.postFileToPeer(fileInfo) + callBack := func(info tusd.FileInfo, fileInfo *FileInfo) { + if callback_url, ok := info.MetaData["callback_url"]; ok { + req := httplib.Post(callback_url) + req.SetTimeout(time.Second*10, time.Second*10) + req.Param("info", server.util.JsonEncodePretty(fileInfo)) + req.Param("id", info.ID) + if _, err := req.String(); err != nil { + log.Error(err) + } + } + } + go callBack(info, fileInfo) + } + } + } + go notify(handler) + if err != nil { + log.Error(err) + } + http.Handle(bigDir, http.StripPrefix(bigDir, handler)) +} +func (this *Server) FormatStatInfo() { + var ( + data []byte + err error + count int64 + stat map[string]interface{} + ) + if this.util.FileExists(CONST_STAT_FILE_NAME) { + if data, err = this.util.ReadBinFile(CONST_STAT_FILE_NAME); err != nil { + log.Error(err) + } else { + if err = json.Unmarshal(data, &stat); err != nil { + log.Error(err) + } else { + for k, v := range stat { + switch v.(type) { + case float64: + vv := strings.Split(fmt.Sprintf("%f", v), ".")[0] + if count, err = strconv.ParseInt(vv, 10, 64); err != nil { + log.Error(err) + } else { + this.statMap.Put(k, count) + } + default: + this.statMap.Put(k, v) + } + } + } + } + } else { + this.RepairStatByDate(this.util.GetToDay()) + } +} +func (this *Server) initComponent(isReload bool) { + var ( + ip string + ) + ip = this.util.GetPulicIP() + if Config().Host == "" { + if len(strings.Split(Config().Addr, ":")) == 2 { + server.host = fmt.Sprintf("http://%s:%s", ip, strings.Split(Config().Addr, ":")[1]) + Config().Host = server.host + } + } else { + if strings.HasPrefix(Config().Host, "http") { + server.host = Config().Host + } else { + server.host = "http://" + Config().Host + } + } + ex, _ := regexp.Compile("\\d+\\.\\d+\\.\\d+\\.\\d+") + var peers []string + for _, peer := range Config().Peers { + if this.util.Contains(ip, ex.FindAllString(peer, -1)) || + this.util.Contains("127.0.0.1", ex.FindAllString(peer, -1)) { + continue + } + if 
strings.HasPrefix(peer, "http") { + peers = append(peers, peer) + } else { + peers = append(peers, "http://"+peer) + } + } + Config().Peers = peers + if !isReload { + this.FormatStatInfo() + if Config().EnableTus { + this.initTus() + } + } + for _, s := range Config().Scenes { + kv := strings.Split(s, ":") + if len(kv) == 2 { + this.sceneMap.Put(kv[0], kv[1]) + } + } +} + +type HttpHandler struct { +} + +func (HttpHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) { + status_code := "200" + defer func(t time.Time) { + logStr := fmt.Sprintf("[Access] %s | %v | %s | %s | %s | %s |%s", + time.Now().Format("2006/01/02 - 15:04:05"), + res.Header(), + time.Since(t).String(), + server.util.GetClientIp(req), + req.Method, + status_code, + req.RequestURI, + ) + logacc.Info(logStr) + }(time.Now()) + defer func() { + if err := recover(); err != nil { + status_code = "500" + res.WriteHeader(500) + print(err) + buff := debug.Stack() + log.Error(err) + log.Error(string(buff)) + } + }() + if Config().EnableCrossOrigin { + server.CrossOrigin(res, req) + } + http.DefaultServeMux.ServeHTTP(res, req) +} +func (this *Server) Main() { + go func() { + for { + this.CheckFileAndSendToPeer(this.util.GetToDay(), CONST_Md5_ERROR_FILE_NAME, false) + //fmt.Println("CheckFileAndSendToPeer") + time.Sleep(time.Second * time.Duration(Config().RefreshInterval)) + //this.util.RemoveEmptyDir(STORE_DIR) + } + }() + go this.CleanAndBackUp() + go this.CheckClusterStatus() + go this.LoadQueueSendToPeer() + go this.ConsumerPostToPeer() + go this.ConsumerLog() + go this.ConsumerDownLoad() + //go this.LoadSearchDict() + if Config().EnableMigrate { + go this.RepairFileInfoFromFile() + } + if Config().AutoRepair { + go func() { + for { + time.Sleep(time.Minute * 3) + this.AutoRepair(false) + time.Sleep(time.Minute * 60) + } + }() + } + groupRoute := "" + if Config().SupportGroupManage { + groupRoute = "/" + Config().Group + } + uploadPage := "upload.html" + if groupRoute == "" { + http.HandleFunc(fmt.Sprintf("%s", "/"), this.Index) + http.HandleFunc(fmt.Sprintf("/%s", uploadPage), this.Index) + } else { + http.HandleFunc(fmt.Sprintf("%s", groupRoute), this.Index) + http.HandleFunc(fmt.Sprintf("%s/%s", groupRoute, uploadPage), this.Index) + } + http.HandleFunc(fmt.Sprintf("%s/check_file_exist", groupRoute), this.CheckFileExist) + http.HandleFunc(fmt.Sprintf("%s/upload", groupRoute), this.Upload) + http.HandleFunc(fmt.Sprintf("%s/delete", groupRoute), this.RemoveFile) + http.HandleFunc(fmt.Sprintf("%s/get_file_info", groupRoute), this.GetFileInfo) + http.HandleFunc(fmt.Sprintf("%s/sync", groupRoute), this.Sync) + http.HandleFunc(fmt.Sprintf("%s/stat", groupRoute), this.Stat) + http.HandleFunc(fmt.Sprintf("%s/repair_stat", groupRoute), this.RepairStatWeb) + http.HandleFunc(fmt.Sprintf("%s/status", groupRoute), this.Status) + http.HandleFunc(fmt.Sprintf("%s/repair", groupRoute), this.Repair) + http.HandleFunc(fmt.Sprintf("%s/report", groupRoute), this.Report) + http.HandleFunc(fmt.Sprintf("%s/backup", groupRoute), this.BackUp) + http.HandleFunc(fmt.Sprintf("%s/search", groupRoute), this.Search) + http.HandleFunc(fmt.Sprintf("%s/list_dir", groupRoute), this.ListDir) + http.HandleFunc(fmt.Sprintf("%s/remove_empty_dir", groupRoute), this.RemoveEmptyDir) + http.HandleFunc(fmt.Sprintf("%s/repair_fileinfo", groupRoute), this.RepairFileInfo) + http.HandleFunc(fmt.Sprintf("%s/reload", groupRoute), this.Reload) + http.HandleFunc(fmt.Sprintf("%s/syncfile_info", groupRoute), this.SyncFileInfo) + 
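+	// the remaining routes get the same group prefix when support_group_manage is enabled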
http.HandleFunc(fmt.Sprintf("%s/get_md5s_by_date", groupRoute), this.GetMd5sForWeb) + http.HandleFunc(fmt.Sprintf("%s/receive_md5s", groupRoute), this.ReceiveMd5s) + http.HandleFunc(fmt.Sprintf("%s/gen_google_secret", groupRoute), this.GenGoogleSecret) + http.HandleFunc(fmt.Sprintf("%s/gen_google_code", groupRoute), this.GenGoogleCode) + http.HandleFunc("/"+Config().Group+"/", this.Download) + fmt.Println("Listen on " + Config().Addr) + err := http.ListenAndServe(Config().Addr, new(HttpHandler)) + log.Error(err) + fmt.Println(err) +} +func main() { + server.Main() } diff --git a/fileserver_test.go b/fileserver_test.go new file mode 100644 index 00000000..ed2b9337 --- /dev/null +++ b/fileserver_test.go @@ -0,0 +1,400 @@ +package main + +import ( + "fmt" + "github.com/astaxie/beego/httplib" + "github.com/eventials/go-tus" + "io/ioutil" + _ "net/http/pprof" + "os" + "testing" + "time" +) + +const ( + CONST_SMALL_FILE_NAME = "small.txt" + CONST_BIG_FILE_NAME = "big.txt" + CONST_DOWNLOAD_BIG_FILE_NAME = "big_dowload.txt" + CONST_DOWNLOAD_SMALL_FILE_NAME = "small_dowload.txt" +) + +var testUtil = Common{} + +var endPoint = "http://127.0.0.1:8080" +var endPoint2 = "" + +var testCfg *GloablConfig + +var testSmallFileMd5 = "" +var testBigFileMd5 = "" + +func initFile(smallSize, bigSig int) { + + var ( + err error + ) + + smallBytes := make([]byte, smallSize) + for i := 0; i < len(smallBytes); i++ { + smallBytes[i] = 'a' + } + bigBytes := make([]byte, bigSig) + for i := 0; i < len(smallBytes); i++ { + bigBytes[i] = 'a' + } + + ioutil.WriteFile(CONST_SMALL_FILE_NAME, smallBytes, 0664) + ioutil.WriteFile(CONST_BIG_FILE_NAME, bigBytes, 0664) + testSmallFileMd5, err = testUtil.GetFileSumByName(CONST_SMALL_FILE_NAME, "") + if err != nil { + // testing.T.Error(err) + fmt.Println(err) + } + testBigFileMd5, err = testUtil.GetFileSumByName(CONST_BIG_FILE_NAME, "") + if err != nil { + //testing.T.Error(err) + fmt.Println(err) + } + fmt.Println(CONST_SMALL_FILE_NAME, testSmallFileMd5) + fmt.Println(CONST_BIG_FILE_NAME, testBigFileMd5) + +} + +func uploadContinueBig(t *testing.T) { + f, err := os.Open(CONST_BIG_FILE_NAME) + if err != nil { + panic(err) + } + defer f.Close() + client, err := tus.NewClient(endPoint+"/big/upload/", nil) + if err != nil { + t.Error(err) + } + upload, err := tus.NewUploadFromFile(f) + if err != nil { + t.Error(err) + return + } + uploader, err := client.CreateUpload(upload) + if err != nil { + t.Error(err) + return + } + url := uploader.Url() + err = uploader.Upload() + time.Sleep(time.Second * 1) + if err != nil { + t.Error(err) + return + } + if err := httplib.Get(url).ToFile(CONST_DOWNLOAD_BIG_FILE_NAME); err != nil { + t.Error(err) + } + fmt.Println(url) + + if md5sum, err := testUtil.GetFileSumByName(CONST_DOWNLOAD_BIG_FILE_NAME, ""); md5sum != testBigFileMd5 { + t.Error("uploadContinue bigfile download fail") + t.Error(err) + } + +} + +func refreshConfig(t *testing.T) { + var ( + cfg GloablConfig + err error + cfgStr string + result string + ) + + if testCfg == nil { + return + } + cfgStr = testUtil.JsonEncodePretty(testCfg) + if cfg.Addr == "" { + return + } + fmt.Println("refreshConfig") + req := httplib.Post(endPoint + "/reload?action=set") + req.Param("cfg", cfgStr) + result, err = req.String() + + if err != nil { + t.Error(err) + } + + req = httplib.Get(endPoint + "/reload?action=reload") + + result, err = req.String() + if err != nil { + t.Error(err) + + } + fmt.Println(result) + +} + +func testConfig(t *testing.T) { + + var ( + cfg GloablConfig + err error + cfgStr 
+		result     string
+		jsonResult JsonResult
+	)
+	req := httplib.Get(endPoint + "/reload?action=get")
+	req.SetTimeout(time.Second*2, time.Second*3)
+	err = req.ToJSON(&jsonResult)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	cfgStr = testUtil.JsonEncodePretty(jsonResult.Data.(map[string]interface{}))
+	fmt.Println("cfg:\n", cfgStr)
+	if err = json.Unmarshal([]byte(cfgStr), &cfg); err != nil {
+		t.Error(err)
+		return
+	} else {
+		testCfg = &cfg
+	}
+	if cfg.Peers != nil && len(cfg.Peers) > 0 && endPoint2 == "" {
+		endPoint2 = cfg.Peers[0]
+	}
+	if cfg.Group == "" || cfg.Addr == "" {
+		t.Error("fail config")
+	}
+	cfg.EnableMergeSmallFile = true
+	cfgStr = testUtil.JsonEncodePretty(cfg)
+	req = httplib.Post(endPoint + "/reload?action=set")
+	req.Param("cfg", cfgStr)
+	result, err = req.String()
+	if err != nil {
+		t.Error(err)
+	}
+	req = httplib.Get(endPoint + "/reload?action=reload")
+	result, err = req.String()
+	if err != nil {
+		t.Error(err)
+	}
+	fmt.Println(result)
+}
+
+func testCommon(t *testing.T) {
+	testUtil.RemoveEmptyDir("files")
+	if len(testUtil.GetUUID()) != 36 {
+		t.Error("testCommon fail")
+	}
+}
+
+func testCommonMap(t *testing.T) {
+	var (
+		commonMap *CommonMap
+	)
+	commonMap = NewCommonMap(1)
+	commonMap.AddUniq("1")
+	//if len(commonMap.Keys()) != 1 {
+	//	t.Error("testCommonMap fail")
+	//}
+	commonMap.Clear()
+	if len(commonMap.Keys()) != 0 {
+		t.Error("testCommonMap fail")
+	}
+	commonMap.AddCount("count", 1)
+	commonMap.Add("count")
+	if v, ok := commonMap.GetValue("count"); ok {
+		if v.(int) != 2 {
+			t.Error("testCommonMap fail")
+		}
+	}
+	if !commonMap.Contains("count") {
+		t.Error("testCommonMap fail")
+	}
+	commonMap.Zero()
+	if v, ok := commonMap.GetValue("count"); ok {
+		if v.(int) != 0 {
+			t.Error("testCommonMap fail")
+		}
+	}
+	commonMap.Remove("count")
+	if _, ok := commonMap.GetValue("count"); ok {
+		t.Error("testCommonMap fail")
+	}
+}
+
+func testApis(t *testing.T) {
+	var (
+		err    error
+		result string
+	)
+	apis := []string{"/index", "/status", "/stat", "/repair?force=1", "/repair_stat",
+		"/sync?force=1&date=" + testUtil.GetToDay(), "/delete?md5=" + testSmallFileMd5,
+		"/repair_fileinfo", ""}
+	for _, v := range apis {
+		req := httplib.Get(endPoint + v)
+		req.SetTimeout(time.Second*2, time.Second*3)
+		result, err = req.String()
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		fmt.Println("#########apis#########", v)
+		fmt.Println(result)
+	}
+}
+
+func uploadContinueSmall(t *testing.T) {
+	f, err := os.Open(CONST_SMALL_FILE_NAME)
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+	client, err := tus.NewClient(endPoint+"/big/upload/", nil)
+	if err != nil {
+		t.Error(err)
+	}
+	upload, err := tus.NewUploadFromFile(f)
+	if err != nil {
+		t.Error(err)
+	}
+	uploader, err := client.CreateUpload(upload)
+	if err != nil {
+		t.Error(err)
+	}
+	url := uploader.Url()
+	err = uploader.Upload()
+	time.Sleep(time.Second * 1)
+	if err != nil {
+		t.Error(err)
+	}
+	if err := httplib.Get(url).ToFile(CONST_DOWNLOAD_SMALL_FILE_NAME); err != nil {
+		t.Error(err)
+	}
+	fmt.Println(url)
+	if md5sum, err := testUtil.GetFileSumByName(CONST_DOWNLOAD_SMALL_FILE_NAME, ""); md5sum != testSmallFileMd5 {
+		t.Error("uploadContinue smallfile download fail")
+		t.Error(err)
+	}
+}
+
+func uploadSmall(t *testing.T) {
+	var obj FileResult
+	req := httplib.Post(endPoint + "/upload")
+	req.PostFile("file", CONST_SMALL_FILE_NAME)
+	req.Param("output", "json")
+	req.Param("scene", "")
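+	// empty scene/path: the server applies its defaults (default scene, date-based storage path)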
req.Param("path", "") + req.ToJSON(&obj) + fmt.Println(obj.Url) + if obj.Md5 != testSmallFileMd5 { + t.Error("file not equal") + } else { + req = httplib.Get(obj.Url) + req.ToFile(CONST_DOWNLOAD_SMALL_FILE_NAME) + if md5sum, err := testUtil.GetFileSumByName(CONST_DOWNLOAD_SMALL_FILE_NAME, ""); md5sum != testSmallFileMd5 { + t.Error("small file not equal", err) + } + } +} + +func uploadLarge(t *testing.T) { + var obj FileResult + req := httplib.Post(endPoint + "/upload") + req.PostFile("file", CONST_BIG_FILE_NAME) + req.Param("output", "json") + req.Param("scene", "") + req.Param("path", "") + req.ToJSON(&obj) + fmt.Println(obj.Url) + if obj.Md5 != testBigFileMd5 { + t.Error("file not equal") + } else { + req = httplib.Get(obj.Url) + req.ToFile(CONST_DOWNLOAD_BIG_FILE_NAME) + if md5sum, err := testUtil.GetFileSumByName(CONST_DOWNLOAD_BIG_FILE_NAME, ""); md5sum != testBigFileMd5 { + + t.Error("big file not equal", err) + } + } +} + +func checkFileExist(t *testing.T) { + var obj FileInfo + req := httplib.Post(endPoint + "/check_file_exist") + req.Param("md5", testBigFileMd5) + req.ToJSON(&obj) + if obj.Md5 != testBigFileMd5 { + t.Error("file not equal testBigFileMd5") + } + req = httplib.Get(endPoint + "/check_file_exist?md5=" + testSmallFileMd5) + req.ToJSON(&obj) + if obj.Md5 != testSmallFileMd5 { + t.Error("file not equal testSmallFileMd5") + } +} + +func Test_main(t *testing.T) { + + tests := []struct { + name string + }{ + {"main"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + testCommonMap(t) + + go main() + + time.Sleep(time.Second * 1) + testConfig(t) + + initFile(1024*testUtil.RandInt(100, 512), 1024*1024*testUtil.RandInt(2, 20)) + uploadContinueBig(t) + uploadContinueSmall(t) + initFile(1024*testUtil.RandInt(100, 512), 1024*1024*testUtil.RandInt(2, 20)) + uploadSmall(t) + uploadLarge(t) + checkFileExist(t) + testApis(t) + if endPoint != endPoint2 && endPoint2!="" { + endPoint = endPoint2 + fmt.Println("#######endPoint2######",endPoint2) + initFile(1024*testUtil.RandInt(100, 512), 1024*1024*testUtil.RandInt(2, 20)) + uploadContinueBig(t) + uploadContinueSmall(t) + initFile(1024*testUtil.RandInt(100, 512), 1024*1024*testUtil.RandInt(2, 20)) + uploadSmall(t) + uploadLarge(t) + checkFileExist(t) + testApis(t) + } + time.Sleep(time.Second * 2) + //testCommon(t) + }) + } +} diff --git a/gen_file.py b/gen_file.py new file mode 100644 index 00000000..7afe5e40 --- /dev/null +++ b/gen_file.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +import os +j=0 +for i in range(0,1000000): + if i%1000==0: + j=i + os.system('mkdir -p %s'%(i)) + with open('%s/%s.txt'%(j,i),'w+') as f: + f.write(str(i)) diff --git a/gofastdfs.service b/gofastdfs.service new file mode 100644 index 00000000..6375893c --- /dev/null +++ b/gofastdfs.service @@ -0,0 +1,15 @@ +[Unit] +Description=gofastdfs service +Wants=network.target + +[Service] +PIDFile=/home/gofastdfs/conf/app.pid +Environment="GO_FASTDFS_DIR=/home/gofastdfs" #/home/gofastdfs 修改成你的安装路径 +ExecStart=/home/gofastdfs/fileserver $GO_FASTDFS_DIR +ExecReload=/bin/kill -s HUP $MAINPID +ExecStop=/bin/kill -s QUIT $MAINPID +PrivateTmp=true +Restart=always + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/nginx/gofastdfs.conf b/nginx/gofastdfs.conf new file mode 100644 index 00000000..e6c30ff6 --- /dev/null +++ b/nginx/gofastdfs.conf @@ -0,0 +1,58 @@ +worker_processes 1; +events { + worker_connections 1024; +} +http { + include mime.types; + default_type application/html; + error_log logs/error_www.abc.com.log 
+    sendfile on;
+    keepalive_timeout 65;
+    upstream go-fastdfs {
+        server 10.1.52.154:8080;
+        server 10.1.52.155:8080;
+        ip_hash;
+    }
+    server {
+        listen 80;
+        server_name localhost;
+        location / {
+            root html;
+            index index.html index.htm;
+        }
+        error_page 500 502 503 504 /50x.html;
+        location = /50x.html {
+            root html;
+        }
+        location ~/group1 {
+            access_by_lua '
+                -- request path without query args, e.g. /group1/default/test.png
+                local uri = ngx.var.uri;
+                -- query arguments
+                local args = ngx.req.get_uri_args();
+                -- timestamp argument, passed in milliseconds
+                local ts = args["timestamp"];
+                -- token argument
+                local token1 = args["token"];
+                -- refresh the cached system time
+                ngx.update_time();
+                -- current server time; ngx.time() returns seconds
+                local getTime = ngx.time() * 1000;
+                -- token lifetime in milliseconds
+                local expire = 60 * 10 * 1000
+                -- salted md5: token = md5(salt .. timestamp)
+                local token2 = ngx.md5("" .. "salt" .. tostring(ts));
+                -- the timestamp must fall within the validity window
+                if (tonumber(ts) > getTime - expire) and (tonumber(ts) < getTime + expire) then
+                    -- compare the tokens
+                    if token1 == token2 then
+                        -- forward the request when the check passes
+                        ngx.exec("@gofastdfs");
+                    end
+                end
+                -- anything that fails the checks is rejected
+                ngx.exit(ngx.HTTP_FORBIDDEN);
+            ';
+        }
+        location @gofastdfs {
+            # named location referenced by ngx.exec above
+            proxy_pass http://go-fastdfs;
+        }
+    }
+}
\ No newline at end of file
diff --git a/static/report.html b/static/report.html
new file mode 100644
index 00000000..bd5279c4
--- /dev/null
+++ b/static/report.html
@@ -0,0 +1,182 @@
+
+
+
+
+
+ go-fastdfs report
+
+
+
+
+
+
+
+
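+<!-- report page (markup stripped in this extract): charts the per-date file counts and sizes returned by /stat?echart=1 -->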
+
+ + + + +
+ + + + \ No newline at end of file diff --git a/static/uppy.html b/static/uppy.html new file mode 100644 index 00000000..c7abc83e --- /dev/null +++ b/static/uppy.html @@ -0,0 +1,47 @@ + + + + + go-fastdfs + + + + +
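+<!-- fallback upload page: Index() embeds this markup and writes it here when static/uppy.html is missing -->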
Standard upload (strongly recommended)
+
+ +
+ File(file): + + Scene(scene): + + Output(output): + + Custom path(path): + + Google auth code(code): + + Custom auth(auth_token): + + +
+
+
Resumable upload (worth considering for large files)
+
+ +
+ + +
+ + \ No newline at end of file diff --git a/vendor/github.com/astaxie/beego/LICENSE b/vendor/github.com/astaxie/beego/LICENSE new file mode 100644 index 00000000..5dbd4243 --- /dev/null +++ b/vendor/github.com/astaxie/beego/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 astaxie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/astaxie/beego/httplib/README.md b/vendor/github.com/astaxie/beego/httplib/README.md new file mode 100644 index 00000000..97df8e6b --- /dev/null +++ b/vendor/github.com/astaxie/beego/httplib/README.md @@ -0,0 +1,97 @@ +# httplib +httplib is an libs help you to curl remote url. + +# How to use? + +## GET +you can use Get to crawl data. + + import "github.com/astaxie/beego/httplib" + + str, err := httplib.Get("http://beego.me/").String() + if err != nil { + // error + } + fmt.Println(str) + +## POST +POST data to remote url + + req := httplib.Post("http://beego.me/") + req.Param("username","astaxie") + req.Param("password","123456") + str, err := req.String() + if err != nil { + // error + } + fmt.Println(str) + +## Set timeout + +The default timeout is `60` seconds, function prototype: + + SetTimeout(connectTimeout, readWriteTimeout time.Duration) + +Example: + + // GET + httplib.Get("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second) + + // POST + httplib.Post("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second) + + +## Debug + +If you want to debug the request info, set the debug on + + httplib.Get("http://beego.me/").Debug(true) + +## Set HTTP Basic Auth + + str, err := Get("http://beego.me/").SetBasicAuth("user", "passwd").String() + if err != nil { + // error + } + fmt.Println(str) + +## Set HTTPS + +If request url is https, You can set the client support TSL: + + httplib.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}) + +More info about the `tls.Config` please visit http://golang.org/pkg/crypto/tls/#Config + +## Set HTTP Version + +some servers need to specify the protocol version of HTTP + + httplib.Get("http://beego.me/").SetProtocolVersion("HTTP/1.1") + +## Set Cookie + +some http request need setcookie. So set it like this: + + cookie := &http.Cookie{} + cookie.Name = "username" + cookie.Value = "astaxie" + httplib.Get("http://beego.me/").SetCookie(cookie) + +## Upload file + +httplib support mutil file upload, use `req.PostFile()` + + req := httplib.Post("http://beego.me/") + req.Param("username","astaxie") + req.PostFile("uploadfile1", "httplib.pdf") + str, err := req.String() + if err != nil { + // error + } + fmt.Println(str) + + +See godoc for further documentation and examples. + +* [godoc.org/github.com/astaxie/beego/httplib](https://godoc.org/github.com/astaxie/beego/httplib) diff --git a/vendor/github.com/astaxie/beego/httplib/httplib.go b/vendor/github.com/astaxie/beego/httplib/httplib.go new file mode 100644 index 00000000..074cf661 --- /dev/null +++ b/vendor/github.com/astaxie/beego/httplib/httplib.go @@ -0,0 +1,624 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httplib is used as http.Client +// Usage: +// +// import "github.com/astaxie/beego/httplib" +// +// b := httplib.Post("http://beego.me/") +// b.Param("username","astaxie") +// b.Param("password","123456") +// b.PostFile("uploadfile1", "httplib.pdf") +// b.PostFile("uploadfile2", "httplib.txt") +// str, err := b.String() +// if err != nil { +// t.Fatal(err) +// } +// fmt.Println(str) +// +// more docs http://beego.me/docs/module/httplib.md +package httplib + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "encoding/json" + "encoding/xml" + "io" + "io/ioutil" + "log" + "mime/multipart" + "net" + "net/http" + "net/http/cookiejar" + "net/http/httputil" + "net/url" + "os" + "strings" + "sync" + "time" + "gopkg.in/yaml.v2" +) + +var defaultSetting = BeegoHTTPSettings{ + UserAgent: "beegoServer", + ConnectTimeout: 60 * time.Second, + ReadWriteTimeout: 60 * time.Second, + Gzip: true, + DumpBody: true, +} + +var defaultCookieJar http.CookieJar +var settingMutex sync.Mutex + +// createDefaultCookie creates a global cookiejar to store cookies. +func createDefaultCookie() { + settingMutex.Lock() + defer settingMutex.Unlock() + defaultCookieJar, _ = cookiejar.New(nil) +} + +// SetDefaultSetting Overwrite default settings +func SetDefaultSetting(setting BeegoHTTPSettings) { + settingMutex.Lock() + defer settingMutex.Unlock() + defaultSetting = setting +} + +// NewBeegoRequest return *BeegoHttpRequest with specific method +func NewBeegoRequest(rawurl, method string) *BeegoHTTPRequest { + var resp http.Response + u, err := url.Parse(rawurl) + if err != nil { + log.Println("Httplib:", err) + } + req := http.Request{ + URL: u, + Method: method, + Header: make(http.Header), + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + } + return &BeegoHTTPRequest{ + url: rawurl, + req: &req, + params: map[string][]string{}, + files: map[string]string{}, + setting: defaultSetting, + resp: &resp, + } +} + +// Get returns *BeegoHttpRequest with GET method. +func Get(url string) *BeegoHTTPRequest { + return NewBeegoRequest(url, "GET") +} + +// Post returns *BeegoHttpRequest with POST method. +func Post(url string) *BeegoHTTPRequest { + return NewBeegoRequest(url, "POST") +} + +// Put returns *BeegoHttpRequest with PUT method. +func Put(url string) *BeegoHTTPRequest { + return NewBeegoRequest(url, "PUT") +} + +// Delete returns *BeegoHttpRequest DELETE method. +func Delete(url string) *BeegoHTTPRequest { + return NewBeegoRequest(url, "DELETE") +} + +// Head returns *BeegoHttpRequest with HEAD method. 
+func Head(url string) *BeegoHTTPRequest { + return NewBeegoRequest(url, "HEAD") +} + +// BeegoHTTPSettings is the http.Client setting +type BeegoHTTPSettings struct { + ShowDebug bool + UserAgent string + ConnectTimeout time.Duration + ReadWriteTimeout time.Duration + TLSClientConfig *tls.Config + Proxy func(*http.Request) (*url.URL, error) + Transport http.RoundTripper + CheckRedirect func(req *http.Request, via []*http.Request) error + EnableCookie bool + Gzip bool + DumpBody bool + Retries int // if set to -1 means will retry forever +} + +// BeegoHTTPRequest provides more useful methods for requesting one url than http.Request. +type BeegoHTTPRequest struct { + url string + req *http.Request + params map[string][]string + files map[string]string + setting BeegoHTTPSettings + resp *http.Response + body []byte + dump []byte +} + +// GetRequest return the request object +func (b *BeegoHTTPRequest) GetRequest() *http.Request { + return b.req +} + +// Setting Change request settings +func (b *BeegoHTTPRequest) Setting(setting BeegoHTTPSettings) *BeegoHTTPRequest { + b.setting = setting + return b +} + +// SetBasicAuth sets the request's Authorization header to use HTTP Basic Authentication with the provided username and password. +func (b *BeegoHTTPRequest) SetBasicAuth(username, password string) *BeegoHTTPRequest { + b.req.SetBasicAuth(username, password) + return b +} + +// SetEnableCookie sets enable/disable cookiejar +func (b *BeegoHTTPRequest) SetEnableCookie(enable bool) *BeegoHTTPRequest { + b.setting.EnableCookie = enable + return b +} + +// SetUserAgent sets User-Agent header field +func (b *BeegoHTTPRequest) SetUserAgent(useragent string) *BeegoHTTPRequest { + b.setting.UserAgent = useragent + return b +} + +// Debug sets show debug or not when executing request. +func (b *BeegoHTTPRequest) Debug(isdebug bool) *BeegoHTTPRequest { + b.setting.ShowDebug = isdebug + return b +} + +// Retries sets Retries times. +// default is 0 means no retried. +// -1 means retried forever. +// others means retried times. +func (b *BeegoHTTPRequest) Retries(times int) *BeegoHTTPRequest { + b.setting.Retries = times + return b +} + +// DumpBody setting whether need to Dump the Body. +func (b *BeegoHTTPRequest) DumpBody(isdump bool) *BeegoHTTPRequest { + b.setting.DumpBody = isdump + return b +} + +// DumpRequest return the DumpRequest +func (b *BeegoHTTPRequest) DumpRequest() []byte { + return b.dump +} + +// SetTimeout sets connect time out and read-write time out for BeegoRequest. +func (b *BeegoHTTPRequest) SetTimeout(connectTimeout, readWriteTimeout time.Duration) *BeegoHTTPRequest { + b.setting.ConnectTimeout = connectTimeout + b.setting.ReadWriteTimeout = readWriteTimeout + return b +} + +// SetTLSClientConfig sets tls connection configurations if visiting https url. +func (b *BeegoHTTPRequest) SetTLSClientConfig(config *tls.Config) *BeegoHTTPRequest { + b.setting.TLSClientConfig = config + return b +} + +// Header add header item string in request. +func (b *BeegoHTTPRequest) Header(key, value string) *BeegoHTTPRequest { + b.req.Header.Set(key, value) + return b +} + +// SetHost set the request host +func (b *BeegoHTTPRequest) SetHost(host string) *BeegoHTTPRequest { + b.req.Host = host + return b +} + +// SetProtocolVersion Set the protocol version for incoming requests. +// Client requests always use HTTP/1.1. 
+func (b *BeegoHTTPRequest) SetProtocolVersion(vers string) *BeegoHTTPRequest { + if len(vers) == 0 { + vers = "HTTP/1.1" + } + + major, minor, ok := http.ParseHTTPVersion(vers) + if ok { + b.req.Proto = vers + b.req.ProtoMajor = major + b.req.ProtoMinor = minor + } + + return b +} + +// SetCookie add cookie into request. +func (b *BeegoHTTPRequest) SetCookie(cookie *http.Cookie) *BeegoHTTPRequest { + b.req.Header.Add("Cookie", cookie.String()) + return b +} + +// SetTransport set the setting transport +func (b *BeegoHTTPRequest) SetTransport(transport http.RoundTripper) *BeegoHTTPRequest { + b.setting.Transport = transport + return b +} + +// SetProxy set the http proxy +// example: +// +// func(req *http.Request) (*url.URL, error) { +// u, _ := url.ParseRequestURI("http://127.0.0.1:8118") +// return u, nil +// } +func (b *BeegoHTTPRequest) SetProxy(proxy func(*http.Request) (*url.URL, error)) *BeegoHTTPRequest { + b.setting.Proxy = proxy + return b +} + +// SetCheckRedirect specifies the policy for handling redirects. +// +// If CheckRedirect is nil, the Client uses its default policy, +// which is to stop after 10 consecutive requests. +func (b *BeegoHTTPRequest) SetCheckRedirect(redirect func(req *http.Request, via []*http.Request) error) *BeegoHTTPRequest { + b.setting.CheckRedirect = redirect + return b +} + +// Param adds query param in to request. +// params build query string as ?key1=value1&key2=value2... +func (b *BeegoHTTPRequest) Param(key, value string) *BeegoHTTPRequest { + if param, ok := b.params[key]; ok { + b.params[key] = append(param, value) + } else { + b.params[key] = []string{value} + } + return b +} + +// PostFile add a post file to the request +func (b *BeegoHTTPRequest) PostFile(formname, filename string) *BeegoHTTPRequest { + b.files[formname] = filename + return b +} + +// Body adds request raw body. +// it supports string and []byte. +func (b *BeegoHTTPRequest) Body(data interface{}) *BeegoHTTPRequest { + switch t := data.(type) { + case string: + bf := bytes.NewBufferString(t) + b.req.Body = ioutil.NopCloser(bf) + b.req.ContentLength = int64(len(t)) + case []byte: + bf := bytes.NewBuffer(t) + b.req.Body = ioutil.NopCloser(bf) + b.req.ContentLength = int64(len(t)) + } + return b +} + +// XMLBody adds request raw body encoding by XML. +func (b *BeegoHTTPRequest) XMLBody(obj interface{}) (*BeegoHTTPRequest, error) { + if b.req.Body == nil && obj != nil { + byts, err := xml.Marshal(obj) + if err != nil { + return b, err + } + b.req.Body = ioutil.NopCloser(bytes.NewReader(byts)) + b.req.ContentLength = int64(len(byts)) + b.req.Header.Set("Content-Type", "application/xml") + } + return b, nil +} + +// YAMLBody adds request raw body encoding by YAML. +func (b *BeegoHTTPRequest) YAMLBody(obj interface{}) (*BeegoHTTPRequest, error) { + if b.req.Body == nil && obj != nil { + byts, err := yaml.Marshal(obj) + if err != nil { + return b, err + } + b.req.Body = ioutil.NopCloser(bytes.NewReader(byts)) + b.req.ContentLength = int64(len(byts)) + b.req.Header.Set("Content-Type", "application/x+yaml") + } + return b, nil +} + +// JSONBody adds request raw body encoding by JSON. 
+func (b *BeegoHTTPRequest) JSONBody(obj interface{}) (*BeegoHTTPRequest, error) { + if b.req.Body == nil && obj != nil { + byts, err := json.Marshal(obj) + if err != nil { + return b, err + } + b.req.Body = ioutil.NopCloser(bytes.NewReader(byts)) + b.req.ContentLength = int64(len(byts)) + b.req.Header.Set("Content-Type", "application/json") + } + return b, nil +} + +func (b *BeegoHTTPRequest) buildURL(paramBody string) { + // build GET url with query string + if b.req.Method == "GET" && len(paramBody) > 0 { + if strings.Contains(b.url, "?") { + b.url += "&" + paramBody + } else { + b.url = b.url + "?" + paramBody + } + return + } + + // build POST/PUT/PATCH url and body + if (b.req.Method == "POST" || b.req.Method == "PUT" || b.req.Method == "PATCH" || b.req.Method == "DELETE") && b.req.Body == nil { + // with files + if len(b.files) > 0 { + pr, pw := io.Pipe() + bodyWriter := multipart.NewWriter(pw) + go func() { + for formname, filename := range b.files { + fileWriter, err := bodyWriter.CreateFormFile(formname, filename) + if err != nil { + log.Println("Httplib:", err) + } + fh, err := os.Open(filename) + if err != nil { + log.Println("Httplib:", err) + } + //iocopy + _, err = io.Copy(fileWriter, fh) + fh.Close() + if err != nil { + log.Println("Httplib:", err) + } + } + for k, v := range b.params { + for _, vv := range v { + bodyWriter.WriteField(k, vv) + } + } + bodyWriter.Close() + pw.Close() + }() + b.Header("Content-Type", bodyWriter.FormDataContentType()) + b.req.Body = ioutil.NopCloser(pr) + return + } + + // with params + if len(paramBody) > 0 { + b.Header("Content-Type", "application/x-www-form-urlencoded") + b.Body(paramBody) + } + } +} + +func (b *BeegoHTTPRequest) getResponse() (*http.Response, error) { + if b.resp.StatusCode != 0 { + return b.resp, nil + } + resp, err := b.DoRequest() + if err != nil { + return nil, err + } + b.resp = resp + return resp, nil +} + +// DoRequest will do the client.Do +func (b *BeegoHTTPRequest) DoRequest() (resp *http.Response, err error) { + var paramBody string + if len(b.params) > 0 { + var buf bytes.Buffer + for k, v := range b.params { + for _, vv := range v { + buf.WriteString(url.QueryEscape(k)) + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(vv)) + buf.WriteByte('&') + } + } + paramBody = buf.String() + paramBody = paramBody[0 : len(paramBody)-1] + } + + b.buildURL(paramBody) + urlParsed, err := url.Parse(b.url) + if err != nil { + return nil, err + } + + b.req.URL = urlParsed + + trans := b.setting.Transport + + if trans == nil { + // create default transport + trans = &http.Transport{ + TLSClientConfig: b.setting.TLSClientConfig, + Proxy: b.setting.Proxy, + Dial: TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout), + MaxIdleConnsPerHost: 100, + } + } else { + // if b.transport is *http.Transport then set the settings. 
+		if t, ok := trans.(*http.Transport); ok {
+			if t.TLSClientConfig == nil {
+				t.TLSClientConfig = b.setting.TLSClientConfig
+			}
+			if t.Proxy == nil {
+				t.Proxy = b.setting.Proxy
+			}
+			if t.Dial == nil {
+				t.Dial = TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout)
+			}
+		}
+	}
+
+	var jar http.CookieJar
+	if b.setting.EnableCookie {
+		if defaultCookieJar == nil {
+			createDefaultCookie()
+		}
+		jar = defaultCookieJar
+	}
+
+	client := &http.Client{
+		Transport: trans,
+		Jar:       jar,
+	}
+
+	if b.setting.UserAgent != "" && b.req.Header.Get("User-Agent") == "" {
+		b.req.Header.Set("User-Agent", b.setting.UserAgent)
+	}
+
+	if b.setting.CheckRedirect != nil {
+		client.CheckRedirect = b.setting.CheckRedirect
+	}
+
+	if b.setting.ShowDebug {
+		dump, err := httputil.DumpRequest(b.req, b.setting.DumpBody)
+		if err != nil {
+			log.Println(err.Error())
+		}
+		b.dump = dump
+	}
+	// Retries defaults to 0, which runs the request once.
+	// If Retries is -1, the request is retried until it succeeds.
+	// Otherwise it is retried the configured number of times.
+	for i := 0; b.setting.Retries == -1 || i <= b.setting.Retries; i++ {
+		resp, err = client.Do(b.req)
+		if err == nil {
+			break
+		}
+	}
+	return resp, err
+}
+
+// String returns the response body as a string.
+// It fetches the response internally.
+func (b *BeegoHTTPRequest) String() (string, error) {
+	data, err := b.Bytes()
+	if err != nil {
+		return "", err
+	}
+
+	return string(data), nil
+}
+
+// Bytes returns the response body as a []byte.
+// It fetches the response internally.
+func (b *BeegoHTTPRequest) Bytes() ([]byte, error) {
+	if b.body != nil {
+		return b.body, nil
+	}
+	resp, err := b.getResponse()
+	if err != nil {
+		return nil, err
+	}
+	if resp.Body == nil {
+		return nil, nil
+	}
+	defer resp.Body.Close()
+	if b.setting.Gzip && resp.Header.Get("Content-Encoding") == "gzip" {
+		reader, err := gzip.NewReader(resp.Body)
+		if err != nil {
+			return nil, err
+		}
+		b.body, err = ioutil.ReadAll(reader)
+		return b.body, err
+	}
+	b.body, err = ioutil.ReadAll(resp.Body)
+	return b.body, err
+}
+
+// ToFile saves the response body to a file.
+// It fetches the response internally.
+func (b *BeegoHTTPRequest) ToFile(filename string) error {
+	f, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	resp, err := b.getResponse()
+	if err != nil {
+		return err
+	}
+	if resp.Body == nil {
+		return nil
+	}
+	defer resp.Body.Close()
+	_, err = io.Copy(f, resp.Body)
+	return err
+}
+
+// ToJSON unmarshals the response body as JSON into v.
+// It fetches the response internally.
+func (b *BeegoHTTPRequest) ToJSON(v interface{}) error {
+	data, err := b.Bytes()
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(data, v)
+}
+
+// ToXML unmarshals the response body as XML into v.
+// It fetches the response internally.
+func (b *BeegoHTTPRequest) ToXML(v interface{}) error {
+	data, err := b.Bytes()
+	if err != nil {
+		return err
+	}
+	return xml.Unmarshal(data, v)
+}
+
+// ToYAML unmarshals the response body as YAML into v.
+// It fetches the response internally.
+func (b *BeegoHTTPRequest) ToYAML(v interface{}) error {
+	data, err := b.Bytes()
+	if err != nil {
+		return err
+	}
+	return yaml.Unmarshal(data, v)
+}
+
+// Response executes the request and returns the response manually.
+func (b *BeegoHTTPRequest) Response() (*http.Response, error) {
+	return b.getResponse()
+}
+
+// TimeoutDialer returns a connection dialer with the given connect and
+// read/write timeouts, suitable for an http.Transport's Dial field.
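+//
+// For example (a sketch; the timeout values are arbitrary):
+//
+//	transport := &http.Transport{
+//		Dial: TimeoutDialer(5*time.Second, 60*time.Second),
+//	}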
+func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) { + return func(netw, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(netw, addr, cTimeout) + if err != nil { + return nil, err + } + err = conn.SetDeadline(time.Now().Add(rwTimeout)) + return conn, err + } +} diff --git a/vendor/github.com/bmizerany/pat/README.md b/vendor/github.com/bmizerany/pat/README.md new file mode 100644 index 00000000..2bb12fa6 --- /dev/null +++ b/vendor/github.com/bmizerany/pat/README.md @@ -0,0 +1,82 @@ +# pat (formerly pat.go) - A Sinatra style pattern muxer for Go's net/http library + +[![GoDoc](https://godoc.org/github.com/bmizerany/pat?status.svg)](https://godoc.org/github.com/bmizerany/pat) + +## INSTALL + + $ go get github.com/bmizerany/pat + +## USE + +```go +package main + +import ( + "io" + "net/http" + "github.com/bmizerany/pat" + "log" +) + +// hello world, the web server +func HelloServer(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, "hello, "+req.URL.Query().Get(":name")+"!\n") +} + +func main() { + m := pat.New() + m.Get("/hello/:name", http.HandlerFunc(HelloServer)) + + // Register this pat with the default serve mux so that other packages + // may also be exported. (i.e. /debug/pprof/*) + http.Handle("/", m) + err := http.ListenAndServe(":12345", nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} +``` + +It's that simple. + +For more information, see: +http://godoc.org/github.com/bmizerany/pat + +## CONTRIBUTORS + +* Alexis Svinartchouk (@zvin) +* Blake Mizerany (@bmizerany) +* Brian Ketelsen (@bketelsen) +* Bryan Matsuo (@bmatsuo) +* Caleb Spare (@cespare) +* Evan Shaw (@edsrzf) +* Gary Burd (@garyburd) +* George Rogers (@georgerogers42) +* Keith Rarick (@kr) +* Matt Williams (@mattyw) +* Mike Stipicevic (@wickedchicken) +* Nick Saika (@nesv) +* Timothy Cyrus (@tcyrus) +* binqin (@binku87) + +## LICENSE + +Copyright (C) 2012 by Keith Rarick, Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/bmizerany/pat/mux.go b/vendor/github.com/bmizerany/pat/mux.go new file mode 100644 index 00000000..3009e979 --- /dev/null +++ b/vendor/github.com/bmizerany/pat/mux.go @@ -0,0 +1,314 @@ +// Package pat implements a simple URL pattern muxer +package pat + +import ( + "net/http" + "net/url" + "strings" +) + +// PatternServeMux is an HTTP request multiplexer. 
It matches the URL of each +// incoming request against a list of registered patterns with their associated +// methods and calls the handler for the pattern that most closely matches the +// URL. +// +// Pattern matching attempts each pattern in the order in which they were +// registered. +// +// Patterns may contain literals or captures. Capture names start with a colon +// and consist of letters A-Z, a-z, _, and 0-9. The rest of the pattern +// matches literally. The portion of the URL matching each name ends with an +// occurrence of the character in the pattern immediately following the name, +// or a /, whichever comes first. It is possible for a name to match the empty +// string. +// +// Example pattern with one capture: +// /hello/:name +// Will match: +// /hello/blake +// /hello/keith +// Will not match: +// /hello/blake/ +// /hello/blake/foo +// /foo +// /foo/bar +// +// Example 2: +// /hello/:name/ +// Will match: +// /hello/blake/ +// /hello/keith/foo +// /hello/blake +// /hello/keith +// Will not match: +// /foo +// /foo/bar +// +// A pattern ending with a slash will add an implicit redirect for its non-slash +// version. For example: Get("/foo/", handler) also registers +// Get("/foo", handler) as a redirect. You may override it by registering +// Get("/foo", anotherhandler) before the slash version. +// +// Retrieve the capture from the r.URL.Query().Get(":name") in a handler (note +// the colon). If a capture name appears more than once, the additional values +// are appended to the previous values (see +// http://golang.org/pkg/net/url/#Values) +// +// A trivial example server is: +// +// package main +// +// import ( +// "io" +// "net/http" +// "github.com/bmizerany/pat" +// "log" +// ) +// +// // hello world, the web server +// func HelloServer(w http.ResponseWriter, req *http.Request) { +// io.WriteString(w, "hello, "+req.URL.Query().Get(":name")+"!\n") +// } +// +// func main() { +// m := pat.New() +// m.Get("/hello/:name", http.HandlerFunc(HelloServer)) +// +// // Register this pat with the default serve mux so that other packages +// // may also be exported. (i.e. /debug/pprof/*) +// http.Handle("/", m) +// err := http.ListenAndServe(":12345", nil) +// if err != nil { +// log.Fatal("ListenAndServe: ", err) +// } +// } +// +// When "Method Not Allowed": +// +// Pat knows what methods are allowed given a pattern and a URI. For +// convenience, PatternServeMux will add the Allow header for requests that +// match a pattern for a method other than the method requested and set the +// Status to "405 Method Not Allowed". +// +// If the NotFound handler is set, then it is used whenever the pattern doesn't +// match the request path for the current method (and the Allow header is not +// altered). +type PatternServeMux struct { + // NotFound, if set, is used whenever the request doesn't match any + // pattern for its method. NotFound should be set before serving any + // requests. + NotFound http.Handler + handlers map[string][]*patHandler +} + +// New returns a new PatternServeMux. +func New() *PatternServeMux { + return &PatternServeMux{handlers: make(map[string][]*patHandler)} +} + +// ServeHTTP matches r.URL.Path against its routing table using the rules +// described above. 
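+//
+// As described in the package comment, captured values are merged into
+// r.URL.RawQuery before the handler runs, so a handler registered for
+// "/hello/:name" reads the capture with r.URL.Query().Get(":name").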
+func (p *PatternServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + for _, ph := range p.handlers[r.Method] { + if params, ok := ph.try(r.URL.EscapedPath()); ok { + if len(params) > 0 && !ph.redirect { + r.URL.RawQuery = url.Values(params).Encode() + "&" + r.URL.RawQuery + } + ph.ServeHTTP(w, r) + return + } + } + + if p.NotFound != nil { + p.NotFound.ServeHTTP(w, r) + return + } + + allowed := make([]string, 0, len(p.handlers)) + for meth, handlers := range p.handlers { + if meth == r.Method { + continue + } + + for _, ph := range handlers { + if _, ok := ph.try(r.URL.EscapedPath()); ok { + allowed = append(allowed, meth) + } + } + } + + if len(allowed) == 0 { + http.NotFound(w, r) + return + } + + w.Header().Add("Allow", strings.Join(allowed, ", ")) + http.Error(w, "Method Not Allowed", 405) +} + +// Head will register a pattern with a handler for HEAD requests. +func (p *PatternServeMux) Head(pat string, h http.Handler) { + p.Add("HEAD", pat, h) +} + +// Get will register a pattern with a handler for GET requests. +// It also registers pat for HEAD requests. If this needs to be overridden, use +// Head before Get with pat. +func (p *PatternServeMux) Get(pat string, h http.Handler) { + p.Add("HEAD", pat, h) + p.Add("GET", pat, h) +} + +// Post will register a pattern with a handler for POST requests. +func (p *PatternServeMux) Post(pat string, h http.Handler) { + p.Add("POST", pat, h) +} + +// Put will register a pattern with a handler for PUT requests. +func (p *PatternServeMux) Put(pat string, h http.Handler) { + p.Add("PUT", pat, h) +} + +// Del will register a pattern with a handler for DELETE requests. +func (p *PatternServeMux) Del(pat string, h http.Handler) { + p.Add("DELETE", pat, h) +} + +// Options will register a pattern with a handler for OPTIONS requests. +func (p *PatternServeMux) Options(pat string, h http.Handler) { + p.Add("OPTIONS", pat, h) +} + +// Patch will register a pattern with a handler for PATCH requests. +func (p *PatternServeMux) Patch(pat string, h http.Handler) { + p.Add("PATCH", pat, h) +} + +// Add will register a pattern with a handler for meth requests. +func (p *PatternServeMux) Add(meth, pat string, h http.Handler) { + p.add(meth, pat, h, false) +} + +func (p *PatternServeMux) add(meth, pat string, h http.Handler, redirect bool) { + handlers := p.handlers[meth] + for _, p1 := range handlers { + if p1.pat == pat { + return // found existing pattern; do nothing + } + } + handler := &patHandler{ + pat: pat, + Handler: h, + redirect: redirect, + } + p.handlers[meth] = append(handlers, handler) + + n := len(pat) + if n > 0 && pat[n-1] == '/' { + p.add(meth, pat[:n-1], http.HandlerFunc(addSlashRedirect), true) + } +} + +func addSlashRedirect(w http.ResponseWriter, r *http.Request) { + u := *r.URL + u.Path += "/" + http.Redirect(w, r, u.String(), http.StatusMovedPermanently) +} + +// Tail returns the trailing string in path after the final slash for a pat ending with a slash. 
+// +// Examples: +// +// Tail("/hello/:title/", "/hello/mr/mizerany") == "mizerany" +// Tail("/:a/", "/x/y/z") == "y/z" +// +func Tail(pat, path string) string { + var i, j int + for i < len(path) { + switch { + case j >= len(pat): + if pat[len(pat)-1] == '/' { + return path[i:] + } + return "" + case pat[j] == ':': + var nextc byte + _, nextc, j = match(pat, isAlnum, j+1) + _, _, i = match(path, matchPart(nextc), i) + case path[i] == pat[j]: + i++ + j++ + default: + return "" + } + } + return "" +} + +type patHandler struct { + pat string + http.Handler + redirect bool +} + +func (ph *patHandler) try(path string) (url.Values, bool) { + p := make(url.Values) + var i, j int + for i < len(path) { + switch { + case j >= len(ph.pat): + if ph.pat != "/" && len(ph.pat) > 0 && ph.pat[len(ph.pat)-1] == '/' { + return p, true + } + return nil, false + case ph.pat[j] == ':': + var name, val string + var nextc byte + name, nextc, j = match(ph.pat, isAlnum, j+1) + val, _, i = match(path, matchPart(nextc), i) + escval, err := url.QueryUnescape(val) + if err != nil { + return nil, false + } + p.Add(":"+name, escval) + case path[i] == ph.pat[j]: + i++ + j++ + default: + return nil, false + } + } + if j != len(ph.pat) { + return nil, false + } + return p, true +} + +func matchPart(b byte) func(byte) bool { + return func(c byte) bool { + return c != b && c != '/' + } +} + +func match(s string, f func(byte) bool, i int) (matched string, next byte, j int) { + j = i + for j < len(s) && f(s[j]) { + j++ + } + if j < len(s) { + next = s[j] + } + return s[i:j], next, j +} + +func isAlpha(ch byte) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' +} + +func isDigit(ch byte) bool { + return '0' <= ch && ch <= '9' +} + +func isAlnum(ch byte) bool { + return isAlpha(ch) || isDigit(ch) +} diff --git a/vendor/github.com/cihub/seelog/LICENSE.txt b/vendor/github.com/cihub/seelog/LICENSE.txt new file mode 100644 index 00000000..bd5611d9 --- /dev/null +++ b/vendor/github.com/cihub/seelog/LICENSE.txt @@ -0,0 +1,24 @@ +Copyright (c) 2012, Cloud Instruments Co., Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the Cloud Instruments Co., Ltd. nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/cihub/seelog/archive/archive.go b/vendor/github.com/cihub/seelog/archive/archive.go new file mode 100644 index 00000000..923036f2 --- /dev/null +++ b/vendor/github.com/cihub/seelog/archive/archive.go @@ -0,0 +1,198 @@ +package archive + +import ( + "archive/tar" + "archive/zip" + "fmt" + "io" + "io/ioutil" + "os" + "time" + + "github.com/cihub/seelog/archive/gzip" +) + +// Reader is the interface for reading files from an archive. +type Reader interface { + NextFile() (name string, err error) + io.Reader +} + +// ReadCloser is the interface that groups Reader with the Close method. +type ReadCloser interface { + Reader + io.Closer +} + +// Writer is the interface for writing files to an archived format. +type Writer interface { + NextFile(name string, fi os.FileInfo) error + io.Writer +} + +// WriteCloser is the interface that groups Writer with the Close method. +type WriteCloser interface { + Writer + io.Closer +} + +type nopCloser struct{ Reader } + +func (nopCloser) Close() error { return nil } + +// NopCloser returns a ReadCloser with a no-op Close method wrapping the +// provided Reader r. +func NopCloser(r Reader) ReadCloser { + return nopCloser{r} +} + +// Copy copies from src to dest until either EOF is reached on src or an error +// occurs. +// +// When the archive format of src matches that of dst, Copy streams the files +// directly into dst. Otherwise, copy buffers the contents to disk to compute +// headers before writing to dst. +func Copy(dst Writer, src Reader) error { + switch src := src.(type) { + case tarReader: + if dst, ok := dst.(tarWriter); ok { + return copyTar(dst, src) + } + case zipReader: + if dst, ok := dst.(zipWriter); ok { + return copyZip(dst, src) + } + // Switch on concrete type because gzip has no special methods + case *gzip.Reader: + if dst, ok := dst.(*gzip.Writer); ok { + _, err := io.Copy(dst, src) + return err + } + } + + return copyBuffer(dst, src) +} + +func copyBuffer(dst Writer, src Reader) (err error) { + const defaultFileMode = 0666 + + buf, err := ioutil.TempFile("", "archive_copy_buffer") + if err != nil { + return err + } + defer os.Remove(buf.Name()) // Do not care about failure removing temp + defer buf.Close() // Do not care about failure closing temp + for { + // Handle the next file + name, err := src.NextFile() + switch err { + case io.EOF: // Done copying + return nil + default: // Failed to write: bail out + return err + case nil: // Proceed below + } + + // Buffer the file + if _, err := io.Copy(buf, src); err != nil { + return fmt.Errorf("buffer to disk: %v", err) + } + + // Seek to the start of the file for full file copy + if _, err := buf.Seek(0, os.SEEK_SET); err != nil { + return err + } + + // Set desired file permissions + if err := os.Chmod(buf.Name(), defaultFileMode); err != nil { + return err + } + fi, err := buf.Stat() + if err != nil { + return err + } + + // Write the buffered file + if err := dst.NextFile(name, fi); err != nil { + return err + } + if _, err := io.Copy(dst, buf); err != nil { + return fmt.Errorf("copy to dst: %v", err) + } + if err := buf.Truncate(0); err != nil { + return err + } + if _, err := buf.Seek(0, os.SEEK_SET); err != nil { + return err + } + } +} + +type tarReader interface { + Next() (*tar.Header, error) + io.Reader +} + +type tarWriter interface { + WriteHeader(hdr *tar.Header) error + io.Writer +} + +type zipReader interface { + Files() []*zip.File +} + +type zipWriter interface { + CreateHeader(fh *zip.FileHeader) (io.Writer, error) +} + +func copyTar(w 
tarWriter, r tarReader) error { + for { + hdr, err := r.Next() + switch err { + case io.EOF: + return nil + default: // Handle error + return err + case nil: // Proceed below + } + + info := hdr.FileInfo() + // Skip directories + if info.IsDir() { + continue + } + if err := w.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(w, r); err != nil { + return err + } + } +} + +func copyZip(zw zipWriter, r zipReader) error { + for _, f := range r.Files() { + if err := copyZipFile(zw, f); err != nil { + return err + } + } + return nil +} + +func copyZipFile(zw zipWriter, f *zip.File) error { + rc, err := f.Open() + if err != nil { + return err + } + defer rc.Close() // Read-only + + hdr := f.FileHeader + hdr.SetModTime(time.Now()) + w, err := zw.CreateHeader(&hdr) + if err != nil { + return err + } + _, err = io.Copy(w, rc) + return err +} diff --git a/vendor/github.com/cihub/seelog/archive/gzip/gzip.go b/vendor/github.com/cihub/seelog/archive/gzip/gzip.go new file mode 100644 index 00000000..ea121018 --- /dev/null +++ b/vendor/github.com/cihub/seelog/archive/gzip/gzip.go @@ -0,0 +1,64 @@ +// Package gzip implements reading and writing of gzip format compressed files. +// See the compress/gzip package for more details. +package gzip + +import ( + "compress/gzip" + "fmt" + "io" + "os" +) + +// Reader is an io.Reader that can be read to retrieve uncompressed data from a +// gzip-format compressed file. +type Reader struct { + gzip.Reader + name string + isEOF bool +} + +// NewReader creates a new Reader reading the given reader. +func NewReader(r io.Reader, name string) (*Reader, error) { + gr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &Reader{ + Reader: *gr, + name: name, + }, nil +} + +// NextFile returns the file name. Calls subsequent to the first call will +// return EOF. +func (r *Reader) NextFile() (name string, err error) { + if r.isEOF { + return "", io.EOF + } + + r.isEOF = true + return r.name, nil +} + +// Writer is an io.WriteCloser. Writes to a Writer are compressed and written to w. +type Writer struct { + gzip.Writer + name string + noMoreFiles bool +} + +// NextFile never returns a next file, and should not be called more than once. +func (w *Writer) NextFile(name string, _ os.FileInfo) error { + if w.noMoreFiles { + return fmt.Errorf("gzip: only accepts one file: already received %q and now %q", w.name, name) + } + w.noMoreFiles = true + w.name = name + return nil +} + +// NewWriter returns a new Writer. Writes to the returned writer are compressed +// and written to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{Writer: *gzip.NewWriter(w)} +} diff --git a/vendor/github.com/cihub/seelog/archive/tar/tar.go b/vendor/github.com/cihub/seelog/archive/tar/tar.go new file mode 100644 index 00000000..8dd87f57 --- /dev/null +++ b/vendor/github.com/cihub/seelog/archive/tar/tar.go @@ -0,0 +1,72 @@ +package tar + +import ( + "archive/tar" + "io" + "os" +) + +// Reader provides sequential access to the contents of a tar archive. +type Reader struct { + tar.Reader +} + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { + return &Reader{Reader: *tar.NewReader(r)} +} + +// NextFile advances to the next file in the tar archive. +func (r *Reader) NextFile() (name string, err error) { + hdr, err := r.Next() + if err != nil { + return "", err + } + return hdr.Name, nil +} + +// Writer provides sequential writing of a tar archive in POSIX.1 format. 
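+//
+// A hypothetical write sequence (dst is any io.Writer, fi the os.FileInfo of
+// the file being added, src a reader over its contents):
+//
+//	w := NewWriter(dst)
+//	if err := w.NextFile("name.txt", fi); err != nil { /* handle */ }
+//	if _, err := io.Copy(w, src); err != nil { /* handle */ }
+//	if err := w.Close(); err != nil { /* handle */ }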
+type Writer struct {
+	tar.Writer
+	closers []io.Closer
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{Writer: *tar.NewWriter(w)}
+}
+
+// NewWriteMultiCloser creates a new Writer writing to w that also closes all
+// closers in order on close.
+func NewWriteMultiCloser(w io.WriteCloser, closers ...io.Closer) *Writer {
+	return &Writer{
+		Writer:  *tar.NewWriter(w),
+		closers: closers,
+	}
+}
+
+// NextFile computes and writes a header and prepares to accept the file's
+// contents.
+func (w *Writer) NextFile(name string, fi os.FileInfo) error {
+	if name == "" {
+		name = fi.Name()
+	}
+	hdr, err := tar.FileInfoHeader(fi, name)
+	if err != nil {
+		return err
+	}
+	hdr.Name = name
+	return w.WriteHeader(hdr)
+}
+
+// Close closes the tar archive and all other closers, flushing any unwritten
+// data to the underlying writer.
+func (w *Writer) Close() error {
+	err := w.Writer.Close()
+	for _, c := range w.closers {
+		if cerr := c.Close(); cerr != nil && err == nil {
+			err = cerr
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/cihub/seelog/archive/zip/zip.go b/vendor/github.com/cihub/seelog/archive/zip/zip.go
new file mode 100644
index 00000000..4210b03b
--- /dev/null
+++ b/vendor/github.com/cihub/seelog/archive/zip/zip.go
@@ -0,0 +1,89 @@
+package zip

+import (
+	"archive/zip"
+	"io"
+	"os"
+)
+
+// Reader provides sequential access to the contents of a zip archive.
+type Reader struct {
+	zip.Reader
+	unread []*zip.File
+	rc     io.ReadCloser
+}
+
+// NewReader returns a new Reader reading from r, which is assumed to have the
+// given size in bytes.
+func NewReader(r io.ReaderAt, size int64) (*Reader, error) {
+	zr, err := zip.NewReader(r, size)
+	if err != nil {
+		return nil, err
+	}
+	return &Reader{Reader: *zr}, nil
+}
+
+// NextFile advances to the next file in the zip archive.
+func (r *Reader) NextFile() (name string, err error) {
+	// Initialize unread
+	if r.unread == nil {
+		r.unread = r.Files()[:]
+	}
+
+	// Close previous file
+	if r.rc != nil {
+		r.rc.Close() // Read-only
+	}
+
+	if len(r.unread) == 0 {
+		return "", io.EOF
+	}
+
+	// Open and return next unread
+	f := r.unread[0]
+	name, r.unread = f.Name, r.unread[1:]
+	r.rc, err = f.Open()
+	if err != nil {
+		return "", err
+	}
+	return name, nil
+}
+
+func (r *Reader) Read(p []byte) (n int, err error) {
+	return r.rc.Read(p)
+}
+
+// Files returns the full list of files in the zip archive.
+func (r *Reader) Files() []*zip.File {
+	return r.File
+}
+
+// Writer provides sequential writing of a zip archive.
+type Writer struct {
+	zip.Writer
+	w io.Writer
+}
+
+// NewWriter returns a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{Writer: *zip.NewWriter(w)}
+}
+
+// NextFile computes and writes a header and prepares to accept the file's
+// contents.
+func (w *Writer) NextFile(name string, fi os.FileInfo) error { + if name == "" { + name = fi.Name() + } + hdr, err := zip.FileInfoHeader(fi) + if err != nil { + return err + } + hdr.Name = name + w.w, err = w.CreateHeader(hdr) + return err +} + +func (w *Writer) Write(p []byte) (n int, err error) { + return w.w.Write(p) +} diff --git a/vendor/github.com/deckarep/golang-set/LICENSE b/vendor/github.com/deckarep/golang-set/LICENSE new file mode 100644 index 00000000..b5768f89 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/LICENSE @@ -0,0 +1,22 @@ +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/deckarep/golang-set/README.md b/vendor/github.com/deckarep/golang-set/README.md new file mode 100644 index 00000000..c3b50b2c --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/README.md @@ -0,0 +1,95 @@ +[![Build Status](https://travis-ci.org/deckarep/golang-set.svg?branch=master)](https://travis-ci.org/deckarep/golang-set) +[![Go Report Card](https://goreportcard.com/badge/github.com/deckarep/golang-set)](https://goreportcard.com/report/github.com/deckarep/golang-set) +[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.svg)](http://godoc.org/github.com/deckarep/golang-set) + +## golang-set + + +The missing set collection for the Go language. Until Go has sets built-in...use this. + +Coming from Python one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python. +You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say simply ignore this repository +and carry-on and to the rest that find this useful please contribute in helping me make it better by: + +* Helping to make more idiomatic improvements to the code. +* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~ +* Helping to make the unit-tests more robust and kick-ass. +* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set) +* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.) 
+ +I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang) + +*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use the golang-set in a completely generic and type-safe way by allowing you to generate a supporting .go file based on your custom types. + +## Features (as of 9/22/2014) + +* a CartesianProduct() method has been added with unit-tests: [Read more about the cartesian product](http://en.wikipedia.org/wiki/Cartesian_product) + +## Features (as of 9/15/2014) + +* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set) + +## Features (as of 4/22/2014) + +* One common interface to both implementations +* Two set implementations to choose from + * a thread-safe implementation designed for concurrent use + * a non-thread-safe implementation designed for performance +* 75 benchmarks for both implementations +* 35 unit tests for both implementations +* 14 concurrent tests for the thread-safe implementation + + + +Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind +however that the Python set is a built-in type and supports additional features and syntax that make it awesome. + +## Examples but not exhaustive: + +```go +requiredClasses := mapset.NewSet() +requiredClasses.Add("Cooking") +requiredClasses.Add("English") +requiredClasses.Add("Math") +requiredClasses.Add("Biology") + +scienceSlice := []interface{}{"Biology", "Chemistry"} +scienceClasses := mapset.NewSetFromSlice(scienceSlice) + +electiveClasses := mapset.NewSet() +electiveClasses.Add("Welding") +electiveClasses.Add("Music") +electiveClasses.Add("Automotive") + +bonusClasses := mapset.NewSet() +bonusClasses.Add("Go Programming") +bonusClasses.Add("Python Programming") + +//Show me all the available classes I can take +allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses) +fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming} + + +//Is cooking considered a science class? +fmt.Println(scienceClasses.Contains("Cooking")) //false + +//Show me all classes that are not science classes, since I hate science. +fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding} + +//Which science classes are also required classes? +fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology} + +//How many bonus classes do you offer? +fmt.Println(bonusClasses.Cardinality()) //2 + +//Do you have the following classes? Welding, Automotive and English? +fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true +``` + +Thanks! 
+ +-Ralph + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge") + +[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon) diff --git a/vendor/github.com/deckarep/golang-set/iterator.go b/vendor/github.com/deckarep/golang-set/iterator.go new file mode 100644 index 00000000..9dfecade --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/iterator.go @@ -0,0 +1,58 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +// Iterator defines an iterator over a Set, its C channel can be used to range over the Set's +// elements. +type Iterator struct { + C <-chan interface{} + stop chan struct{} +} + +// Stop stops the Iterator, no further elements will be received on C, C will be closed. +func (i *Iterator) Stop() { + // Allows for Stop() to be called multiple times + // (close() panics when called on already closed channel) + defer func() { + recover() + }() + + close(i.stop) + + // Exhaust any remaining elements. + for range i.C { + } +} + +// newIterator returns a new Iterator instance together with its item and stop channels. +func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) { + itemChan := make(chan interface{}) + stopChan := make(chan struct{}) + return &Iterator{ + C: itemChan, + stop: stopChan, + }, itemChan, stopChan +} diff --git a/vendor/github.com/deckarep/golang-set/set.go b/vendor/github.com/deckarep/golang-set/set.go new file mode 100644 index 00000000..7411982a --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/set.go @@ -0,0 +1,214 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package mapset implements a simple and generic set collection. +// Items stored within it are unordered and unique. It supports +// typical set operations: membership testing, intersection, union, +// difference, symmetric difference and cloning. +// +// Package mapset provides two implementations of the Set +// interface. The default implementation is safe for concurrent +// access, but a non-thread-safe implementation is also provided for +// programs that can benefit from the slight speed improvement and +// that can enforce mutual exclusion through other means. +package mapset + +// Set is the primary interface provided by the mapset package. It +// represents an unordered set of data and a large number of +// operations that can be applied to that set. +type Set interface { + // Adds an element to the set. Returns whether + // the item was added. + Add(i interface{}) bool + + // Returns the number of elements in the set. + Cardinality() int + + // Removes all elements from the set, leaving + // the empty set. + Clear() + + // Returns a clone of the set using the same + // implementation, duplicating all keys. + Clone() Set + + // Returns whether the given items + // are all in the set. + Contains(i ...interface{}) bool + + // Returns the difference between this set + // and other. The returned set will contain + // all elements of this set that are not also + // elements of other. + // + // Note that the argument to Difference + // must be of the same type as the receiver + // of the method. Otherwise, Difference will + // panic. + Difference(other Set) Set + + // Determines if two sets are equal to each + // other. If they have the same cardinality + // and contain the same elements, they are + // considered equal. The order in which + // the elements were added is irrelevant. + // + // Note that the argument to Equal must be + // of the same type as the receiver of the + // method. Otherwise, Equal will panic. + Equal(other Set) bool + + // Returns a new set containing only the elements + // that exist only in both sets. + // + // Note that the argument to Intersect + // must be of the same type as the receiver + // of the method. Otherwise, Intersect will + // panic. + Intersect(other Set) Set + + // Determines if every element in this set is in + // the other set but the two sets are not equal. + // + // Note that the argument to IsProperSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsProperSubset + // will panic. + IsProperSubset(other Set) bool + + // Determines if every element in the other set + // is in this set but the two sets are not + // equal. + // + // Note that the argument to IsSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsSuperset will + // panic. + IsProperSuperset(other Set) bool + + // Determines if every element in this set is in + // the other set. + // + // Note that the argument to IsSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsSubset will + // panic. 
+	IsSubset(other Set) bool
+
+	// Determines if every element in the other set
+	// is in this set.
+	//
+	// Note that the argument to IsSuperset
+	// must be of the same type as the receiver
+	// of the method. Otherwise, IsSuperset will
+	// panic.
+	IsSuperset(other Set) bool
+
+	// Iterates over the elements and executes the passed func against each element.
+	// If the passed func returns true, iteration stops at that point.
+	Each(func(interface{}) bool)
+
+	// Returns a channel of elements that you can
+	// range over.
+	Iter() <-chan interface{}
+
+	// Returns an Iterator object that you can
+	// use to range over the set.
+	Iterator() *Iterator
+
+	// Removes a single element from the set.
+	Remove(i interface{})
+
+	// Provides a convenient string representation
+	// of the current state of the set.
+	String() string
+
+	// Returns a new set with all elements which are
+	// in either this set or the other set but not in both.
+	//
+	// Note that the argument to SymmetricDifference
+	// must be of the same type as the receiver
+	// of the method. Otherwise, SymmetricDifference
+	// will panic.
+	SymmetricDifference(other Set) Set
+
+	// Returns a new set with all elements in both sets.
+	//
+	// Note that the argument to Union must be of the
+	// same type as the receiver of the method.
+	// Otherwise, Union will panic.
+	Union(other Set) Set
+
+	// Returns all subsets of a given set (Power Set).
+	PowerSet() Set
+
+	// Returns the Cartesian Product of two sets.
+	CartesianProduct(other Set) Set
+
+	// Returns the members of the set as a slice.
+	ToSlice() []interface{}
+}
+
+// NewSet creates and returns a reference to an empty set. Operations
+// on the resulting set are thread-safe.
+func NewSet(s ...interface{}) Set {
+	set := newThreadSafeSet()
+	for _, item := range s {
+		set.Add(item)
+	}
+	return &set
+}
+
+// NewSetWith creates and returns a new set with the given elements.
+// Operations on the resulting set are thread-safe.
+func NewSetWith(elts ...interface{}) Set {
+	return NewSetFromSlice(elts)
+}
+
+// NewSetFromSlice creates and returns a reference to a set from an
+// existing slice. Operations on the resulting set are thread-safe.
+func NewSetFromSlice(s []interface{}) Set {
+	a := NewSet(s...)
+	return a
+}
+
+// NewThreadUnsafeSet creates and returns a reference to an empty set.
+// Operations on the resulting set are not thread-safe.
+func NewThreadUnsafeSet() Set {
+	set := newThreadUnsafeSet()
+	return &set
+}
+
+// NewThreadUnsafeSetFromSlice creates and returns a reference to a
+// set from an existing slice. Operations on the resulting set are
+// not thread-safe.
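+//
+// For example (a sketch; duplicates collapse, so Cardinality is 2):
+//
+//	s := NewThreadUnsafeSetFromSlice([]interface{}{"a", "b", "b"})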
+func NewThreadUnsafeSetFromSlice(s []interface{}) Set { + a := NewThreadUnsafeSet() + for _, item := range s { + a.Add(item) + } + return a +} diff --git a/vendor/github.com/deckarep/golang-set/threadsafe.go b/vendor/github.com/deckarep/golang-set/threadsafe.go new file mode 100644 index 00000000..8dae1619 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/threadsafe.go @@ -0,0 +1,271 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import "sync" + +type threadSafeSet struct { + s threadUnsafeSet + sync.RWMutex +} + +func newThreadSafeSet() threadSafeSet { + return threadSafeSet{s: newThreadUnsafeSet()} +} + +func (set *threadSafeSet) Add(i interface{}) bool { + set.Lock() + ret := set.s.Add(i) + set.Unlock() + return ret +} + +func (set *threadSafeSet) Contains(i ...interface{}) bool { + set.RLock() + ret := set.s.Contains(i...) 
+ set.RUnlock() + return ret +} + +func (set *threadSafeSet) IsSubset(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + ret := set.s.IsSubset(&o.s) + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) IsProperSubset(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + defer set.RUnlock() + o.RLock() + defer o.RUnlock() + + return set.s.IsProperSubset(&o.s) +} + +func (set *threadSafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadSafeSet) IsProperSuperset(other Set) bool { + return other.IsProperSubset(set) +} + +func (set *threadSafeSet) Union(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeUnion} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Intersect(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeIntersection} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Difference(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeDifference} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) SymmetricDifference(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeDifference} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Clear() { + set.Lock() + set.s = newThreadUnsafeSet() + set.Unlock() +} + +func (set *threadSafeSet) Remove(i interface{}) { + set.Lock() + delete(set.s, i) + set.Unlock() +} + +func (set *threadSafeSet) Cardinality() int { + set.RLock() + defer set.RUnlock() + return len(set.s) +} + +func (set *threadSafeSet) Each(cb func(interface{}) bool) { + set.RLock() + for elem := range set.s { + if cb(elem) { + break + } + } + set.RUnlock() +} + +func (set *threadSafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + set.RLock() + + for elem := range set.s { + ch <- elem + } + close(ch) + set.RUnlock() + }() + + return ch +} + +func (set *threadSafeSet) Iterator() *Iterator { + iterator, ch, stopCh := newIterator() + + go func() { + set.RLock() + L: + for elem := range set.s { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + set.RUnlock() + }() + + return iterator +} + +func (set *threadSafeSet) Equal(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + ret := set.s.Equal(&o.s) + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Clone() Set { + set.RLock() + + unsafeClone := set.s.Clone().(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeClone} + set.RUnlock() + return ret +} + +func (set *threadSafeSet) String() string { + set.RLock() + ret := set.s.String() + set.RUnlock() + return ret +} + +func (set *threadSafeSet) PowerSet() Set { + set.RLock() + ret := set.s.PowerSet() + set.RUnlock() + return ret +} + +func (set *threadSafeSet) CartesianProduct(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeCartProduct} + set.RUnlock() + o.RUnlock() + return 
ret +} + +func (set *threadSafeSet) ToSlice() []interface{} { + keys := make([]interface{}, 0, set.Cardinality()) + set.RLock() + for elem := range set.s { + keys = append(keys, elem) + } + set.RUnlock() + return keys +} + +func (set *threadSafeSet) MarshalJSON() ([]byte, error) { + set.RLock() + b, err := set.s.MarshalJSON() + set.RUnlock() + + return b, err +} + +func (set *threadSafeSet) UnmarshalJSON(p []byte) error { + set.RLock() + err := set.s.UnmarshalJSON(p) + set.RUnlock() + + return err +} diff --git a/vendor/github.com/deckarep/golang-set/threadunsafe.go b/vendor/github.com/deckarep/golang-set/threadunsafe.go new file mode 100644 index 00000000..fec2e378 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/threadunsafe.go @@ -0,0 +1,325 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +type threadUnsafeSet map[interface{}]struct{} + +// An OrderedPair represents a 2-tuple of values. +type OrderedPair struct { + First interface{} + Second interface{} +} + +func newThreadUnsafeSet() threadUnsafeSet { + return make(threadUnsafeSet) +} + +// Equal says whether two 2-tuples contain the same values in the same order. 
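+//
+// For example, the pair (1, 2) equals (1, 2) but not (2, 1).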
+func (pair *OrderedPair) Equal(other OrderedPair) bool { + if pair.First == other.First && + pair.Second == other.Second { + return true + } + + return false +} + +func (set *threadUnsafeSet) Add(i interface{}) bool { + _, found := (*set)[i] + (*set)[i] = struct{}{} + return !found //False if it existed already +} + +func (set *threadUnsafeSet) Contains(i ...interface{}) bool { + for _, val := range i { + if _, ok := (*set)[val]; !ok { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsSubset(other Set) bool { + _ = other.(*threadUnsafeSet) + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsProperSubset(other Set) bool { + return set.IsSubset(other) && !set.Equal(other) +} + +func (set *threadUnsafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadUnsafeSet) IsProperSuperset(other Set) bool { + return set.IsSuperset(other) && !set.Equal(other) +} + +func (set *threadUnsafeSet) Union(other Set) Set { + o := other.(*threadUnsafeSet) + + unionedSet := newThreadUnsafeSet() + + for elem := range *set { + unionedSet.Add(elem) + } + for elem := range *o { + unionedSet.Add(elem) + } + return &unionedSet +} + +func (set *threadUnsafeSet) Intersect(other Set) Set { + o := other.(*threadUnsafeSet) + + intersection := newThreadUnsafeSet() + // loop over smaller set + if set.Cardinality() < other.Cardinality() { + for elem := range *set { + if other.Contains(elem) { + intersection.Add(elem) + } + } + } else { + for elem := range *o { + if set.Contains(elem) { + intersection.Add(elem) + } + } + } + return &intersection +} + +func (set *threadUnsafeSet) Difference(other Set) Set { + _ = other.(*threadUnsafeSet) + + difference := newThreadUnsafeSet() + for elem := range *set { + if !other.Contains(elem) { + difference.Add(elem) + } + } + return &difference +} + +func (set *threadUnsafeSet) SymmetricDifference(other Set) Set { + _ = other.(*threadUnsafeSet) + + aDiff := set.Difference(other) + bDiff := other.Difference(set) + return aDiff.Union(bDiff) +} + +func (set *threadUnsafeSet) Clear() { + *set = newThreadUnsafeSet() +} + +func (set *threadUnsafeSet) Remove(i interface{}) { + delete(*set, i) +} + +func (set *threadUnsafeSet) Cardinality() int { + return len(*set) +} + +func (set *threadUnsafeSet) Each(cb func(interface{}) bool) { + for elem := range *set { + if cb(elem) { + break + } + } +} + +func (set *threadUnsafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + for elem := range *set { + ch <- elem + } + close(ch) + }() + + return ch +} + +func (set *threadUnsafeSet) Iterator() *Iterator { + iterator, ch, stopCh := newIterator() + + go func() { + L: + for elem := range *set { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + }() + + return iterator +} + +func (set *threadUnsafeSet) Equal(other Set) bool { + _ = other.(*threadUnsafeSet) + + if set.Cardinality() != other.Cardinality() { + return false + } + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) Clone() Set { + clonedSet := newThreadUnsafeSet() + for elem := range *set { + clonedSet.Add(elem) + } + return &clonedSet +} + +func (set *threadUnsafeSet) String() string { + items := make([]string, 0, len(*set)) + + for elem := range *set { + items = append(items, fmt.Sprintf("%v", elem)) + } + return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) +} + +// String outputs a 
2-tuple in the form "(A, B)". +func (pair OrderedPair) String() string { + return fmt.Sprintf("(%v, %v)", pair.First, pair.Second) +} + +func (set *threadUnsafeSet) PowerSet() Set { + powSet := NewThreadUnsafeSet() + nullset := newThreadUnsafeSet() + powSet.Add(&nullset) + + for es := range *set { + u := newThreadUnsafeSet() + j := powSet.Iter() + for er := range j { + p := newThreadUnsafeSet() + if reflect.TypeOf(er).Name() == "" { + k := er.(*threadUnsafeSet) + for ek := range *(k) { + p.Add(ek) + } + } else { + p.Add(er) + } + p.Add(es) + u.Add(&p) + } + + powSet = powSet.Union(&u) + } + + return powSet +} + +func (set *threadUnsafeSet) CartesianProduct(other Set) Set { + o := other.(*threadUnsafeSet) + cartProduct := NewThreadUnsafeSet() + + for i := range *set { + for j := range *o { + elem := OrderedPair{First: i, Second: j} + cartProduct.Add(elem) + } + } + + return cartProduct +} + +func (set *threadUnsafeSet) ToSlice() []interface{} { + keys := make([]interface{}, 0, set.Cardinality()) + for elem := range *set { + keys = append(keys, elem) + } + + return keys +} + +// MarshalJSON creates a JSON array from the set, it marshals all elements +func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) { + items := make([]string, 0, set.Cardinality()) + + for elem := range *set { + b, err := json.Marshal(elem) + if err != nil { + return nil, err + } + + items = append(items, string(b)) + } + + return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil +} + +// UnmarshalJSON recreates a set from a JSON array, it only decodes +// primitive types. Numbers are decoded as json.Number. +func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error { + var i []interface{} + + d := json.NewDecoder(bytes.NewReader(b)) + d.UseNumber() + err := d.Decode(&i) + if err != nil { + return err + } + + for _, v := range i { + switch t := v.(type) { + case []interface{}, map[string]interface{}: + continue + default: + set.Add(t) + } + } + + return nil +} diff --git a/vendor/github.com/eventials/go-tus/Dockerfile b/vendor/github.com/eventials/go-tus/Dockerfile new file mode 100644 index 00000000..41407b71 --- /dev/null +++ b/vendor/github.com/eventials/go-tus/Dockerfile @@ -0,0 +1,9 @@ +FROM golang:1.9 + +RUN mkdir -p /go/src/github.com/eventials/go-tus + +WORKDIR /go/src/github.com/eventials/go-tus + +RUN go get github.com/stretchr/testify +RUN go get github.com/tus/tusd +RUN go get github.com/syndtr/goleveldb/leveldb diff --git a/vendor/github.com/eventials/go-tus/LICENSE b/vendor/github.com/eventials/go-tus/LICENSE new file mode 100644 index 00000000..795dda81 --- /dev/null +++ b/vendor/github.com/eventials/go-tus/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Eventials + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/eventials/go-tus/README.md b/vendor/github.com/eventials/go-tus/README.md
new file mode 100644
index 00000000..d7ee4760
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/README.md
@@ -0,0 +1,62 @@
+# go-tus [![Build Status](https://travis-ci.org/eventials/go-tus.svg?branch=master)](https://travis-ci.org/eventials/go-tus) [![Go Report Card](https://goreportcard.com/badge/github.com/eventials/go-tus)](https://goreportcard.com/report/github.com/eventials/go-tus) [![GoDoc](https://godoc.org/github.com/eventials/go-tus?status.svg)](http://godoc.org/github.com/eventials/go-tus)
+
+A pure Go client for the [tus resumable upload protocol](http://tus.io/).
+
+## Example
+
+```go
+package main
+
+import (
+	"os"
+	"github.com/eventials/go-tus"
+)
+
+func main() {
+	f, err := os.Open("my-file.txt")
+
+	if err != nil {
+		panic(err)
+	}
+
+	defer f.Close()
+
+	// create the tus client.
+	client, _ := tus.NewClient("https://tus.example.org/files", nil)
+
+	// create an upload from a file.
+	upload, _ := tus.NewUploadFromFile(f)
+
+	// create the uploader.
+	uploader, _ := client.CreateUpload(upload)
+
+	// start the uploading process.
+	uploader.Upload()
+}
+```
+
+## Features
+
+> This is not a full protocol client implementation.
+
+Checksum, Termination and Concatenation extensions are not implemented yet.
+
+This client allows resuming an upload if a Store is used.
+
+## Built-in Store
+
+A Store maps an upload's fingerprint to the corresponding upload URL.
+
+| Name | Backend | Dependencies |
+|:----:|:-------:|:------------:|
+| MemoryStore | In-Memory | None |
+| LeveldbStore | LevelDB | [goleveldb](https://github.com/syndtr/goleveldb) |
+
+## Future Work
+
+- [ ] SQLite store
+- [ ] Redis store
+- [ ] Memcached store
+- [ ] Checksum extension
+- [ ] Termination extension
+- [ ] Concatenation extension
diff --git a/vendor/github.com/eventials/go-tus/client.go b/vendor/github.com/eventials/go-tus/client.go
new file mode 100644
index 00000000..0b0f8625
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/client.go
@@ -0,0 +1,260 @@
+package tus
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+	netUrl "net/url"
+	"strconv"
+)
+
+const (
+	ProtocolVersion = "1.0.0"
+)
+
+// Client represents the tus client.
+// It can be shared between goroutines to create parallel uploads.
+type Client struct {
+	Config  *Config
+	Url     string
+	Version string
+	Header  http.Header
+
+	client *http.Client
+}
+
+// NewClient creates a new tus client.
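+//
+// A minimal sketch of enabling resumable uploads (hedged: myStore stands in
+// for any implementation of the Store interface, and the URL is illustrative):
+//
+//	cfg := tus.DefaultConfig()
+//	cfg.Resume = true
+//	cfg.Store = myStore
+//	client, err := tus.NewClient("https://tus.example.org/files", cfg)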
+func NewClient(url string, config *Config) (*Client, error) { + if config == nil { + config = DefaultConfig() + } else { + if err := config.Validate(); err != nil { + return nil, err + } + } + + if config.Header == nil { + config.Header = make(http.Header) + } + + var c *http.Client + + if config.Transport == nil { + c = &http.Client{} + } else { + c = &http.Client{ + Transport: config.Transport, + } + } + + return &Client{ + Config: config, + Url: url, + Version: ProtocolVersion, + Header: config.Header, + + client: c, + }, nil +} + +func (c *Client) Do(req *http.Request) (*http.Response, error) { + for k, v := range c.Header { + req.Header[k] = v + } + + req.Header.Set("Tus-Resumable", ProtocolVersion) + + return c.client.Do(req) +} + +// CreateUpload creates a new upload in the server. +func (c *Client) CreateUpload(u *Upload) (*Uploader, error) { + if u == nil { + return nil, ErrNilUpload + } + + if c.Config.Resume && len(u.Fingerprint) == 0 { + return nil, ErrFingerprintNotSet + } + + req, err := http.NewRequest("POST", c.Url, nil) + + if err != nil { + return nil, err + } + + req.Header.Set("Content-Length", "0") + req.Header.Set("Upload-Length", strconv.FormatInt(u.size, 10)) + req.Header.Set("Upload-Metadata", u.EncodedMetadata()) + + res, err := c.Do(req) + + if err != nil { + return nil, err + } + defer res.Body.Close() + + switch res.StatusCode { + case 201: + url := res.Header.Get("Location") + + baseUrl, err := netUrl.Parse(c.Url) + if err != nil { + return nil, ErrUrlNotRecognized + } + + newUrl, err := netUrl.Parse(url) + if err != nil { + return nil, ErrUrlNotRecognized + } + if newUrl.Scheme == "" { + newUrl.Scheme = baseUrl.Scheme + url = newUrl.String() + } + + if c.Config.Resume { + c.Config.Store.Set(u.Fingerprint, url) + } + + return NewUploader(c, url, u, 0), nil + case 412: + return nil, ErrVersionMismatch + case 413: + return nil, ErrLargeUpload + default: + return nil, newClientError(res) + } +} + +// ResumeUpload resumes the upload if already created, otherwise it will return an error. +func (c *Client) ResumeUpload(u *Upload) (*Uploader, error) { + if u == nil { + return nil, ErrNilUpload + } + + if !c.Config.Resume { + return nil, ErrResumeNotEnabled + } else if len(u.Fingerprint) == 0 { + return nil, ErrFingerprintNotSet + } + + url, found := c.Config.Store.Get(u.Fingerprint) + + if !found { + return nil, ErrUploadNotFound + } + + offset, err := c.getUploadOffset(url) + + if err != nil { + return nil, err + } + + return NewUploader(c, url, u, offset), nil +} + +// CreateOrResumeUpload resumes the upload if already created or creates a new upload in the server. 
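+//
+// A usage sketch (error handling elided; f is an already opened *os.File):
+//
+//	upload, _ := tus.NewUploadFromFile(f)
+//	uploader, err := client.CreateOrResumeUpload(upload)
+//	if err == nil {
+//		err = uploader.Upload()
+//	}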
+func (c *Client) CreateOrResumeUpload(u *Upload) (*Uploader, error) {
+	if u == nil {
+		return nil, ErrNilUpload
+	}
+
+	uploader, err := c.ResumeUpload(u)
+
+	if err == nil {
+		return uploader, err
+	} else if (err == ErrResumeNotEnabled) || (err == ErrUploadNotFound) {
+		return c.CreateUpload(u)
+	}
+
+	return nil, err
+}
+
+func (c *Client) uploadChunck(url string, body io.Reader, size int64, offset int64) (int64, error) {
+	var method string
+
+	if !c.Config.OverridePatchMethod {
+		method = "PATCH"
+	} else {
+		method = "POST"
+	}
+
+	req, err := http.NewRequest(method, url, body)
+
+	if err != nil {
+		return -1, err
+	}
+
+	req.Header.Set("Content-Type", "application/offset+octet-stream")
+	req.Header.Set("Content-Length", strconv.FormatInt(size, 10))
+	req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10))
+
+	if c.Config.OverridePatchMethod {
+		req.Header.Set("X-HTTP-Method-Override", "PATCH")
+	}
+
+	res, err := c.Do(req)
+
+	if err != nil {
+		return -1, err
+	}
+	defer res.Body.Close()
+
+	switch res.StatusCode {
+	case 204:
+		if newOffset, err := strconv.ParseInt(res.Header.Get("Upload-Offset"), 10, 64); err == nil {
+			return newOffset, nil
+		} else {
+			return -1, err
+		}
+	case 409:
+		return -1, ErrOffsetMismatch
+	case 412:
+		return -1, ErrVersionMismatch
+	case 413:
+		return -1, ErrLargeUpload
+	default:
+		return -1, newClientError(res)
+	}
+}
+
+func (c *Client) getUploadOffset(url string) (int64, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+
+	if err != nil {
+		return -1, err
+	}
+
+	res, err := c.Do(req)
+
+	if err != nil {
+		return -1, err
+	}
+	defer res.Body.Close()
+
+	switch res.StatusCode {
+	case 200:
+		i, err := strconv.ParseInt(res.Header.Get("Upload-Offset"), 10, 64)
+
+		if err == nil {
+			return i, nil
+		} else {
+			return -1, err
+		}
+	case 403, 404, 410:
+		// file doesn't exist.
+		return -1, ErrUploadNotFound
+	case 412:
+		return -1, ErrVersionMismatch
+	default:
+		return -1, newClientError(res)
+	}
+}
+
+func newClientError(res *http.Response) ClientError {
+	body, _ := ioutil.ReadAll(res.Body)
+	return ClientError{
+		Code: res.StatusCode,
+		Body: body,
+	}
+}
diff --git a/vendor/github.com/eventials/go-tus/config.go b/vendor/github.com/eventials/go-tus/config.go
new file mode 100644
index 00000000..45ad9354
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/config.go
@@ -0,0 +1,48 @@
+package tus
+
+import (
+	"net/http"
+)
+
+// Config provides a way to configure the Client depending on your needs.
+type Config struct {
+	// ChunkSize divides the file into chunks of this many bytes.
+	ChunkSize int64
+	// Resume enables resumable uploads.
+	Resume bool
+	// OverridePatchMethod allows bypassing proxies by sending a POST request instead of PATCH.
+	OverridePatchMethod bool
+	// Store maps an upload's fingerprint to the corresponding upload URL.
+	// If Resume is true, a Store is required.
+	Store Store
+	// Header sets custom header values used in all requests.
+	Header http.Header
+	// Transport sets custom transport settings.
+	// Use this if you are behind a proxy.
+	Transport *http.Transport
+}
+
+// DefaultConfig returns the default Client configuration.
+func DefaultConfig() *Config {
+	return &Config{
+		ChunkSize:           2 * 1024 * 1024,
+		Resume:              false,
+		OverridePatchMethod: false,
+		Store:               nil,
+		Header:              make(http.Header),
+		Transport:           nil,
+	}
+}
+
+// Validate validates the custom configuration.
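+// It reports ErrChuckSize when ChunkSize is less than one, and ErrNilStore
+// when Resume is enabled without a Store (both defined in errors.go).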
+func (c *Config) Validate() error {
+	if c.ChunkSize < 1 {
+		return ErrChuckSize
+	}
+
+	if c.Resume && c.Store == nil {
+		return ErrNilStore
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/eventials/go-tus/doc.go b/vendor/github.com/eventials/go-tus/doc.go
new file mode 100644
index 00000000..125a72ca
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/doc.go
@@ -0,0 +1,8 @@
+// Package tus provides a client for tus protocol version 1.0.0.
+//
+// tus is a protocol based on HTTP for resumable file uploads. Resumable means that
+// an upload can be interrupted at any moment and can be resumed without
+// re-uploading the previous data again. An interruption may happen willingly, if
+// the user wants to pause, or by accident in case of a network issue or server
+// outage (http://tus.io).
+package tus
diff --git a/vendor/github.com/eventials/go-tus/docker-compose.yml b/vendor/github.com/eventials/go-tus/docker-compose.yml
new file mode 100644
index 00000000..2033c5f0
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/docker-compose.yml
@@ -0,0 +1,8 @@
+version: '2'
+services:
+  app:
+    build: .
+    working_dir: /go/src/github.com/eventials/go-tus
+    command: go run main.go
+    volumes:
+      - .:/go/src/github.com/eventials/go-tus
diff --git a/vendor/github.com/eventials/go-tus/errors.go b/vendor/github.com/eventials/go-tus/errors.go
new file mode 100644
index 00000000..ead5e675
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/errors.go
@@ -0,0 +1,29 @@
+package tus
+
+import (
+	"errors"
+	"fmt"
+)
+
+var (
+	ErrChuckSize         = errors.New("chunk size must be greater than zero.")
+	ErrNilLogger         = errors.New("logger can't be nil.")
+	ErrNilStore          = errors.New("store can't be nil if Resume is enabled.")
+	ErrNilUpload         = errors.New("upload can't be nil.")
+	ErrLargeUpload       = errors.New("upload body is too large.")
+	ErrVersionMismatch   = errors.New("protocol version mismatch.")
+	ErrOffsetMismatch    = errors.New("upload offset mismatch.")
+	ErrUploadNotFound    = errors.New("upload not found.")
+	ErrResumeNotEnabled  = errors.New("resuming not enabled.")
+	ErrFingerprintNotSet = errors.New("fingerprint not set.")
+	ErrUrlNotRecognized  = errors.New("url not recognized.")
+)
+
+type ClientError struct {
+	Code int
+	Body []byte
+}
+
+func (c ClientError) Error() string {
+	return fmt.Sprintf("unexpected status code: %d", c.Code)
+}
diff --git a/vendor/github.com/eventials/go-tus/store.go b/vendor/github.com/eventials/go-tus/store.go
new file mode 100644
index 00000000..a2edbfda
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/store.go
@@ -0,0 +1,8 @@
+package tus
+
+type Store interface {
+	Get(fingerprint string) (string, bool)
+	Set(fingerprint, url string)
+	Delete(fingerprint string)
+	Close()
+}
diff --git a/vendor/github.com/eventials/go-tus/upload.go b/vendor/github.com/eventials/go-tus/upload.go
new file mode 100644
index 00000000..61975bc6
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/upload.go
@@ -0,0 +1,107 @@
+package tus
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+type Metadata map[string]string
+
+type Upload struct {
+	stream io.ReadSeeker
+	size   int64
+	offset int64
+
+	Fingerprint string
+	Metadata    Metadata
+}
+
+// updateProgress updates the Upload information based on the given offset.
+func (u *Upload) updateProgress(offset int64) {
+	u.offset = offset
+}
+
+// Finished returns whether this upload is finished or not.
+func (u *Upload) Finished() bool {
+	return u.offset >= u.size
+}
+
+// Progress returns the progress as a percentage.
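+// The value is computed as offset*100/size with integer division, so an
+// upload of 2048 bytes with 512 bytes sent reports 25. (A cautionary note:
+// a zero-size upload would divide by zero here, so callers may want to
+// guard that case.)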
+func (u *Upload) Progress() int64 {
+	return (u.offset * 100) / u.size
+}
+
+// Offset returns the current upload offset.
+func (u *Upload) Offset() int64 {
+	return u.offset
+}
+
+// Size returns the size of the upload body.
+func (u *Upload) Size() int64 {
+	return u.size
+}
+
+// EncodedMetadata encodes the upload metadata.
+func (u *Upload) EncodedMetadata() string {
+	var encoded []string
+
+	for k, v := range u.Metadata {
+		encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(v)))
+	}
+
+	return strings.Join(encoded, ",")
+}
+
+// NewUploadFromFile creates a new Upload from an os.File.
+func NewUploadFromFile(f *os.File) (*Upload, error) {
+	fi, err := f.Stat()
+
+	if err != nil {
+		return nil, err
+	}
+
+	metadata := map[string]string{
+		"filename": fi.Name(),
+	}
+
+	fingerprint := fmt.Sprintf("%s-%d-%s", fi.Name(), fi.Size(), fi.ModTime())
+
+	return NewUpload(f, fi.Size(), metadata, fingerprint), nil
+}
+
+// NewUploadFromBytes creates a new upload from a byte slice.
+func NewUploadFromBytes(b []byte) *Upload {
+	buffer := bytes.NewReader(b)
+	return NewUpload(buffer, buffer.Size(), nil, "")
+}
+
+// NewUpload creates a new upload from an io.Reader.
+func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
+	stream, ok := reader.(io.ReadSeeker)
+
+	if !ok {
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(reader)
+		stream = bytes.NewReader(buf.Bytes())
+	}
+
+	if metadata == nil {
+		metadata = make(Metadata)
+	}
+
+	return &Upload{
+		stream: stream,
+		size:   size,
+
+		Fingerprint: fingerprint,
+		Metadata:    metadata,
+	}
+}
+
+func b64encode(s string) string {
+	return base64.StdEncoding.EncodeToString([]byte(s))
+}
diff --git a/vendor/github.com/eventials/go-tus/uploader.go b/vendor/github.com/eventials/go-tus/uploader.go
new file mode 100644
index 00000000..8e39cd7c
--- /dev/null
+++ b/vendor/github.com/eventials/go-tus/uploader.go
@@ -0,0 +1,115 @@
+package tus
+
+import (
+	"bytes"
+)
+
+type Uploader struct {
+	client     *Client
+	url        string
+	upload     *Upload
+	offset     int64
+	aborted    bool
+	uploadSubs []chan Upload
+	notifyChan chan bool
+}
+
+// NotifyUploadProgress subscribes to progress updates.
+func (u *Uploader) NotifyUploadProgress(c chan Upload) {
+	u.uploadSubs = append(u.uploadSubs, c)
+}
+
+// Abort aborts the upload process.
+// It doesn't abort the current chunk, only the remaining ones.
+func (u *Uploader) Abort() {
+	u.aborted = true
+}
+
+// IsAborted returns true if the upload was aborted.
+func (u *Uploader) IsAborted() bool {
+	return u.aborted
+}
+
+// Url returns the upload url.
+func (u *Uploader) Url() string {
+	return u.url
+}
+
+// Offset returns the current offset uploaded.
+func (u *Uploader) Offset() int64 {
+	return u.offset
+}
+
+// Upload uploads the entire body to the server.
+func (u *Uploader) Upload() error {
+	for u.offset < u.upload.size && !u.aborted {
+		err := u.UploadChunck()
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// UploadChunck uploads a single chunk.
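+// It seeks the upload stream to the current offset, reads up to ChunkSize
+// bytes, and sends them in one PATCH request via the client, advancing the
+// offset on success.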
+func (u *Uploader) UploadChunck() error { + data := make([]byte, u.client.Config.ChunkSize) + + _, err := u.upload.stream.Seek(u.offset, 0) + + if err != nil { + return err + } + + size, err := u.upload.stream.Read(data) + + if err != nil { + return err + } + + body := bytes.NewBuffer(data[:size]) + + newOffset, err := u.client.uploadChunck(u.url, body, int64(size), u.offset) + + if err != nil { + return err + } + + u.offset = newOffset + + u.upload.updateProgress(u.offset) + + u.notifyChan <- true + + return nil +} + +// Waits for a signal to broadcast to all subscribers +func (u *Uploader) broadcastProgress() { + for _ = range u.notifyChan { + for _, c := range u.uploadSubs { + c <- *u.upload + } + } +} + +// NewUploader creates a new Uploader. +func NewUploader(client *Client, url string, upload *Upload, offset int64) *Uploader { + notifyChan := make(chan bool) + + uploader := &Uploader{ + client, + url, + upload, + offset, + false, + nil, + notifyChan, + } + + go uploader.broadcastProgress() + + return uploader +} diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 00000000..bcfa1952 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 00000000..931ae316 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 00000000..cea12879 --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000..72efb035 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
+func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
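+			// (The chunk body is the 4-byte checksum just read followed by the
+			// raw payload, so the payload spans chunkLen - checksumSize bytes.)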
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 00000000..fcd192b8 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000..e6179f65 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. 
+ + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. 
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000..8c9f2049 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. 
Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000..8d393e90 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. 
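+//
+// As a worked instance of the bound computed below (32 + srcLen + srcLen/6):
+// a 6,000 byte input can grow to at most 32 + 6000 + 1000 = 7032 bytes.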
+// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. 
This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 00000000..150d91bc --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. 
+// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000..adfd979f --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
+TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . 
len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. 
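	// (Editorial note: the inlined copies of emitLiteral, extendMatch and
	// emitCopy in this routine are kept in manual sync with the standalone
	// TEXT blocks earlier in this file; the unusual register conventions
	// documented on those blocks exist precisely so that this inlining
	// stays a mechanical transcription.)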
+ // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. 
+ MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 00000000..dbcae905 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. 
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000..ece692ea --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. 
All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 00000000..f34f5b4a --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,33 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/google/gofuzz" + packages = ["."] + revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = ["assert","require"] + revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" + version = "v1.1.4" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "f8b7cf3941d3792cbbd570bb53c093adaf774334d1162c651565c97a58dc9d09" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 00000000..0ac55ef8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,33 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/davecgh/go-spew" + version = "1.1.0" + +[[constraint]] + branch = "master" + name = "github.com/google/gofuzz" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.1.4" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 00000000..2cf4f5ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
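Editor's note (illustrative, not part of the vendored sources): the snappy framing pieces vendored above fit together in a small, checkable way. The sketch below rebuilds the 8-byte per-chunk header exactly as the vendored Writer.write does for an uncompressed chunk: one chunk-type byte, a 3-byte little-endian chunk length (checksum plus body), then the 4-byte masked CRC-32C. The helper name maskedCRC is ours; everything else is the standard library.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// maskedCRC reproduces the vendored crc function: CRC-32C, rotated right
// by 15 bits and offset by 0xa282ead8, per section 3 of the snappy
// framing format.
func maskedCRC(b []byte) uint32 {
	c := crc32.Update(0, crcTable, b)
	return uint32(c>>15|c<<17) + 0xa282ead8
}

func main() {
	body := []byte("hello, snappy framing")

	// Chunk type 0x01 is uncompressed data. The chunk length counts the
	// 4 checksum bytes plus the body, stored as 3 little-endian bytes.
	header := make([]byte, 8)
	header[0] = 0x01
	chunkLen := 4 + len(body)
	header[1] = uint8(chunkLen >> 0)
	header[2] = uint8(chunkLen >> 8)
	header[3] = uint8(chunkLen >> 16)
	binary.LittleEndian.PutUint32(header[4:], maskedCRC(body))

	fmt.Printf("chunk header: % x\n", header)
}
```

A compressed chunk differs only in the type byte (0x00) and in carrying the Encode output as its body; the stream itself is prefixed once with the magic chunk "\xff\x06\x00\x00sNaPpY".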
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 00000000..3a0d6809
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,86 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json"
+
+```
+Go developers, please join us: DiDi Chuxing Platform Technology Department, taowen@didichuxing.com
+```
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw Result (easyjson requires static code generation)
+
+| | ns/op | allocation bytes | allocation times |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+# Usage
+
+100% compatibility with the standard library
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+* [thockin](https://github.com/thockin)
+* [mattn](https://github.com/mattn)
+* [cch123](https://github.com/cch123)
+* [Oleg Shaldybin](https://github.com/olegshaldybin)
+* [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100644
index 00000000..b45ef688
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
+    mkdir -p /tmp/build-golang/src/github.com/json-iterator
+    ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
+fi
+export GOPATH=/tmp/build-golang
+go get -u github.com/golang/dep/cmd/dep
+cd /tmp/build-golang/src/github.com/json-iterator/go
+exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/feature_adapter.go b/vendor/github.com/json-iterator/go/feature_adapter.go
new file mode 100644
index 00000000..0214b711
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/feature_adapter.go
@@ -0,0 +1,133 @@
+package jsoniter
+
+import (
+	"bytes"
+	"io"
+)
+
+// RawMessage is a raw encoded JSON value; it lets jsoniter stand in for encoding/json's RawMessage
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information
+func Unmarshal(data []byte, v interface{}) error {
+	return ConfigDefault.Unmarshal(data, v)
+}
+
+func lastNotSpacePos(data []byte) int {
+	for i := len(data) - 1; i >= 0; i-- {
+		if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' {
+			return i + 1
+		}
+	}
+	return 0
+}
+
+// UnmarshalFromString is a convenience method to read from a string instead of []byte
+func UnmarshalFromString(str string, v interface{}) error {
+	return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick method to fetch a value from a deeply nested JSON structure
+func Get(data []byte, path ...interface{}) Any {
+	return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API
+//
+// Marshal returns the JSON encoding of v, adapting to the encoding/json Marshal API
+// Refer to https://godoc.org/encoding/json#Marshal for more information
+func Marshal(v interface{}) ([]byte, error) {
+	return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenience method to write as a string instead of []byte
+func MarshalToString(v interface{}) (string, error) {
+	return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to the encoding/json NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from r.
+//
+// A jsoniter Decoder is returned instead of an encoding/json Decoder
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information
+func NewDecoder(reader io.Reader) *Decoder {
+	return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// Decoder provides the same API as the encoding/json Decoder (Token() and UseNumber() are in progress)
+type Decoder struct {
+	iter *Iterator
+}
+
+// Decode decodes JSON into the given value
+func (adapter *Decoder) Decode(obj interface{}) error {
+	adapter.iter.ReadVal(obj)
+	err := adapter.iter.Error
+	if err == io.EOF {
+		return nil
+	}
+	return adapter.iter.Error
+}
+
+// More reports whether there is more data in the buffer
+func (adapter *Decoder) More() bool { + return adapter.iter.head != adapter.iter.tail +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) +func (adapter *Decoder) UseNumber() { + origCfg := adapter.iter.cfg.configBeforeFrozen + origCfg.UseNumber = true + adapter.iter.cfg = origCfg.Froze().(*frozenConfig) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + adapter.stream.cfg.indentionStep = len(indent) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.Froze().(*frozenConfig) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/feature_any.go b/vendor/github.com/json-iterator/go/feature_any.go new file mode 100644 index 00000000..87716d1f --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any.go @@ -0,0 +1,245 @@ +package jsoniter + +import ( + "errors" + "fmt" + "io" + "reflect" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
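+// Lazy variants hold the raw []byte and parse only on demand; conversion
+// errors and failed lookups surface through LastError and MustBeValid
+// rather than through explicit error returns. (Editorial clarification.)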
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + // TODO: add Set + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + return WrapUint64(uint64(val.(uint))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
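+//
+// (Editorial usage sketch; Get is the package-level helper declared in
+// feature_adapter.go, and the paths below are illustrative only:
+//
+//	name := jsoniter.Get(data, "user", "name").ToString()
+//	first := jsoniter.Get(data, "users", 0, "name") // ints index arrays
+//	if first.LastError() != nil {
+//		// the path did not resolve
+//	}
+// )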
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
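				// (Editorial note: reaching here means the path element was
				// int32('*'), the wildcard; it fans the rest of the path out
				// across every member via the Any.Get implementations below.)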
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_array.go b/vendor/github.com/json-iterator/go/feature_any_array.go new file mode 100644 index 00000000..0449e9aa --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) 
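				// Keep only elements whose remaining sub-path resolved;
				// wildcard misses are dropped silently instead of erroring.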
+ if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
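				// Same filtering rule as the lazy variant above: an
				// unresolved sub-path skips the element; it is not an error.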
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_bool.go b/vendor/github.com/json-iterator/go/feature_any_bool.go new file mode 100644 index 00000000..9452324a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/feature_any_float.go b/vendor/github.com/json-iterator/go/feature_any_float.go new file mode 100644 index 00000000..35fdb094 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any 
*floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int32.go b/vendor/github.com/json-iterator/go/feature_any_int32.go new file mode 100644 index 00000000..1b56f399 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_int64.go b/vendor/github.com/json-iterator/go/feature_any_int64.go new file mode 100644 index 00000000..c440d72b --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) 
{ + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_invalid.go b/vendor/github.com/json-iterator/go/feature_any_invalid.go new file mode 100644 index 00000000..1d859eac --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_nil.go b/vendor/github.com/json-iterator/go/feature_any_nil.go new file mode 100644 index 00000000..d04cb54c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/feature_any_number.go b/vendor/github.com/json-iterator/go/feature_any_number.go new file mode 100644 index 00000000..4e1c2764 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_number.go @@ -0,0 +1,104 @@ +package jsoniter + +import "unsafe" + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) 
ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + any.err = iter.Error + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_object.go b/vendor/github.com/json-iterator/go/feature_any_object.go new file mode 100644 index 00000000..c44ef5c9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + 
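// decode the lazily held raw bytes directly into obj; the parse cost is paid only here +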
iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
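+ // wildcard over struct fields: keep only fields whose remaining path resolves to a valid value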
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
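+ // wildcard over map keys: keep only entries whose remaining path resolves to a valid value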
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/feature_any_string.go b/vendor/github.com/json-iterator/go/feature_any_string.go new file mode 100644 index 00000000..abf060bd --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_string.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + // start endPos at startPos so a bare "+" or "-" parses as 0 instead of slicing out of range + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + // start endPos at startPos so a bare "+" parses as 0 instead of slicing out of range + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.'
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint32.go b/vendor/github.com/json-iterator/go/feature_any_uint32.go new file mode 100644 index 00000000..656bbd33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_any_uint64.go b/vendor/github.com/json-iterator/go/feature_any_uint64.go new file mode 100644 index 00000000..7df2fce3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} 
{ + return any.val +} diff --git a/vendor/github.com/json-iterator/go/feature_config.go b/vendor/github.com/json-iterator/go/feature_config.go new file mode 100644 index 00000000..78a2ce1a --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_config.go @@ -0,0 +1,348 @@ +package jsoniter + +import ( + "encoding/json" + "errors" + "io" + "reflect" + "sync/atomic" + "unsafe" +) + +// Config customizes how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + TagKey string + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool +} + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + decoderCache unsafe.Pointer + encoderCache unsafe.Pointer + extensions []Extension + streamPool chan *Stream + iteratorPool chan *Iterator +} + +// API is the public interface of this package, +// primarily Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) +} + +// ConfigDefault is the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals floats with only 6 digits of precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precision + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +// Froze forges an API from the config +func (cfg Config) Froze() API { + // TODO: cache frozen config + frozenConfig := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + streamPool: make(chan *Stream, 16), + iteratorPool: make(chan *Iterator, 16), + } + atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) + atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{})) + if cfg.MarshalFloatWith6Digits { + frozenConfig.marshalFloatWith6Digits() + } + if cfg.EscapeHTML { + frozenConfig.escapeHTML() + } + if cfg.UseNumber { + frozenConfig.useNumber() + } + if cfg.ValidateJsonRawMessage { + frozenConfig.validateJsonRawMessage() + } + frozenConfig.configBeforeFrozen = cfg + return frozenConfig +} + +func (cfg *frozenConfig) validateJsonRawMessage() { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + iter.Read() + // return the borrowed iterator on both paths, not only on success + if iter.Error != nil { + cfg.ReturnIterator(iter) + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return false + }} + cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder) +
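// register the same validating encoder for jsoniter's own RawMessage alias as well +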
cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder) +} + +func (cfg *frozenConfig) useNumber() { + cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }}) +} + +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extensions = append(cfg.extensions, extension) +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// marshalFloatWith6Digits keeps only 10**(-6) precision +// for float variables, trading accuracy for performance. +func (cfg *frozenConfig) marshalFloatWith6Digits() { + // for better performance + cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) + cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML() { + cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) { + done := false + for !done { + ptr := atomic.LoadPointer(&cfg.decoderCache) + cache := *(*map[reflect.Type]ValDecoder)(ptr) + copied := map[reflect.Type]ValDecoder{} + for k, v := range cache { + copied[k] = v + } + copied[cacheKey] = decoder + done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied)) + } +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) { + done := false + for !done { + ptr := atomic.LoadPointer(&cfg.encoderCache) + cache := *(*map[reflect.Type]ValEncoder)(ptr) + copied := map[reflect.Type]ValEncoder{} + for k, v := range cache { + copied[k] = v + } + copied[cacheKey] = encoder + done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied)) + } +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder { + ptr := atomic.LoadPointer(&cfg.decoderCache) + cache := *(*map[reflect.Type]ValDecoder)(ptr) + return cache[cacheKey] +} + +func (cfg
*frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder { + ptr := atomic.LoadPointer(&cfg.encoderCache) + cache := *(*map[reflect.Type]ValEncoder)(ptr) + return cache[cacheKey] +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.Froze().Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + data = data[:lastNotSpacePos(data)] + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + typ := reflect.TypeOf(v) + if typ.Kind() != reflect.Ptr { + // return non-pointer error + return errors.New("the second param must be ptr type") + } + iter.ReadVal(v) + if iter.head == iter.tail { + iter.loadMore() + } + if iter.Error == io.EOF { + return nil + } + if iter.Error == nil { + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + } + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/feature_iter.go b/vendor/github.com/json-iterator/go/feature_iter.go new file mode 100644 index 00000000..95ae54fb --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter.go @@ -0,0 +1,322 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + 
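// iota enum: InvalidValue is deliberately the zero value, so an uninitialized ValueType reads as invalid +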
// InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', 
'\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. +func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. 
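+// A rough usage sketch (illustrative; with the default config, numbers decode as float64): +// +// iter := ParseString(ConfigDefault, `{"a":[1,2]}`) +// v := iter.Read() // map[string]interface{}{"a": []interface{}{float64(1), float64(2)}}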
+func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_array.go b/vendor/github.com/json-iterator/go/feature_iter_array.go new file mode 100644 index 00000000..6188cb45 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_array.go @@ -0,0 +1,58 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. +func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + return false + } + return true + } + return true + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_float.go b/vendor/github.com/json-iterator/go/feature_iter_float.go new file mode 100644 index 00000000..86f45991 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_float.go @@ -0,0 +1,341 @@ +package jsoniter + +import ( + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + 
} + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' 
{ + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' 
{ + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_int.go b/vendor/github.com/json-iterator/go/feature_iter_int.go new file mode 100644 index 00000000..4781c639 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_int.go @@ -0,0 +1,339 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + 
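// positive values must not exceed math.MaxInt16; the '-' branch above allows one extra (MaxInt16+1) to represent math.MinInt16 +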
iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return 
value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' 
{ + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_object.go b/vendor/github.com/json-iterator/go/feature_iter_object.go new file mode 100644 index 00000000..dfd91fa6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "unicode" + "unsafe" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + if iter.cfg.objectFieldMustBeSimpleString { + return string(iter.readObjectFieldAsBytes()) + } else { + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + if iter.cfg.objectFieldMustBeSimpleString { + return string(iter.readObjectFieldAsBytes()) + } else { + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +func (iter *Iterator) readFieldHash() int32 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c == '"' { + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if !iter.cfg.objectFieldMustBeSimpleString && b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return int32(hash) + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return int32(hash) + } + if 'A' <= b && b <= 'Z' { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } + } + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 +} + +func calcHash(str string) int32 { + hash := int64(0x811c9dc5) + for _, b := range str { + hash ^= int64(unicode.ToLower(b)) + hash *= 0x1000193 + } + return int32(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var fieldBytes []byte + var field string + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found 
"+string([]byte{c})) + } + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip.go b/vendor/github.com/json-iterator/go/feature_iter_skip.go new file mode 100644 index 00000000..f58beb91 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip.go @@ -0,0 +1,129 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret 
bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +type captureBuffer struct { + startedAt int + captured []byte +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = make([]byte, 0, 32) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + if len(captured) == 0 { + copied := make([]byte, len(remaining)) + copy(copied, remaining) + return copied + } + captured = append(captured, remaining...) + return captured +} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go new file mode 100644 index 00000000..8fcdc3b6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_sloppy.go @@ -0,0 +1,144 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster 
implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + case ']': // If close symbol, decrease level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipArray", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + case '}': // If close symbol, decrease level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of a string, +// supporting strings that contain escaped quote symbols.
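+// For example (illustrative note, not from the original source): scanning `ab\"cd"`, +// the quote after the backslash is preceded by an odd number of backslashes, so it +// is escaped and scanning continues; the closing quote is preceded by an even +// number (zero), so findStringEnd returns the position just past it.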
+func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go new file mode 100644 index 00000000..f67bc2e8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_skip_strict.go @@ -0,0 +1,89 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import "fmt" + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + iter.ReadFloat32() + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_iter_string.go b/vendor/github.com/json-iterator/go/feature_iter_string.go new file mode 100644 index 00000000..adc487ea --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_iter_string.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = 
string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
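+// A minimal usage sketch (illustrative, assuming iter is already positioned at a +// JSON string value): +// b := iter.ReadStringAsSlice() +// name := string(b) // copy out before the next Read*/Skip call reuses the buffer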
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/feature_json_number.go b/vendor/github.com/json-iterator/go/feature_json_number.go new file mode 100644 index 00000000..e187b200 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_json_number.go @@ -0,0 +1,31 @@ +package jsoniter + +import ( + "encoding/json" + "strconv" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. 
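+// For example (illustrative): +// var n Number = "42" +// i, _ := n.Int64() // i == 42; a value such as "4.2" would return a parse error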
+func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} diff --git a/vendor/github.com/json-iterator/go/feature_pool.go b/vendor/github.com/json-iterator/go/feature_pool.go new file mode 100644 index 00000000..52d38e68 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_pool.go @@ -0,0 +1,59 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + select { + case stream := <-cfg.streamPool: + stream.Reset(writer) + return stream + default: + return NewStream(cfg, writer, 512) + } +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.Error = nil + stream.Attachment = nil + select { + case cfg.streamPool <- stream: + return + default: + return + } +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + select { + case iter := <-cfg.iteratorPool: + iter.ResetBytes(data) + return iter + default: + return ParseBytes(cfg, data) + } +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + select { + case cfg.iteratorPool <- iter: + return + default: + return + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect.go b/vendor/github.com/json-iterator/go/feature_reflect.go new file mode 100644 index 00000000..75d533b0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect.go @@ -0,0 +1,607 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "fmt" + "reflect" + "time" + "unsafe" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
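+// A minimal custom implementation sketch (illustrative; upperStringEncoder is a +// hypothetical name, and only helpers defined in this package are used): +// type upperStringEncoder struct{} +// func (e *upperStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { return *(*string)(ptr) == "" } +// func (e *upperStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteString(strings.ToUpper(*(*string)(ptr))) } +// func (e *upperStringEncoder) EncodeInterface(val interface{}, stream *Stream) { WriteToStream(val, stream, e) } +// It could then be registered via RegisterTypeEncoder("mypkg.MyString", &upperStringEncoder{}).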
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) + EncodeInterface(val interface{}, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +// WriteToStream the default implementation for TypeEncoder method EncodeInterface +func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteNil() + return + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +var jsonNumberType reflect.Type +var jsoniterNumberType reflect.Type +var jsonRawMessageType reflect.Type +var jsoniterRawMessageType reflect.Type +var anyType reflect.Type +var marshalerType reflect.Type +var unmarshalerType reflect.Type +var textMarshalerType reflect.Type +var textUnmarshalerType reflect.Type + +func init() { + jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem() + jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem() + jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() + jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem() + anyType = reflect.TypeOf((*Any)(nil)).Elem() + marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + typ := reflect.TypeOf(obj) + cacheKey := typ.Elem() + decoder := decoderOfType(iter.cfg, "", cacheKey) + e := (*emptyInterface)(unsafe.Pointer(&obj)) + if e.word == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(e.word, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + typ := reflect.TypeOf(val) + cacheKey := typ + encoder := encoderOfType(stream.cfg, "", cacheKey) + encoder.EncodeInterface(val, stream) +} + +func decoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + cacheKey := typ + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + decoder = getTypeDecoderFromExtension(cfg, typ) + if decoder != nil { + cfg.addDecoderToCache(cacheKey, decoder) + return decoder + } + decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey} + cfg.addDecoderToCache(cacheKey, decoder) + decoder = createDecoderOfType(cfg, prefix, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + for _, extension := range cfg.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func createDecoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + typeName := typ.String() + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + if typ.Implements(unmarshalerType) { + templateInterface := 
reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &OptionalDecoder{typ.Elem(), decoder} + } + return decoder + } + if reflect.PtrTo(typ).Implements(unmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)} + return decoder + } + if typ.Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Elem().Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + if typ.Kind() == reflect.Ptr { + decoder = &OptionalDecoder{typ.Elem(), decoder} + } + return decoder + } + if reflect.PtrTo(typ).Implements(textUnmarshalerType) { + templateInterface := reflect.New(typ).Interface() + var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)} + return decoder + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(cfg, prefix, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + if typ.Implements(anyType) { + return &anyCodec{} + } + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(cfg, prefix, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{} + case reflect.Interface: + if typ.NumMethod() == 0 { + return 
&emptyInterfaceCodec{} + } + return &nonEmptyInterfaceCodec{} + case reflect.Struct: + return decoderOfStruct(cfg, prefix, typ) + case reflect.Array: + return decoderOfArray(cfg, prefix, typ) + case reflect.Slice: + return decoderOfSlice(cfg, prefix, typ) + case reflect.Map: + return decoderOfMap(cfg, prefix, typ) + case reflect.Ptr: + return decoderOfOptional(cfg, prefix, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", prefix, typ.String())} + } +} + +func encoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + cacheKey := typ + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + encoder = getTypeEncoderFromExtension(cfg, typ) + if encoder != nil { + cfg.addEncoderToCache(cacheKey, encoder) + return encoder + } + encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey} + cfg.addEncoderToCache(cacheKey, encoder) + encoder = createEncoderOfType(cfg, prefix, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + for _, extension := range cfg.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +func createEncoderOfType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(cfg, typ) + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &marshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &OptionalEncoder{encoder} + } + return encoder + } + if reflect.PtrTo(typ).Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(cfg, reflect.PtrTo(typ)) + templateInterface := reflect.New(typ).Interface() + var encoder ValEncoder = &marshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(cfg, typ) + templateInterface := reflect.New(typ).Elem().Interface() + var encoder ValEncoder = &textMarshalerEncoder{ + templateInterface: extractInterface(templateInterface), + checkIsEmpty: checkIsEmpty, + } + if typ.Kind() == reflect.Ptr { + encoder = &OptionalEncoder{encoder} + } + return encoder + } + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + return &base64Codec{} + } + if typ.Implements(anyType) { + return &anyCodec{} + } + return createEncoderOfSimpleType(cfg, prefix, typ) +} + +func createCheckIsEmpty(cfg *frozenConfig, typ reflect.Type) checkIsEmpty { + kind := typ.Kind() + switch kind { + case reflect.String: + return &stringCodec{} + case reflect.Int: + return &intCodec{} + case reflect.Int8: + return &int8Codec{} + case reflect.Int16: + return &int16Codec{} + case reflect.Int32: + return &int32Codec{} + case reflect.Int64: + return &int64Codec{} + case reflect.Uint: + return &uintCodec{} + case reflect.Uint8: + return &uint8Codec{} + case reflect.Uint16: + return &uint16Codec{} + case reflect.Uint32: + return &uint32Codec{} + case reflect.Uintptr: + return &uintptrCodec{} + case 
reflect.Uint64: + return &uint64Codec{} + case reflect.Float32: + return &float32Codec{} + case reflect.Float64: + return &float64Codec{} + case reflect.Bool: + return &boolCodec{} + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{} + } + return &nonEmptyInterfaceCodec{} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(cfg, "", typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func createEncoderOfSimpleType(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int)(nil)).Elem()) + } + return &intCodec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint)(nil)).Elem()) + } + return &uintCodec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uintptr)(nil)).Elem()) + } + return &uintptrCodec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(cfg, prefix, reflect.TypeOf((*bool)(nil)).Elem()) + } + return &boolCodec{} + case reflect.Interface: + if typ.NumMethod() == 0 { + return &emptyInterfaceCodec{} + } + return &nonEmptyInterfaceCodec{} + case reflect.Struct: + return encoderOfStruct(cfg, prefix, typ) + case reflect.Array: + return encoderOfArray(cfg, prefix, typ) + case reflect.Slice: + return encoderOfSlice(cfg, prefix, typ) + case reflect.Map: + return encoderOfMap(cfg, prefix, typ) + case reflect.Ptr: + return encoderOfOptional(cfg, prefix, typ) + default: + return 
&lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", prefix, typ.String())} + } +} + +type placeholderEncoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.getRealEncoder().Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) { + encoder.getRealEncoder().EncodeInterface(val, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.getRealEncoder().IsEmpty(ptr) +} + +func (encoder *placeholderEncoder) getRealEncoder() ValEncoder { + for i := 0; i < 500; i++ { + realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderEncoder) + if isPlaceholder { + time.Sleep(10 * time.Millisecond) + } else { + return realDecoder + } + } + panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey)) +} + +type placeholderDecoder struct { + cfg *frozenConfig + cacheKey reflect.Type +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + for i := 0; i < 500; i++ { + realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey) + _, isPlaceholder := realDecoder.(*placeholderDecoder) + if isPlaceholder { + time.Sleep(10 * time.Millisecond) + } else { + realDecoder.Decode(ptr, iter) + return + } + } + panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey)) +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) EncodeInterface(val interface{}, stream *Stream) { + if val == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +func extractInterface(val interface{}) emptyInterface { + return *((*emptyInterface)(unsafe.Pointer(&val))) +} + +// emptyInterface is the header for an interface{} value. 
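+// extractInterface above relies on this two-word layout; for example +// (illustrative), WriteToStream casts &val to *emptyInterface and treats a nil +// word as a nil value to be written as JSON null.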
+type emptyInterface struct { + typ unsafe.Pointer + word unsafe.Pointer +} + +// nonEmptyInterface is the header for an interface with methods (not interface{}) +type nonEmptyInterface struct { + // see ../runtime/iface.go:/Itab + itab *struct { + ityp unsafe.Pointer // static interface type + typ unsafe.Pointer // dynamic concrete type + link unsafe.Pointer + bad int32 + unused int32 + fun [100000]unsafe.Pointer // method table + } + word unsafe.Pointer +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_array.go b/vendor/github.com/json-iterator/go/feature_reflect_array.go new file mode 100644 index 00000000..f4e211dc --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_array.go @@ -0,0 +1,93 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfArray(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + decoder := decoderOfType(cfg, prefix+"[array]->", typ.Elem()) + return &arrayDecoder{typ, typ.Elem(), decoder} +} + +func encoderOfArray(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + encoder := encoderOfType(cfg, prefix+"[array]->", typ.Elem()) + if typ.Elem().Kind() == reflect.Map { + encoder = &OptionalEncoder{encoder} + } + return &arrayEncoder{typ, typ.Elem(), encoder} +} + +type arrayEncoder struct { + arrayType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size()) + encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) { + // special optimization for interface{} + e := (*emptyInterface)(unsafe.Pointer(&val)) + if e.word == nil { + stream.WriteArrayStart() + stream.WriteNil() + stream.WriteArrayEnd() + return + } + elemType := encoder.arrayType.Elem() + if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) { + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + offset := uintptr(0) + iter.ReadArrayCB(func(iter *Iterator) bool { + if offset < decoder.arrayType.Size() { + decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter) + offset += decoder.elemType.Size() + } else { + iter.Skip() + } + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_extension.go b/vendor/github.com/json-iterator/go/feature_reflect_extension.go new file mode 100644 index
00000000..96d4cda1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_extension.go @@ -0,0 +1,421 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + onePtrEmbedded bool + onePtrOptimization bool + Type reflect.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field *reflect.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. +type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateDecoder(typ reflect.Type) ValDecoder + CreateEncoder(typ reflect.Type) ValEncoder + DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func 
RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(cfg, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + for _, extension := range cfg.extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + for _, extension := range cfg.extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder := typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + decoder := typeDecoders[typ.Elem().String()] + if decoder != nil { + return &OptionalDecoder{typ.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(cfg, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + for _, extension := range cfg.extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + for _, extension := range cfg.extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder := typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + encoder := typeEncoders[typ.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func 
describeStruct(cfg *frozenConfig, prefix string, typ reflect.Type) *StructDescriptor { + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < typ.NumField(); i++ { + field := typ.Field(i) + tag := field.Tag.Get(cfg.getTagKey()) + tagParts := strings.Split(tag, ",") + if tag == "-" { + continue + } + if field.Anonymous && (tag == "" || tagParts[0] == "") { + if field.Type.Kind() == reflect.Struct { + structDescriptor := describeStruct(cfg, prefix, field.Type) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(cfg, prefix, field.Type.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &OptionalEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty} + binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{&field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + fieldNames := calcFieldNames(field.Name, tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(cfg, prefix+typ.String()+"."+field.Name+"->", field.Type) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(cfg, prefix+typ.String()+"."+field.Name+"->", field.Type) + // map is stored as pointer in the struct, + // and treat nil or empty map as empty field + if encoder != nil && field.Type.Kind() == reflect.Map { + encoder = &optionalMapEncoder{encoder} + } + } + binding := &Binding{ + Field: &field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(cfg, typ, bindings, embeddedBindings) +} +func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + onePtrEmbedded := false + onePtrOptimization := false + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct { + onePtrEmbedded = true + } + fallthrough + case reflect.Map: + onePtrOptimization = true + case reflect.Struct: + onePtrOptimization = isStructOnePtr(firstField.Type) + } + } + structDescriptor := &StructDescriptor{ + onePtrEmbedded: onePtrEmbedded, + onePtrOptimization: onePtrOptimization, + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + for _, extension := range cfg.extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, cfg) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + 
sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +func isStructOnePtr(typ reflect.Type) bool { + if typ.NumField() == 1 { + firstField := typ.Field(0) + switch firstField.Type.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Struct: + return isStructOnePtr(firstField.Type) + } + } + return false +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type.Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? + var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? 
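+ // e.g. (illustrative) an unexported field such as `name string` ends up with + // an empty name list here, so it is neither encoded nor decoded.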
+ isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_map.go b/vendor/github.com/json-iterator/go/feature_reflect_map.go new file mode 100644 index 00000000..cb40538f --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_map.go @@ -0,0 +1,260 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "reflect" + "sort" + "strconv" + "unsafe" +) + +func decoderOfMap(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + decoder := decoderOfType(cfg, prefix+"[map]->", typ.Elem()) + mapInterface := reflect.New(typ).Interface() + return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)} +} + +func encoderOfMap(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + elemType := typ.Elem() + encoder := encoderOfType(cfg, prefix+"[map]->", elemType) + mapInterface := reflect.New(typ).Elem().Interface() + if cfg.sortMapKeys { + return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))} + } + return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))} +} + +type mapDecoder struct { + mapType reflect.Type + keyType reflect.Type + elemType reflect.Type + elemDecoder ValDecoder + mapInterface emptyInterface +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + // dark magic to cast unsafe.Pointer back to interface{} using reflect.Type + mapInterface := decoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface).Elem() + if iter.ReadNil() { + realVal.Set(reflect.Zero(decoder.mapType)) + return + } + if realVal.IsNil() { + realVal.Set(reflect.MakeMap(realVal.Type())) + } + iter.ReadMapCB(func(iter *Iterator, keyStr string) bool { + elem := reflect.New(decoder.elemType) + decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter) + // to put into map, we have to use reflection + keyType := decoder.keyType + // TODO: remove this from loop + switch { + case keyType.Kind() == reflect.String: + realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem()) + return true + case keyType.Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem()) + return true + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler) + err := textUnmarshaler.UnmarshalText([]byte(keyStr)) + if err != nil { + iter.ReportError("read map key as TextUnmarshaler", err.Error()) + return false + } + realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem()) + return true + default: + switch keyType.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowInt(n) { + iter.ReportError("read map key as int64", "read int64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, 
reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(keyStr, 10, 64) + if err != nil || reflect.Zero(keyType).OverflowUint(n) { + iter.ReportError("read map key as uint64", "read uint64 failed") + return false + } + realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem()) + return true + } + } + iter.ReportError("read map key", "unexpected map key type "+keyType.String()) + return true + }) +} + +type mapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + stream.WriteObjectStart() + for i, key := range realVal.MapKeys() { + if i != 0 { + stream.WriteMore() + } + encodeMapKey(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +func encodeMapKey(key reflect.Value, stream *Stream) { + if key.Kind() == reflect.String { + stream.WriteString(key.String()) + return + } + if tm, ok := key.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + if err != nil { + stream.Error = err + return + } + stream.writeByte('"') + stream.Write(buf) + stream.writeByte('"') + return + } + switch key.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + stream.writeByte('"') + stream.WriteInt64(key.Int()) + stream.writeByte('"') + return + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + stream.writeByte('"') + stream.WriteUint64(key.Uint()) + stream.writeByte('"') + return + } + stream.Error = &json.UnsupportedTypeError{Type: key.Type()} +} + +func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} + +type sortKeysMapEncoder struct { + mapType reflect.Type + elemType reflect.Type + elemEncoder ValEncoder + mapInterface emptyInterface +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + + // Extract and sort the keys. 
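+ // e.g. (illustrative) with map key sorting enabled, map[string]int{"b": 1, "a": 2} + // always encodes as {"a":2,"b":1}, matching encoding/json's deterministic ordering.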
+ keys := realVal.MapKeys() + sv := stringValues(make([]reflectWithString, len(keys))) + for i, v := range keys { + sv[i].v = v + if err := sv[i].resolve(); err != nil { + stream.Error = err + return + } + } + sort.Sort(sv) + + stream.WriteObjectStart() + for i, key := range sv { + if i != 0 { + stream.WriteMore() + } + stream.WriteVal(key.s) // might need html escape, so can not WriteString directly + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + val := realVal.MapIndex(key.v).Interface() + encoder.elemEncoder.EncodeInterface(val, stream) + } + stream.WriteObjectEnd() +} + +// stringValues is a slice of reflect.Value holding *reflect.StringValue. +// It implements the methods to sort by string. +type stringValues []reflectWithString + +type reflectWithString struct { + v reflect.Value + s string +} + +func (w *reflectWithString) resolve() error { + if w.v.Kind() == reflect.String { + w.s = w.v.String() + return nil + } + if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { + buf, err := tm.MarshalText() + w.s = string(buf) + return err + } + switch w.v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + w.s = strconv.FormatInt(w.v.Int(), 10) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + w.s = strconv.FormatUint(w.v.Uint(), 10) + return nil + } + return &json.UnsupportedTypeError{Type: w.v.Type()} +} + +func (sv stringValues) Len() int { return len(sv) } +func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s } + +func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + mapInterface := encoder.mapInterface + mapInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&mapInterface)) + realVal := reflect.ValueOf(*realInterface) + return realVal.Len() == 0 +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_native.go b/vendor/github.com/json-iterator/go/feature_reflect_native.go new file mode 100644 index 00000000..95bd1e87 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_native.go @@ -0,0 +1,764 @@ +package jsoniter + +import ( + "encoding" + "encoding/base64" + "encoding/json" + "reflect" + "unsafe" +) + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type intCodec struct { +} + +func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int)(ptr)) = iter.ReadInt() + } +} + +func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt(*((*int)(ptr))) +} + +func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int)(ptr)) == 0 +} + +type uintptrCodec struct { +} + +func (codec *uintptrCodec) Decode(ptr 
unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uintptr)(ptr)) = uintptr(iter.ReadUint64()) + } +} + +func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(uint64(*((*uintptr)(ptr)))) +} + +func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uintptr)(ptr)) == 0 +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uintCodec struct { +} + +func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint)(ptr)) = iter.ReadUint() + return + } +} + +func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint(*((*uint)(ptr))) +} + +func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter 
*Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, codec) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type emptyInterfaceCodec struct { +} + +func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + existing := *((*interface{})(ptr)) + + // Checking for both typed and untyped nil pointers. 
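+	// When the interface already holds a non-nil pointer, decode into the
+	// value it points to so the existing allocation is reused; otherwise a
+	// fresh value is read further below.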
+ if existing != nil && + reflect.TypeOf(existing).Kind() == reflect.Ptr && + !reflect.ValueOf(existing).IsNil() { + + var ptrToExisting interface{} + for { + elem := reflect.ValueOf(existing).Elem() + if elem.Kind() != reflect.Ptr || elem.IsNil() { + break + } + ptrToExisting = existing + existing = elem.Interface() + } + + if iter.ReadNil() { + if ptrToExisting != nil { + nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem()) + reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr) + } else { + *((*interface{})(ptr)) = nil + } + } else { + iter.ReadVal(existing) + } + + return + } + + if iter.ReadNil() { + *((*interface{})(ptr)) = nil + } else { + *((*interface{})(ptr)) = iter.Read() + } +} + +func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteVal(*((*interface{})(ptr))) +} + +func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + emptyInterface := (*emptyInterface)(ptr) + return emptyInterface.typ == nil +} + +type nonEmptyInterfaceCodec struct { +} + +func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + if nonEmptyInterface.itab == nil { + iter.ReportError("read non-empty interface", "do not know which concrete type to decode to") + return + } + var i interface{} + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + iter.ReadVal(&i) + if e.word == nil { + nonEmptyInterface.itab = nil + } + nonEmptyInterface.word = e.word +} + +func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + var i interface{} + if nonEmptyInterface.itab != nil { + e := (*emptyInterface)(unsafe.Pointer(&i)) + e.typ = nonEmptyInterface.itab.typ + e.word = nonEmptyInterface.word + } + stream.WriteVal(i) +} + +func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteVal(val) +} + +func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool { + nonEmptyInterface := (*nonEmptyInterface)(ptr) + return nonEmptyInterface.word == nil +} + +type anyCodec struct { +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*Any)(ptr)) = iter.ReadAny() +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + (*((*Any)(ptr))).WriteTo(stream) +} + +func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) { + (val.(Any)).WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + return (*((*Any)(ptr))).Size() == 0 +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.Number)(ptr)))) +} + +func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.Number))) +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct 
{ +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*Number)(ptr)))) +} + +func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(Number))) +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(json.RawMessage))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) { + stream.WriteRaw(string(val.(RawMessage))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} + +type base64Codec struct { + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Len = 0 + ptrSlice.Cap = 0 + ptrSlice.Data = nil + return + } + switch iter.WhatIsNext() { + case StringValue: + encoding := base64.StdEncoding + src := iter.SkipAndReturnBytes() + src = src[1 : len(src)-1] + decodedLen := encoding.DecodedLen(len(src)) + dst := make([]byte, decodedLen) + len, err := encoding.Decode(dst, src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + dst = dst[:len] + dstSlice := (*sliceHeader)(unsafe.Pointer(&dst)) + ptrSlice := (*sliceHeader)(ptr) + ptrSlice.Data = dstSlice.Data + ptrSlice.Cap = dstSlice.Cap + ptrSlice.Len = dstSlice.Len + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) { + ptr := extractInterface(val).word + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + encoding := base64.StdEncoding + stream.writeByte('"') + toGrow := encoding.EncodedLen(len(src)) + stream.ensure(toGrow) + 
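+	// ensure has reserved enough spare capacity, so the base64 output is
+	// encoded directly into the stream buffer without a temporary slice.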
encoding.Encode(stream.buf[stream.n:], src) + stream.n += toGrow + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type marshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler, ok := (*realInterface).(json.Marshaler) + if !ok { + stream.WriteVal(nil) + return + } + + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} +func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + templateInterface emptyInterface + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + templateInterface := encoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + marshaler := (*realInterface).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + stream.WriteString(string(bytes)) 
+ } +} + +func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + templateInterface emptyInterface +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + templateInterface := decoder.templateInterface + templateInterface.word = ptr + realInterface := (*interface{})(unsafe.Pointer(&templateInterface)) + unmarshaler := (*realInterface).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_object.go b/vendor/github.com/json-iterator/go/feature_reflect_object.go new file mode 100644 index 00000000..036545cb --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_object.go @@ -0,0 +1,195 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func encoderOfStruct(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(cfg, prefix, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, structDescriptor.onePtrEmbedded, + structDescriptor.onePtrOptimization, finalOrderedFields} +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +func decoderOfStruct(cfg 
*frozenConfig, prefix string, typ reflect.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(cfg, prefix, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + return createStructDecoder(typ, fields) +} + +type structFieldEncoder struct { + field *reflect.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +type structEncoder struct { + typ reflect.Type + onePtrEmbedded bool + onePtrOptimization bool + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) { + e := (*emptyInterface)(unsafe.Pointer(&val)) + if encoder.onePtrOptimization { + if e.word == nil && encoder.onePtrEmbedded { + stream.WriteObjectStart() + stream.WriteObjectEnd() + return + } + ptr := uintptr(e.word) + e.word = unsafe.Pointer(&ptr) + } + if reflect.TypeOf(val).Kind() == reflect.Ptr { + encoder.Encode(unsafe.Pointer(&e.word), stream) + } else { + encoder.Encode(e.word, stream) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_optional.go b/vendor/github.com/json-iterator/go/feature_reflect_optional.go new file mode 100644 index 00000000..fc8e9bc7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_optional.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +func 
decoderOfOptional(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + elemType := typ.Elem() + decoder := decoderOfType(cfg, prefix, elemType) + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + elemType := typ.Elem() + elemEncoder := encoderOfType(cfg, prefix, elemType) + encoder := &OptionalEncoder{elemEncoder} + if elemType.Kind() == reflect.Map { + encoder = &OptionalEncoder{encoder} + } + return encoder +} + +type OptionalDecoder struct { + ValueType reflect.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + value := reflect.New(decoder.ValueType) + newPtr := extractInterface(value.Interface()).word + decoder.ValueDecoder.Decode(newPtr, iter) + *((*uintptr)(ptr)) = uintptr(newPtr) + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type deferenceDecoder struct { + // only to deference a pointer + valueType reflect.Type + valueDecoder ValDecoder +} + +func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + value := reflect.New(decoder.valueType) + newPtr := extractInterface(value.Interface()).word + decoder.valueDecoder.Decode(newPtr, iter) + *((*uintptr)(ptr)) = uintptr(newPtr) + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type optionalMapEncoder struct { + valueEncoder ValEncoder +} + +func (encoder *optionalMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *optionalMapEncoder) EncodeInterface(val interface{}, stream *Stream) { + WriteToStream(val, stream, encoder) +} + +func (encoder *optionalMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + p := *((*unsafe.Pointer)(ptr)) + return p == nil || encoder.valueEncoder.IsEmpty(p) +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_slice.go b/vendor/github.com/json-iterator/go/feature_reflect_slice.go new file mode 100644 index 00000000..2b699026 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_slice.go @@ -0,0 +1,141 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +func decoderOfSlice(cfg *frozenConfig, prefix string, typ reflect.Type) ValDecoder { + decoder := decoderOfType(cfg, prefix+"[slice]->", typ.Elem()) + return &sliceDecoder{typ, typ.Elem(), decoder} +} + +func encoderOfSlice(cfg *frozenConfig, prefix string, typ reflect.Type) ValEncoder { + encoder := encoderOfType(cfg, prefix+"[slice]->", 
typ.Elem())
+	if typ.Elem().Kind() == reflect.Map {
+		encoder = &OptionalEncoder{encoder}
+	}
+	return &sliceEncoder{typ, typ.Elem(), encoder}
+}
+
+type sliceEncoder struct {
+	sliceType   reflect.Type
+	elemType    reflect.Type
+	elemEncoder ValEncoder
+}
+
+func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	slice := (*sliceHeader)(ptr)
+	if slice.Data == nil {
+		stream.WriteNil()
+		return
+	}
+	if slice.Len == 0 {
+		stream.WriteEmptyArray()
+		return
+	}
+	stream.WriteArrayStart()
+	elemPtr := unsafe.Pointer(slice.Data)
+	encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
+	for i := 1; i < slice.Len; i++ {
+		stream.WriteMore()
+		elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size())
+		encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
+	}
+	stream.WriteArrayEnd()
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
+	}
+}
+
+func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) {
+	WriteToStream(val, stream, encoder)
+}
+
+func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	slice := (*sliceHeader)(ptr)
+	return slice.Len == 0
+}
+
+type sliceDecoder struct {
+	sliceType   reflect.Type
+	elemType    reflect.Type
+	elemDecoder ValDecoder
+}
+
+// sliceHeader is a safe version of SliceHeader used within this package.
+type sliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.doDecode(ptr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+	}
+}
+
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+	slice := (*sliceHeader)(ptr)
+	if iter.ReadNil() {
+		slice.Len = 0
+		slice.Cap = 0
+		slice.Data = nil
+		return
+	}
+	reuseSlice(slice, decoder.sliceType, 4)
+	slice.Len = 0
+	offset := uintptr(0)
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		growOne(slice, decoder.sliceType, decoder.elemType)
+		decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter)
+		offset += decoder.elemType.Size()
+		return true
+	})
+}
+
+// growOne grows the slice so that it can hold one more value, allocating
+// more capacity if needed.
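+// While the slice holds fewer than 1024 elements the capacity is doubled;
+// beyond that it grows by about 25%, mirroring the runtime's append policy.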
+func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) { + newLen := slice.Len + 1 + if newLen <= slice.Cap { + slice.Len = newLen + return + } + newCap := slice.Cap + if newCap == 0 { + newCap = 1 + } else { + for newCap < newLen { + if slice.Len < 1024 { + newCap += newCap + } else { + newCap += newCap / 4 + } + } + } + newVal := reflect.MakeSlice(sliceType, newLen, newCap) + dst := unsafe.Pointer(newVal.Pointer()) + // copy old array into new array + originalBytesCount := slice.Len * int(elementType.Size()) + srcSliceHeader := (unsafe.Pointer)(&sliceHeader{slice.Data, originalBytesCount, originalBytesCount}) + dstSliceHeader := (unsafe.Pointer)(&sliceHeader{dst, originalBytesCount, originalBytesCount}) + copy(*(*[]byte)(dstSliceHeader), *(*[]byte)(srcSliceHeader)) + slice.Data = dst + slice.Len = newLen + slice.Cap = newCap +} + +func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) { + if expectedCap <= slice.Cap { + return + } + newVal := reflect.MakeSlice(sliceType, 0, expectedCap) + dst := unsafe.Pointer(newVal.Pointer()) + slice.Data = dst + slice.Cap = expectedCap +} diff --git a/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go new file mode 100644 index 00000000..d3080485 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_reflect_struct_decoder.go @@ -0,0 +1,966 @@ +package jsoniter + +import ( + "fmt" + "io" + "reflect" + "strings" + "unsafe" +) + +func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) ValDecoder { + knownHash := map[int32]struct{}{ + 0: {}, + } + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int32 + var fieldHash2 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldDecoder1 
*structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var 
fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := 
calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int32 + var fieldName2 int32 + var fieldName3 int32 + var fieldName4 int32 + var fieldName5 int32 + var fieldName6 int32 + var fieldName7 int32 + var fieldName8 int32 + var fieldName9 int32 + var fieldName10 int32 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields} +} + +type generalStructDecoder struct { + typ reflect.Type + fields map[string]*structFieldDecoder +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + var fieldBytes []byte + var 
field string + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes = iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + fieldDecoder := decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + for iter.nextToken() == ',' { + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.readObjectFieldAsBytes() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + } else { + field = iter.ReadString() + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + } + fieldDecoder = decoder.fields[strings.ToLower(field)] + if fieldDecoder == nil { + iter.Skip() + } else { + fieldDecoder.Decode(ptr, iter) + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type skipObjectDecoder struct { + typ reflect.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect.Type + fieldHash int32 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type twoFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type threeFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fourFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 
*structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fiveFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sixFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sevenFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) 
Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type eightFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type nineFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder + fieldHash9 int32 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + 
decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type tenFieldsStructDecoder struct { + typ reflect.Type + fieldHash1 int32 + fieldDecoder1 *structFieldDecoder + fieldHash2 int32 + fieldDecoder2 *structFieldDecoder + fieldHash3 int32 + fieldDecoder3 *structFieldDecoder + fieldHash4 int32 + fieldDecoder4 *structFieldDecoder + fieldHash5 int32 + fieldDecoder5 *structFieldDecoder + fieldHash6 int32 + fieldDecoder6 *structFieldDecoder + fieldHash7 int32 + fieldDecoder7 *structFieldDecoder + fieldHash8 int32 + fieldDecoder8 *structFieldDecoder + fieldHash9 int32 + fieldDecoder9 *structFieldDecoder + fieldHash10 int32 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type structFieldDecoder struct { + field *reflect.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/feature_stream.go b/vendor/github.com/json-iterator/go/feature_stream.go new file mode 100644 index 00000000..97355eb5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream.go @@ -0,0 +1,308 @@ +package jsoniter + +import ( + "io" +) + +// stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + n int + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream create new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. 
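+// The buffer grows on demand (see growAtLeast), so bufSize is only an
+// initial hint, not a hard limit.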
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, bufSize), + n: 0, + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.n = 0 +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return len(stream.buf) - stream.n +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return stream.n +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf[:stream.n] +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + for len(p) > stream.Available() && stream.Error == nil { + if stream.out == nil { + stream.growAtLeast(len(p)) + } else { + var n int + if stream.Buffered() == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, stream.Error = stream.out.Write(p) + } else { + n = copy(stream.buf[stream.n:], p) + stream.n += n + stream.Flush() + } + nn += n + p = p[n:] + } + } + if stream.Error != nil { + return nn, stream.Error + } + n := copy(stream.buf[stream.n:], p) + stream.n += n + nn += n + return nn, nil +} + +// WriteByte writes a single byte. +func (stream *Stream) writeByte(c byte) { + if stream.Error != nil { + return + } + if stream.Available() < 1 { + stream.growAtLeast(1) + } + stream.buf[stream.n] = c + stream.n++ +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 2 { + stream.growAtLeast(2) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.n += 2 +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 3 { + stream.growAtLeast(3) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.n += 3 +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 4 { + stream.growAtLeast(4) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.buf[stream.n+3] = c4 + stream.n += 4 +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + if stream.Error != nil { + return + } + if stream.Available() < 5 { + stream.growAtLeast(5) + } + stream.buf[stream.n] = c1 + stream.buf[stream.n+1] = c2 + stream.buf[stream.n+2] = c3 + stream.buf[stream.n+3] = c4 + stream.buf[stream.n+4] = c5 + stream.n += 5 +} + +// Flush writes any buffered data to the underlying io.Writer. 
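+// A short write with no error from the underlying writer is reported as
+// io.ErrShortWrite; in either error case the unflushed tail is shifted to
+// the front of the buffer so a later Flush can retry it, and the error is
+// recorded in stream.Error as well as returned.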
+func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + if stream.n == 0 { + return nil + } + n, err := stream.out.Write(stream.buf[0:stream.n]) + if n < stream.n && err == nil { + err = io.ErrShortWrite + } + if err != nil { + if n > 0 && n < stream.n { + copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n]) + } + stream.n -= n + stream.Error = err + return err + } + stream.n = 0 + return nil +} + +func (stream *Stream) ensure(minimal int) { + available := stream.Available() + if available < minimal { + stream.growAtLeast(minimal) + } +} + +func (stream *Stream) growAtLeast(minimal int) { + if stream.out != nil { + stream.Flush() + if stream.Available() >= minimal { + return + } + } + toGrow := len(stream.buf) + if toGrow < minimal { + toGrow = minimal + } + newBuf := make([]byte, len(stream.buf)+toGrow) + copy(newBuf, stream.Buffer()) + stream.buf = newBuf +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.ensure(len(s)) + if stream.Error != nil { + return + } + n := copy(stream.buf[stream.n:], s) + stream.n += n +} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) +} + +// WriteArrayStart write [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + stream.ensure(toWrite) + for i := 0; i < toWrite && stream.n < len(stream.buf); i++ { + stream.buf[stream.n] = ' ' + stream.n++ + } +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_float.go 
b/vendor/github.com/json-iterator/go/feature_stream_float.go new file mode 100644 index 00000000..9a404e11 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_float.go @@ -0,0 +1,96 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 write float32 to stream +func (stream *Stream) WriteFloat32(val float32) { + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32)) +} + +// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + stream.ensure(10) + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[stream.n-1] == '0' { + stream.n-- + } +} + +// WriteFloat64 write float64 to stream +func (stream *Stream) WriteFloat64(val float64) { + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64)) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + stream.ensure(10) + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[stream.n-1] == '0' { + stream.n-- + } +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_int.go b/vendor/github.com/json-iterator/go/feature_stream_int.go new file mode 100644 index 00000000..7cfd522c --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_int.go @@ -0,0 +1,320 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(buf []byte, v uint32, n int) int { + start := v >> 24 + if start == 0 { + buf[n] = byte(v >> 16) + n++ + buf[n] = byte(v >> 8) + n++ + } else if start == 1 { + buf[n] = byte(v >> 8) + n++ + } + buf[n] = byte(v) + n++ + return n +} + +func writeBuf(buf []byte, v uint32, n int) { + buf[n] = byte(v >> 16) + buf[n+1] = byte(v >> 8) + buf[n+2] = byte(v) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.ensure(3) + stream.n = 
writeFirstBuf(stream.buf, digits[val], stream.n) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + stream.ensure(4) + n := stream.n + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint8(nval) + } + stream.n = writeFirstBuf(stream.buf, digits[val], n) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + stream.ensure(5) + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], stream.n) + return + } + r1 := val - q1*1000 + n := writeFirstBuf(stream.buf, digits[q1], stream.n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + stream.ensure(6) + n := stream.n + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint16(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + n = writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + stream.ensure(10) + n := stream.n + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + } else { + r3 := q2 - q3*1000 + stream.buf[n] = byte(q3 + '0') + n++ + writeBuf(stream.buf, digits[r3], n) + n += 3 + } + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + stream.ensure(11) + n := stream.n + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint32(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + } else { + r3 := q2 - q3*1000 + stream.buf[n] = byte(q3 + '0') + n++ + writeBuf(stream.buf, digits[r3], n) + n += 3 + } + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + stream.ensure(20) + n := stream.n + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + n = writeFirstBuf(stream.buf, digits[q3], n) + writeBuf(stream.buf, digits[r3], n) + writeBuf(stream.buf, digits[r2], n+3) + 
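+		// (each digits[v] entry packs the three ASCII digits of v plus a
+		// leading-zero count in its top byte; writeFirstBuf uses that count
+		// to trim the most significant group, while writeBuf, as here,
+		// always emits a full three-digit group)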
writeBuf(stream.buf, digits[r1], n+6) + stream.n = n + 9 + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + n = writeFirstBuf(stream.buf, digits[q4], n) + writeBuf(stream.buf, digits[r4], n) + writeBuf(stream.buf, digits[r3], n+3) + writeBuf(stream.buf, digits[r2], n+6) + writeBuf(stream.buf, digits[r1], n+9) + stream.n = n + 12 + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + n = writeFirstBuf(stream.buf, digits[q5], n) + } else { + n = writeFirstBuf(stream.buf, digits[q6], n) + r6 := q5 - q6*1000 + writeBuf(stream.buf, digits[r6], n) + n += 3 + } + writeBuf(stream.buf, digits[r5], n) + writeBuf(stream.buf, digits[r4], n+3) + writeBuf(stream.buf, digits[r3], n+6) + writeBuf(stream.buf, digits[r2], n+9) + writeBuf(stream.buf, digits[r1], n+12) + stream.n = n + 15 +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + stream.ensure(20) + n := stream.n + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf[n] = '-' + n++ + } else { + val = uint64(nval) + } + q1 := val / 1000 + if q1 == 0 { + stream.n = writeFirstBuf(stream.buf, digits[val], n) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + n := writeFirstBuf(stream.buf, digits[q1], n) + writeBuf(stream.buf, digits[r1], n) + stream.n = n + 3 + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + n = writeFirstBuf(stream.buf, digits[q2], n) + writeBuf(stream.buf, digits[r2], n) + writeBuf(stream.buf, digits[r1], n+3) + stream.n = n + 6 + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + n = writeFirstBuf(stream.buf, digits[q3], n) + writeBuf(stream.buf, digits[r3], n) + writeBuf(stream.buf, digits[r2], n+3) + writeBuf(stream.buf, digits[r1], n+6) + stream.n = n + 9 + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + n = writeFirstBuf(stream.buf, digits[q4], n) + writeBuf(stream.buf, digits[r4], n) + writeBuf(stream.buf, digits[r3], n+3) + writeBuf(stream.buf, digits[r2], n+6) + writeBuf(stream.buf, digits[r1], n+9) + stream.n = n + 12 + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + n = writeFirstBuf(stream.buf, digits[q5], n) + } else { + stream.buf[n] = byte(q6 + '0') + n++ + r6 := q5 - q6*1000 + writeBuf(stream.buf, digits[r6], n) + n += 3 + } + writeBuf(stream.buf, digits[r5], n) + writeBuf(stream.buf, digits[r4], n+3) + writeBuf(stream.buf, digits[r3], n+6) + writeBuf(stream.buf, digits[r2], n+9) + writeBuf(stream.buf, digits[r1], n+12) + stream.n = n + 15 +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/feature_stream_string.go b/vendor/github.com/json-iterator/go/feature_stream_string.go new file mode 100644 index 00000000..334282f0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/feature_stream_string.go @@ -0,0 +1,396 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML