forked from libsgh/chrome_updater
-
Notifications
You must be signed in to change notification settings - Fork 0
/
download.go
136 lines (121 loc) · 3.15 KB
/
download.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
/*
Package main implements a high-performance parallel downloader:
worker goroutines fetch byte-range chunks concurrently, and a
sync.WaitGroup coordinates completion of all chunks.
*/
package main
import (
"fmt"
"fyne.io/fyne/v2/widget"
"io"
"log"
"net/http"
"os"
"runtime/debug"
"strings"
"sync"
"sync/atomic"
"time"
)
// FileFlag is the flag set used to open the download target file:
// write-only, created if it does not already exist.
//
// FileMode is the permission bits (rw-r--r--) applied when the file
// is created.
const (
	FileFlag = os.O_WRONLY | os.O_CREATE
	FileMode = 0644
)
// downloadedBytes accumulates the total number of bytes written across
// all worker goroutines; it is updated with sync/atomic and drives the
// progress bar. NOTE(review): it is package-level and never reset, so a
// second download in the same process would start with a stale count —
// confirm against callers.
var (
	// WaitPool = sync.WaitGroup{}
	downloadedBytes int64
)
// GoroutineDownload downloads requestURL into fileName using poolSize
// worker goroutines, each repeatedly fetching chunkSize-byte ranges until
// all fileSize bytes are written. Progress is reported to downloadProgress;
// wg counts one unit per chunk and is waited on before returning.
// example:
//
//	requestURL := "http://xxx"
//	GoroutineDownload(requestURL, 20, 10*1024*1024, 30)
func GoroutineDownload(requestURL, fileName string, poolSize, chunkSize, timeout, fileSize int64, downloadProgress *widget.ProgressBar, wg *sync.WaitGroup) {
	// Trim whitespace BEFORE the scheme check; the original trimmed after,
	// so " http://x" would have been mangled into "http:// http://x".
	requestURL = strings.TrimSpace(requestURL)
	if !strings.HasPrefix(requestURL, "http") {
		requestURL = "http://" + requestURL
	}
	// Open (or create) the target file for random-access chunk writes.
	f, err := os.OpenFile(fileName, FileFlag, FileMode)
	if err != nil {
		log.Printf("open error:%+v\n", err)
		return
	}
	// Close exactly once, logging any error. (The original both deferred
	// Close and called it explicitly, making the second call fail.)
	defer func() {
		if cerr := f.Close(); cerr != nil {
			log.Printf("close file error:%+v\n", cerr)
		}
	}()
	// Buffered to hold every chunk offset, so the producer loop below and
	// any failure re-queue never block.
	pool := make(chan int64, (fileSize/chunkSize)+1)
	var index int64
	for index = 0; index < poolSize; index++ {
		go func() {
			// Recover so a panicking worker cannot kill the process.
			defer func() {
				if r := recover(); r != nil {
					log.Printf("panic error: %+v, stack:%s", r, debug.Stack())
				}
			}()
			// Each worker keeps its own start/err. The original captured the
			// enclosing function's shared start/err in every goroutine — a
			// data race under concurrent workers.
			for {
				start, err := downloadChunkToFile(requestURL, pool, f, chunkSize, timeout, fileSize, downloadProgress, wg)
				if err != nil {
					log.Printf("fetch chunk start:%d error:%+v\n", start, err)
					// Re-queue the failed chunk so some worker retries it.
					pool <- start
				} else {
					break
				}
				log.Printf("start loop download again")
			}
		}()
	}
	// Enqueue every chunk offset; wg tracks outstanding chunks.
	for start := int64(0); start < fileSize; start += chunkSize {
		wg.Add(1)
		pool <- start
	}
	wg.Wait()
	// NOTE(review): the workers never exit on success (downloadChunkToFile
	// only returns on error), so poolSize goroutines remain blocked on
	// <-pool after every download — consider a close/done signal. Left as-is
	// to avoid restructuring the retry protocol.
}
// downloadChunkToFile loops forever pulling chunk offsets from pool,
// fetching each [start, start+chunkSize-1] byte range from requestURL and
// writing it to f at that offset. Each successfully written chunk updates
// the shared byte counter, advances downloadProgress, and releases one wg
// slot. The function only returns on error, reporting the offset of the
// failed chunk so the caller can re-queue it; its wg slot is intentionally
// NOT released on any error path.
func downloadChunkToFile(requestURL string, pool chan int64, f *os.File, chunkSize, timeout int64, fileSize int64, downloadProgress *widget.ProgressBar, wg *sync.WaitGroup) (start int64, err error) {
	// One client per worker, reused across every chunk this worker fetches.
	client := &http.Client{Timeout: time.Second * time.Duration(timeout)}
	for {
		start = <-pool
		// Build a fresh request per chunk. The original reused a single
		// *http.Request across client.Do calls, which is unsafe once the
		// transport has consumed it.
		var req *http.Request
		req, err = http.NewRequest("GET", requestURL, nil)
		if err != nil {
			log.Printf("create request error:%+v\n", err)
			return
		}
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, start+chunkSize-1))
		var resp *http.Response
		resp, err = client.Do(req)
		if err != nil {
			log.Printf("send request error:%+v\n", err)
			return
		}
		// Reject non-success statuses; the original wrote server error pages
		// straight into the output file.
		if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
			_ = resp.Body.Close()
			err = fmt.Errorf("unexpected status %s for chunk at %d", resp.Status, start)
			log.Printf("send request error:%+v\n", err)
			return
		}
		var body []byte
		body, err = io.ReadAll(resp.Body)
		// Close as soon as the body is consumed, on every path.
		_ = resp.Body.Close()
		if err != nil {
			log.Printf("read response error:%+v\n", err)
			return
		}
		var written int
		written, err = f.WriteAt(body, start)
		if err != nil {
			// Do NOT wg.Done here: the caller re-queues this chunk, so its
			// wg slot is still outstanding. The original decremented anyway,
			// corrupting the WaitGroup count on every write-error retry.
			log.Printf("write file error:%+v\n", err)
			return
		}
		// Atomic load to pair with the atomic add — the original read the
		// counter plainly, racing with other workers' updates.
		total := atomic.AddInt64(&downloadedBytes, int64(written))
		// Scaled to 90%: the remaining 10% is presumably reserved for a
		// post-download step elsewhere — confirm against the caller.
		downloadProgress.SetValue(float64(total) / float64(fileSize) * 0.9)
		// Each completed chunk releases exactly one wg slot.
		wg.Done()
	}
}