This repository has been archived by the owner on Jul 24, 2024. It is now read-only.

storage/: handle special character in URL (#614) #617

Merged (5 commits) on Nov 25, 2020
4 changes: 4 additions & 0 deletions pkg/storage/parse.go
@@ -29,6 +29,10 @@ func ParseBackend(rawURL string, options *BackendOptions) (*backup.StorageBacken
		return nil, errors.Annotate(berrors.ErrStorageInvalidConfig, "empty store is not allowed")
	}

+	// https://github.com/pingcap/br/issues/603
+	// In aws the secret key may contain '/+=' and '+' has a special meaning in URL.
+	// Replace "+" by "%2B" here to avoid this problem.
+	rawURL = strings.ReplaceAll(rawURL, "+", "%2B")
	u, err := url.Parse(rawURL)
	if err != nil {
		return nil, errors.Trace(err)
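For reference, a minimal standalone sketch of the behaviour the added lines guard against: Go's net/url query decoding treats an unescaped '+' as a space, so a secret key containing '+' would be silently corrupted, while pre-escaping it as "%2B" (which is what ParseBackend now does) preserves it. The bucket and key values below are made up for illustration and are not part of the change:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Unescaped '+' in a query value decodes as a space,
	// so a secret like "abc+def" comes back as "abc def".
	u, err := url.Parse("s3://bucket/prefix?secret-access-key=abc+def")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Query().Get("secret-access-key")) // prints "abc def"

	// Escaping '+' as "%2B" before parsing keeps the original value intact.
	u, err = url.Parse("s3://bucket/prefix?secret-access-key=abc%2Bdef")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Query().Get("secret-access-key")) // prints "abc+def"
}
```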
10 changes: 10 additions & 0 deletions pkg/storage/parse_test.go
@@ -66,6 +66,16 @@ func (r *testStorageSuite) TestCreateStorage(c *C) {
	c.Assert(s3.Sse, Equals, "aws:kms")
	c.Assert(s3.SseKmsKeyId, Equals, "TestKey")

+	// special character in access keys
+	s, err = ParseBackend(`s3://bucket4/prefix/path?access-key=NXN7IPIOSAAKDEEOLMAF&secret-access-key=nREY/7Dt+PaIbYKrKlEEMMF/ExCiJEX=XMLPUANw`, nil)
+	c.Assert(err, IsNil)
+	s3 = s.GetS3()
+	c.Assert(s3, NotNil)
+	c.Assert(s3.Bucket, Equals, "bucket4")
+	c.Assert(s3.Prefix, Equals, "prefix/path")
+	c.Assert(s3.AccessKey, Equals, "NXN7IPIOSAAKDEEOLMAF")
+	c.Assert(s3.SecretAccessKey, Equals, "nREY/7Dt+PaIbYKrKlEEMMF/ExCiJEX=XMLPUANw")
+
	gcsOpt := &BackendOptions{
		GCS: GCSBackendOptions{
			Endpoint: "https://gcs.example.com/",
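A related aside, not part of this PR: callers that assemble the storage URL programmatically can sidestep manual escaping entirely by letting net/url build the query string. A minimal sketch, assuming the same access-key / secret-access-key parameter names and the dummy credentials used in the test above:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Dummy credentials from the test above; '/', '+' and '=' are the
	// characters that would otherwise need manual escaping.
	accessKey := "NXN7IPIOSAAKDEEOLMAF"
	secretKey := "nREY/7Dt+PaIbYKrKlEEMMF/ExCiJEX=XMLPUANw"

	// url.Values.Encode percent-escapes reserved characters ('+' becomes "%2B"),
	// so the resulting URL round-trips through url.Parse without corruption.
	q := url.Values{}
	q.Set("access-key", accessKey)
	q.Set("secret-access-key", secretKey)

	fmt.Println("s3://bucket4/prefix/path?" + q.Encode())
}
```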
119 changes: 64 additions & 55 deletions tests/br_s3/run.sh
@@ -19,8 +19,8 @@ TABLE="usertable"
DB_COUNT=3

# start the s3 server
-export MINIO_ACCESS_KEY=brs3accesskey
-export MINIO_SECRET_KEY=brs3secretkey
+export MINIO_ACCESS_KEY='KEXI7MANNASOPDLAOIEF'
+export MINIO_SECRET_KEY='MaKYxEGDInMPtEYECXRJLU+FPNKb/wAX/MElir7E'
export MINIO_BROWSER=off
export AWS_ACCESS_KEY_ID=$MINIO_ACCESS_KEY
export AWS_SECRET_ACCESS_KEY=$MINIO_SECRET_KEY
@@ -44,73 +44,82 @@ stop_minio() {
}
trap stop_minio EXIT

-s3cmd --access_key=$MINIO_ACCESS_KEY --secret_key=$MINIO_SECRET_KEY --host=$S3_ENDPOINT --host-bucket=$S3_ENDPOINT --no-ssl mb s3://mybucket

# Fill in the database
for i in $(seq $DB_COUNT); do
    run_sql "CREATE DATABASE $DB${i};"
    go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB${i}
done
S3_KEY=""
for p in $(seq 2); do
s3cmd --access_key=$MINIO_ACCESS_KEY --secret_key=$MINIO_SECRET_KEY --host=$S3_ENDPOINT --host-bucket=$S3_ENDPOINT --no-ssl mb s3://mybucket

for i in $(seq $DB_COUNT); do
row_count_ori[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}')
done
for i in $(seq $DB_COUNT); do
row_count_ori[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}')
done

-# backup full
-echo "backup start..."
-BACKUP_LOG="backup.log"
-rm -f $BACKUP_LOG
-unset BR_LOG_TO_TERM
-run_br --pd $PD_ADDR backup full -s "s3://mybucket/$DB?endpoint=http://$S3_ENDPOINT" \
-    --log-file $BACKUP_LOG || \
-    ( cat $BACKUP_LOG && BR_LOG_TO_TERM=1 && exit 1 )
-cat $BACKUP_LOG
-BR_LOG_TO_TERM=1
+    # backup full
+    echo "backup start..."
+    BACKUP_LOG="backup.log"
+    rm -f $BACKUP_LOG
+    unset BR_LOG_TO_TERM
+    run_br --pd $PD_ADDR backup full -s "s3://mybucket/$DB?endpoint=http://$S3_ENDPOINT$S3_KEY" \
+        --log-file $BACKUP_LOG || \
+        ( cat $BACKUP_LOG && BR_LOG_TO_TERM=1 && exit 1 )
+    cat $BACKUP_LOG
+    BR_LOG_TO_TERM=1

-if grep -i $MINIO_SECRET_KEY $BACKUP_LOG; then
-    echo "Secret key logged in log. Please remove them."
-    exit 1
-fi
+    if grep -i $MINIO_SECRET_KEY $BACKUP_LOG; then
+        echo "Secret key logged in log. Please remove them."
+        exit 1
+    fi

-for i in $(seq $DB_COUNT); do
-    run_sql "DROP DATABASE $DB${i};"
-done
+    for i in $(seq $DB_COUNT); do
+        run_sql "DROP DATABASE $DB${i};"
+    done

-# restore full
-echo "restore start..."
-RESTORE_LOG="restore.log"
-rm -f $RESTORE_LOG
-unset BR_LOG_TO_TERM
-run_br restore full -s "s3://mybucket/$DB" --pd $PD_ADDR --s3.endpoint="http://$S3_ENDPOINT" \
-    --log-file $RESTORE_LOG || \
-    ( cat $RESTORE_LOG && BR_LOG_TO_TERM=1 && exit 1 )
-cat $RESTORE_LOG
-BR_LOG_TO_TERM=1
+    # restore full
+    echo "restore start..."
+    RESTORE_LOG="restore.log"
+    rm -f $RESTORE_LOG
+    unset BR_LOG_TO_TERM
+    run_br restore full -s "s3://mybucket/$DB?$S3_KEY" --pd $PD_ADDR --s3.endpoint="http://$S3_ENDPOINT" \
+        --log-file $RESTORE_LOG || \
+        ( cat $RESTORE_LOG && BR_LOG_TO_TERM=1 && exit 1 )
+    cat $RESTORE_LOG
+    BR_LOG_TO_TERM=1

-if grep -i $MINIO_SECRET_KEY $RESTORE_LOG; then
-    echo "Secret key logged in log. Please remove them."
-    exit 1
-fi
+    if grep -i $MINIO_SECRET_KEY $RESTORE_LOG; then
+        echo "Secret key logged in log. Please remove them."
+        exit 1
+    fi

-for i in $(seq $DB_COUNT); do
-    row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}')
-done
+    for i in $(seq $DB_COUNT); do
+        row_count_new[${i}]=$(run_sql "SELECT COUNT(*) FROM $DB${i}.$TABLE;" | awk '/COUNT/{print $2}')
+    done

-fail=false
-for i in $(seq $DB_COUNT); do
-    if [ "${row_count_ori[i]}" != "${row_count_new[i]}" ];then
-        fail=true
-        echo "TEST: [$TEST_NAME] fail on database $DB${i}"
-    fi
-    echo "database $DB${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}"
-done
+    fail=false
+    for i in $(seq $DB_COUNT); do
+        if [ "${row_count_ori[i]}" != "${row_count_new[i]}" ];then
+            fail=true
+            echo "TEST: [$TEST_NAME] fail on database $DB${i}"
+        fi
+        echo "database $DB${i} [original] row count: ${row_count_ori[i]}, [after br] row count: ${row_count_new[i]}"
+    done

-if $fail; then
-    echo "TEST: [$TEST_NAME] failed!"
-    exit 1
-else
-    echo "TEST: [$TEST_NAME] successed!"
-fi

+    if $fail; then
+        echo "TEST: [$TEST_NAME] failed!"
+        exit 1
+    else
+        echo "TEST: [$TEST_NAME] successed!"
+    fi
+    # prepare for next test
+    S3_KEY="&access-key=$MINIO_ACCESS_KEY&secret-access-key=$MINIO_SECRET_KEY"
+    export AWS_ACCESS_KEY_ID=""
+    export AWS_SECRET_ACCESS_KEY=""
+    rm -rf "$TEST_DIR/$DB"
+    mkdir -p "$TEST_DIR/$DB"
+done

for i in $(seq $DB_COUNT); do
run_sql "DROP DATABASE $DB${i};"