diff --git a/CHANGELOG.md b/CHANGELOG.md index ff1e7ef5..2ad5482e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,40 @@ # Changelog +## [2.2.0](https://github.com/sygmaprotocol/sygma-relayer/compare/v2.1.2...v2.2.0) (2024-07-24) + + +### Features + +* btc fee collection ([#299](https://github.com/sygmaprotocol/sygma-relayer/issues/299)) ([9eeff52](https://github.com/sygmaprotocol/sygma-relayer/commit/9eeff5228db1c157ac53a91ea97732408437345b)) +* enable starting multiple tss processes with the same peer subset ([#331](https://github.com/sygmaprotocol/sygma-relayer/issues/331)) ([f65b16f](https://github.com/sygmaprotocol/sygma-relayer/commit/f65b16f9388e365a656817b526de5d8b1c39643a)) +* unlock pending btc proposals when doing retry ([#319](https://github.com/sygmaprotocol/sygma-relayer/issues/319)) ([5c67c79](https://github.com/sygmaprotocol/sygma-relayer/commit/5c67c793902c2ed121cccab6433bc8a1aa9aba84)) + + +### Bug Fixes + +* add fee estimate when calculating btc inputs ([#311](https://github.com/sygmaprotocol/sygma-relayer/issues/311)) ([0ca3a1b](https://github.com/sygmaprotocol/sygma-relayer/commit/0ca3a1b93d5d885a8c2686e86582df4413ea174b)) +* bad non zero check ([#315](https://github.com/sygmaprotocol/sygma-relayer/issues/315)) ([90ab5eb](https://github.com/sygmaprotocol/sygma-relayer/commit/90ab5ebe26ee157aab7999c053b4a08f026afcaf)) +* bump core to fix panic on invalid domain ([#328](https://github.com/sygmaprotocol/sygma-relayer/issues/328)) ([34830ae](https://github.com/sygmaprotocol/sygma-relayer/commit/34830aef2ddad502fea96ac3905762f7ddfba120)) +* enforce proposal execution per resource ([#309](https://github.com/sygmaprotocol/sygma-relayer/issues/309)) ([49a0777](https://github.com/sygmaprotocol/sygma-relayer/commit/49a07772563f1a579035f979ae6db6511f803e2b)) +* fix typos ([#313](https://github.com/sygmaprotocol/sygma-relayer/issues/313)) ([a3c54af](https://github.com/sygmaprotocol/sygma-relayer/commit/a3c54afd957bae2d1e918846a1319059811b6f12)) +* handle msg.To being empty properly ([#312](https://github.com/sygmaprotocol/sygma-relayer/issues/312)) ([68a3acc](https://github.com/sygmaprotocol/sygma-relayer/commit/68a3accae212d7712b94079f8c33133572d8c479)) +* ignore refresh errors to prevent infinite loops ([#332](https://github.com/sygmaprotocol/sygma-relayer/issues/332)) ([496e9ae](https://github.com/sygmaprotocol/sygma-relayer/commit/496e9aed26cbb041f92c34ee9ea39f3f939865a7)) +* is proposal executed nonce ([#335](https://github.com/sygmaprotocol/sygma-relayer/issues/335)) ([a966428](https://github.com/sygmaprotocol/sygma-relayer/commit/a96642887db523557f7d19506df481b24225d4b1)) +* possible race condition ([#316](https://github.com/sygmaprotocol/sygma-relayer/issues/316)) ([b9b5341](https://github.com/sygmaprotocol/sygma-relayer/commit/b9b5341f61b7cdf1ca2c408d1f05074d979a4e80)) +* post audit fixes ([#329](https://github.com/sygmaprotocol/sygma-relayer/issues/329)) ([d23d61d](https://github.com/sygmaprotocol/sygma-relayer/commit/d23d61d4be792c030a74c832547865e2e8c05035)) +* reduce tx nonce collision chances ([#314](https://github.com/sygmaprotocol/sygma-relayer/issues/314)) ([5431db5](https://github.com/sygmaprotocol/sygma-relayer/commit/5431db55c1dcb47b41616032ed0cc376d0fcf78e)) +* remove ECDSA peer ([#324](https://github.com/sygmaprotocol/sygma-relayer/issues/324)) ([3c4cbaa](https://github.com/sygmaprotocol/sygma-relayer/commit/3c4cbaa5a5bae5d14af2f3e343445dc56c17d84f)) +* schnorr signature hash missing leading zeroes 
([#341](https://github.com/sygmaprotocol/sygma-relayer/issues/341)) ([34f52b1](https://github.com/sygmaprotocol/sygma-relayer/commit/34f52b17c314aca63e4d32c7988eb1e7049eb576)) +* sort utxos by oldest to prevent mismatched messages ([#317](https://github.com/sygmaprotocol/sygma-relayer/issues/317)) ([033b58a](https://github.com/sygmaprotocol/sygma-relayer/commit/033b58a92236bf8c97198912cbfb4c0ec487e7b4)) +* update key threshold when doing frost resharing ([#333](https://github.com/sygmaprotocol/sygma-relayer/issues/333)) ([f1e6db1](https://github.com/sygmaprotocol/sygma-relayer/commit/f1e6db13998487c6af77097615f68344c84c8f53)) +* update sygma-core version ([#321](https://github.com/sygmaprotocol/sygma-relayer/issues/321)) ([94429d4](https://github.com/sygmaprotocol/sygma-relayer/commit/94429d42b26726ccfa8414a24dacdc61b51f5783)) + + +### Miscellaneous + +* add btc deposit format explanation ([#318](https://github.com/sygmaprotocol/sygma-relayer/issues/318)) ([080554c](https://github.com/sygmaprotocol/sygma-relayer/commit/080554c24662acb74153789c343dcc8fb57b5e5c)) +* bump polkadot libs ([#330](https://github.com/sygmaprotocol/sygma-relayer/issues/330)) ([1119f23](https://github.com/sygmaprotocol/sygma-relayer/commit/1119f231542225bc64c2e4511e35e63d382fe11c)) + ## [2.1.2](https://github.com/sygmaprotocol/sygma-relayer/compare/v2.1.1...v2.1.2) (2024-06-21) diff --git a/app/app.go b/app/app.go index 4eb62463..ef7b3929 100644 --- a/app/app.go +++ b/app/app.go @@ -224,8 +224,8 @@ func Run() error { eventHandlers = append(eventHandlers, hubEventHandlers.NewDepositEventHandler(depositListener, depositHandler, bridgeAddress, *config.GeneralChainConfig.Id, msgChan)) eventHandlers = append(eventHandlers, hubEventHandlers.NewKeygenEventHandler(l, tssListener, coordinator, host, communication, keyshareStore, bridgeAddress, networkTopology.Threshold)) eventHandlers = append(eventHandlers, hubEventHandlers.NewFrostKeygenEventHandler(l, tssListener, coordinator, host, communication, frostKeyshareStore, frostAddress, networkTopology.Threshold)) - eventHandlers = append(eventHandlers, hubEventHandlers.NewRefreshEventHandler(l, topologyProvider, topologyStore, tssListener, coordinator, host, communication, connectionGate, keyshareStore, bridgeAddress)) - eventHandlers = append(eventHandlers, hubEventHandlers.NewRetryEventHandler(l, tssListener, depositHandler, bridgeAddress, *config.GeneralChainConfig.Id, config.BlockConfirmations, msgChan)) + eventHandlers = append(eventHandlers, hubEventHandlers.NewRefreshEventHandler(l, topologyProvider, topologyStore, tssListener, coordinator, host, communication, connectionGate, keyshareStore, frostKeyshareStore, bridgeAddress)) + eventHandlers = append(eventHandlers, hubEventHandlers.NewRetryEventHandler(l, tssListener, depositHandler, propStore, bridgeAddress, *config.GeneralChainConfig.Id, config.BlockConfirmations, msgChan)) evmListener := listener.NewEVMListener(client, eventHandlers, blockstore, sygmaMetrics, *config.GeneralChainConfig.Id, config.BlockRetryInterval, config.BlockConfirmations, config.BlockInterval) executor := executor.NewExecutor(host, communication, coordinator, bridgeContract, keyshareStore, exitLock, config.GasLimit.Uint64()) @@ -325,7 +325,7 @@ func Run() error { resources := make(map[[32]byte]btcConfig.Resource) for _, resource := range config.Resources { resources[resource.ResourceID] = resource - eventHandlers = append(eventHandlers, btcListener.NewFungibleTransferEventHandler(l, *config.GeneralChainConfig.Id, depositHandler, msgChan, conn, 
resource)) + eventHandlers = append(eventHandlers, btcListener.NewFungibleTransferEventHandler(l, *config.GeneralChainConfig.Id, depositHandler, msgChan, conn, resource, config.FeeAddress)) } listener := btcListener.NewBtcListener(conn, eventHandlers, config, blockstore) diff --git a/chains/btc/config/config.go b/chains/btc/config/config.go index c43abc9b..6131a06c 100644 --- a/chains/btc/config/config.go +++ b/chains/btc/config/config.go @@ -19,12 +19,14 @@ import ( type RawResource struct { Address string ResourceID string + FeeAmount string Tweak string Script string } type Resource struct { Address btcutil.Address + FeeAmount *big.Int ResourceID [32]byte Tweak string Script []byte @@ -34,6 +36,7 @@ type RawBtcConfig struct { chain.GeneralChainConfig `mapstructure:",squash"` Resources []RawResource `mapstrcture:"resources"` StartBlock int64 `mapstructure:"startBlock"` + FeeAddress string `mapstructure:"feeAddress"` Username string `mapstructure:"username"` Password string `mapstructure:"password"` BlockInterval int64 `mapstructure:"blockInterval" default:"5"` @@ -48,7 +51,7 @@ func (c *RawBtcConfig) Validate() error { return err } - if c.BlockConfirmations != 0 && c.BlockConfirmations < 1 { + if c.BlockConfirmations < 1 { return fmt.Errorf("blockConfirmations has to be >=1") } @@ -65,6 +68,7 @@ func (c *RawBtcConfig) Validate() error { type BtcConfig struct { GeneralChainConfig chain.GeneralChainConfig Resources []Resource + FeeAddress btcutil.Address Username string Password string StartBlock *big.Int @@ -100,7 +104,10 @@ func NewBtcConfig(chainConfig map[string]interface{}) (*BtcConfig, error) { if err != nil { return nil, err } - + feeAddress, err := btcutil.DecodeAddress(c.FeeAddress, &networkParams) + if err != nil { + return nil, err + } resources := make([]Resource, len(c.Resources)) for i, r := range c.Resources { scriptBytes, err := hex.DecodeString(r.Script) @@ -108,6 +115,11 @@ func NewBtcConfig(chainConfig map[string]interface{}) (*BtcConfig, error) { return nil, err } + feeAmount, success := new(big.Int).SetString(r.FeeAmount, 10) + if !success { + return nil, fmt.Errorf("error: could not convert string to *big.Int") + } + address, err := btcutil.DecodeAddress(r.Address, &networkParams) if err != nil { return nil, err @@ -123,6 +135,7 @@ func NewBtcConfig(chainConfig map[string]interface{}) (*BtcConfig, error) { ResourceID: resource32Bytes, Script: scriptBytes, Tweak: r.Tweak, + FeeAmount: feeAmount, } } @@ -137,6 +150,7 @@ func NewBtcConfig(chainConfig map[string]interface{}) (*BtcConfig, error) { Password: c.Password, Network: networkParams, MempoolUrl: c.MempoolUrl, + FeeAddress: feeAddress, Resources: resources, } return config, nil diff --git a/chains/btc/config/config_test.go b/chains/btc/config/config_test.go index 2dfaf0df..78512453 100644 --- a/chains/btc/config/config_test.go +++ b/chains/btc/config/config_test.go @@ -93,18 +93,21 @@ func (s *NewBtcConfigTestSuite) Test_InvalidPassword() { func (s *NewBtcConfigTestSuite) Test_ValidConfig() { expectedResource := listener.SliceTo32Bytes(common.LeftPadBytes([]byte{3}, 31)) expectedAddress, _ := btcutil.DecodeAddress("tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", &chaincfg.TestNet3Params) + feeAddress, _ := btcutil.DecodeAddress("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt", &chaincfg.TestNet3Params) expectedScript, _ := hex.DecodeString("51206a698882348433b57d549d6344f74500fcd13ad8d2200cdf89f8e39e5cafa7d5") rawConfig := map[string]interface{}{ - "id": 1, - "endpoint": "ws://domain.com", - "name": "btc1", - "username": 
"username", - "password": "pass123", - "network": "testnet", + "id": 1, + "endpoint": "ws://domain.com", + "name": "btc1", + "username": "username", + "password": "pass123", + "network": "testnet", + "feeAddress": "mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt", "resources": []interface{}{ config.RawResource{ Address: "tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", + FeeAmount: "10000000", ResourceID: "0x0000000000000000000000000000000000000000000000000000000000000300", Script: "51206a698882348433b57d549d6344f74500fcd13ad8d2200cdf89f8e39e5cafa7d5", Tweak: "tweak", @@ -130,12 +133,14 @@ func (s *NewBtcConfigTestSuite) Test_ValidConfig() { BlockInterval: big.NewInt(5), BlockRetryInterval: time.Duration(5) * time.Second, Network: chaincfg.TestNet3Params, + FeeAddress: feeAddress, Resources: []config.Resource{ { Address: expectedAddress, ResourceID: expectedResource, Script: expectedScript, Tweak: "tweak", + FeeAmount: big.NewInt(10000000), }, }, }) diff --git a/chains/btc/executor/executor.go b/chains/btc/executor/executor.go index 0e2ff953..a1a75e02 100644 --- a/chains/btc/executor/executor.go +++ b/chains/btc/executor/executor.go @@ -4,7 +4,6 @@ import ( "context" "encoding/hex" "fmt" - "math/big" "sync" "time" @@ -31,8 +30,9 @@ import ( var ( signingTimeout = 30 * time.Minute - INPUT_SIZE = 180 - OUTPUT_SIZE = 34 + INPUT_SIZE uint64 = 180 + OUTPUT_SIZE uint64 = 34 + FEE_ROUNDING_FACTOR uint64 = 5 ) type MempoolAPI interface { @@ -93,30 +93,54 @@ func (e *Executor) Execute(proposals []*proposal.Proposal) error { e.exitLock.RLock() defer e.exitLock.RUnlock() - sessionID := proposals[0].MessageID - props, err := e.proposalsForExecution(proposals) + messageID := proposals[0].MessageID + props, err := e.proposalsForExecution(proposals, messageID) if err != nil { return err } if len(props) == 0 { return nil } - resource, ok := e.resources[props[0].Data.ResourceId] - if !ok { - return fmt.Errorf("no address for resource") + + propsPerResource := make(map[[32]byte][]*BtcTransferProposal) + for _, prop := range props { + propsPerResource[prop.Data.ResourceId] = append(propsPerResource[prop.Data.ResourceId], prop) } + p := pool.New().WithErrors() + for resourceID, props := range propsPerResource { + resourceID := resourceID + props := props + + p.Go(func() error { + resource, ok := e.resources[resourceID] + if !ok { + return fmt.Errorf("no resource for ID %s", hex.EncodeToString(resourceID[:])) + } + + return e.executeResourceProps(props, resource, messageID) + }) + } + return p.Wait() +} + +func (e *Executor) executeResourceProps(props []*BtcTransferProposal, resource config.Resource, messageID string) error { + log.Info().Str("messageID", messageID).Msgf("Executing proposals %+v for resource %s", props, hex.EncodeToString(resource.ResourceID[:])) + tx, utxos, err := e.rawTx(props, resource) if err != nil { return err } - sigChn := make(chan interface{}) + sigChn := make(chan interface{}, len(tx.TxIn)) p := pool.New().WithErrors() executionContext, cancelExecution := context.WithCancel(context.Background()) watchContext, cancelWatch := context.WithCancel(context.Background()) + sessionID := fmt.Sprintf("%s-%s", messageID, hex.EncodeToString(resource.ResourceID[:])) defer cancelWatch() - p.Go(func() error { return e.watchExecution(watchContext, cancelExecution, tx, props, sigChn, sessionID) }) + p.Go(func() error { + return e.watchExecution(watchContext, cancelExecution, tx, props, sigChn, sessionID, messageID) + }) prevOuts := make(map[wire.OutPoint]*wire.TxOut) for _, utxo := range utxos { txOut := 
wire.NewTxOut(int64(utxo.Value), resource.Script) @@ -129,33 +153,47 @@ func (e *Executor) Execute(proposals []*proposal.Proposal) error { prevOutputFetcher := txscript.NewMultiPrevOutFetcher(prevOuts) sigHashes := txscript.NewTxSigHashes(tx, prevOutputFetcher) + var buf buffer.Buffer + _ = tx.Serialize(&buf) + bytes := buf.Bytes() + log.Info().Str("messageID", messageID).Msgf("Assembled raw unsigned transaction %s", hex.EncodeToString(bytes)) + // we need to sign each input individually + tssProcesses := make([]tss.TssProcess, len(tx.TxIn)) for i := range tx.TxIn { - txHash, err := txscript.CalcTaprootSignatureHash(sigHashes, txscript.SigHashDefault, tx, i, prevOutputFetcher) + sessionID := fmt.Sprintf("%s-%d", sessionID, i) + signingHash, err := txscript.CalcTaprootSignatureHash(sigHashes, txscript.SigHashDefault, tx, i, prevOutputFetcher) if err != nil { return err } - p.Go(func() error { - msg := new(big.Int) - msg.SetBytes(txHash[:]) - signing, err := signing.NewSigning( - i, - msg, - resource.Tweak, - fmt.Sprintf("%s-%d", sessionID, i), - e.host, - e.comm, - e.fetcher) - if err != nil { - return err - } - return e.coordinator.Execute(executionContext, signing, sigChn) - }) + signing, err := signing.NewSigning( + i, + signingHash, + resource.Tweak, + messageID, + sessionID, + e.host, + e.comm, + e.fetcher) + if err != nil { + return err + } + tssProcesses[i] = signing } + p.Go(func() error { + return e.coordinator.Execute(executionContext, tssProcesses, sigChn) + }) return p.Wait() } -func (e *Executor) watchExecution(ctx context.Context, cancelExecution context.CancelFunc, tx *wire.MsgTx, proposals []*BtcTransferProposal, sigChn chan interface{}, sessionID string) error { +func (e *Executor) watchExecution( + ctx context.Context, + cancelExecution context.CancelFunc, + tx *wire.MsgTx, + proposals []*BtcTransferProposal, + sigChn chan interface{}, + sessionID string, + messageID string) error { timeout := time.NewTicker(signingTimeout) defer timeout.Stop() defer cancelExecution() @@ -175,14 +213,16 @@ func (e *Executor) watchExecution(ctx context.Context, cancelExecution context.C } cancelExecution() - hash, err := e.sendTx(tx, signatures) + hash, err := e.sendTx(tx, signatures, messageID) if err != nil { _ = e.comm.Broadcast(e.host.Peerstore().Peers(), []byte{}, comm.TssFailMsg, sessionID) e.storeProposalsStatus(proposals, store.FailedProp) return err } - log.Info().Str("SessionID", sessionID).Msgf("Sent proposals execution with hash: %s", hash) + e.storeProposalsStatus(proposals, store.ExecutedProp) + log.Info().Str("messageID", messageID).Msgf("Sent proposals execution with hash: %s", hash) + return nil } case <-timeout.C: { @@ -202,33 +242,37 @@ func (e *Executor) rawTx(proposals []*BtcTransferProposal, resource config.Resou if err != nil { return nil, nil, err } - inputAmount, utxos, err := e.inputs(tx, resource.Address, outputAmount) + feeEstimate, err := e.fee(uint64(len(proposals)), uint64(len(proposals))) + if err != nil { + return nil, nil, err + } + inputAmount, utxos, err := e.inputs(tx, resource.Address, outputAmount+feeEstimate) if err != nil { return nil, nil, err } if inputAmount < outputAmount { return nil, nil, fmt.Errorf("utxo input amount %d less than output amount %d", inputAmount, outputAmount) } - fee, err := e.fee(int64(len(utxos)), int64(len(proposals))+1) + fee, err := e.fee(uint64(len(utxos)), uint64(len(proposals))+1) if err != nil { return nil, nil, err } - returnAmount := int64(inputAmount) - fee - outputAmount + returnAmount := inputAmount - fee - 
outputAmount if returnAmount > 0 { // return extra funds returnScript, err := txscript.PayToAddrScript(resource.Address) if err != nil { return nil, nil, err } - txOut := wire.NewTxOut(returnAmount, returnScript) + txOut := wire.NewTxOut(int64(returnAmount), returnScript) tx.AddTxOut(txOut) } return tx, utxos, err } -func (e *Executor) outputs(tx *wire.MsgTx, proposals []*BtcTransferProposal) (int64, error) { - outputAmount := int64(0) +func (e *Executor) outputs(tx *wire.MsgTx, proposals []*BtcTransferProposal) (uint64, error) { + outputAmount := uint64(0) for _, prop := range proposals { addr, err := btcutil.DecodeAddress(prop.Data.Recipient, &e.chainCfg) if err != nil { @@ -238,16 +282,16 @@ func (e *Executor) outputs(tx *wire.MsgTx, proposals []*BtcTransferProposal) (in if err != nil { return 0, err } - txOut := wire.NewTxOut(prop.Data.Amount, destinationAddrByte) + txOut := wire.NewTxOut(int64(prop.Data.Amount), destinationAddrByte) tx.AddTxOut(txOut) outputAmount += prop.Data.Amount } return outputAmount, nil } -func (e *Executor) inputs(tx *wire.MsgTx, address btcutil.Address, outputAmount int64) (int64, []mempool.Utxo, error) { +func (e *Executor) inputs(tx *wire.MsgTx, address btcutil.Address, outputAmount uint64) (uint64, []mempool.Utxo, error) { usedUtxos := make([]mempool.Utxo, 0) - inputAmount := int64(0) + inputAmount := uint64(0) utxos, err := e.mempool.Utxos(address.String()) if err != nil { return 0, nil, err @@ -262,7 +306,7 @@ func (e *Executor) inputs(tx *wire.MsgTx, address btcutil.Address, outputAmount tx.AddTxIn(txIn) usedUtxos = append(usedUtxos, utxo) - inputAmount += int64(utxo.Value) + inputAmount += uint64(utxo.Value) if inputAmount > outputAmount { break } @@ -270,15 +314,16 @@ func (e *Executor) inputs(tx *wire.MsgTx, address btcutil.Address, outputAmount return inputAmount, usedUtxos, nil } -func (e *Executor) fee(numOfInputs, numOfOutputs int64) (int64, error) { +func (e *Executor) fee(numOfInputs, numOfOutputs uint64) (uint64, error) { recommendedFee, err := e.mempool.RecommendedFee() if err != nil { return 0, err } - return (numOfInputs*int64(INPUT_SIZE) + numOfOutputs*int64(OUTPUT_SIZE)) * recommendedFee.EconomyFee, nil + + return (numOfInputs*INPUT_SIZE + numOfOutputs*OUTPUT_SIZE) * ((recommendedFee.EconomyFee/FEE_ROUNDING_FACTOR)*FEE_ROUNDING_FACTOR + FEE_ROUNDING_FACTOR), nil } -func (e *Executor) sendTx(tx *wire.MsgTx, signatures []taproot.Signature) (*chainhash.Hash, error) { +func (e *Executor) sendTx(tx *wire.MsgTx, signatures []taproot.Signature, messageID string) (*chainhash.Hash, error) { for i, sig := range signatures { tx.TxIn[i].Witness = wire.TxWitness{sig} } @@ -289,7 +334,7 @@ func (e *Executor) sendTx(tx *wire.MsgTx, signatures []taproot.Signature) (*chai return nil, err } bytes := buf.Bytes() - log.Debug().Msgf("Assembled raw transaction %s", hex.EncodeToString(bytes)) + log.Debug().Str("messageID", messageID).Msgf("Assembled raw transaction %s", hex.EncodeToString(bytes)) return e.conn.SendRawTransaction(tx, true) } @@ -303,7 +348,7 @@ func (e *Executor) signaturesFilled(signatures []taproot.Signature) bool { return true } -func (e *Executor) proposalsForExecution(proposals []*proposal.Proposal) ([]*BtcTransferProposal, error) { +func (e *Executor) proposalsForExecution(proposals []*proposal.Proposal, messageID string) ([]*BtcTransferProposal, error) { e.propMutex.Lock() props := make([]*BtcTransferProposal, 0) for _, prop := range proposals { @@ -313,7 +358,7 @@ func (e *Executor) proposalsForExecution(proposals []*proposal.Proposal) 
([]*Btc } if executed { - log.Info().Msgf("Proposal %s already executed", fmt.Sprintf("%d-%d-%d", prop.Source, prop.Destination, prop.Data.(BtcTransferProposalData).DepositNonce)) + log.Warn().Str("messageID", messageID).Msgf("Proposal %s already executed", fmt.Sprintf("%d-%d-%d", prop.Source, prop.Destination, prop.Data.(BtcTransferProposalData).DepositNonce)) continue } @@ -344,6 +389,7 @@ func (e *Executor) isExecuted(prop *proposal.Proposal) (bool, error) { } func (e *Executor) storeProposalsStatus(props []*BtcTransferProposal, status store.PropStatus) { + e.propMutex.Lock() for _, prop := range props { err := e.propStorer.StorePropStatus( prop.Source, @@ -354,4 +400,5 @@ func (e *Executor) storeProposalsStatus(props []*BtcTransferProposal, status sto log.Err(err).Msgf("Failed storing proposal %+v status %s", prop, status) } } + e.propMutex.Unlock() } diff --git a/chains/btc/executor/message-handler.go b/chains/btc/executor/message-handler.go index 87620878..1c37813c 100644 --- a/chains/btc/executor/message-handler.go +++ b/chains/btc/executor/message-handler.go @@ -14,7 +14,7 @@ import ( ) type BtcTransferProposalData struct { - Amount int64 + Amount uint64 Recipient string DepositNonce uint64 ResourceId [32]byte @@ -64,7 +64,7 @@ func ERC20MessageHandler(msg *transfer.TransferMessage) (*proposal.Proposal, err bigAmount.Div(bigAmount, divisor) return proposal.NewProposal(msg.Source, msg.Destination, BtcTransferProposalData{ - Amount: bigAmount.Int64(), + Amount: bigAmount.Uint64(), Recipient: string(recipient), DepositNonce: msg.Data.DepositNonce, ResourceId: msg.Data.ResourceId, diff --git a/chains/btc/listener/deposit-handler.go b/chains/btc/listener/deposit-handler.go index 278ceacf..df36e252 100644 --- a/chains/btc/listener/deposit-handler.go +++ b/chains/btc/listener/deposit-handler.go @@ -22,7 +22,8 @@ func NewBtcDepositHandler() *BtcDepositHandler { return &BtcDepositHandler{} } -func (e *BtcDepositHandler) HandleDeposit(sourceID uint8, +func (e *BtcDepositHandler) HandleDeposit( + sourceID uint8, depositNonce uint64, resourceID [32]byte, amount *big.Int, diff --git a/chains/btc/listener/event-handlers.go b/chains/btc/listener/event-handlers.go index 925c0ebc..08745d66 100644 --- a/chains/btc/listener/event-handlers.go +++ b/chains/btc/listener/event-handlers.go @@ -4,18 +4,20 @@ package listener import ( + "crypto/sha256" + "encoding/binary" "math/big" - "strconv" "github.com/ChainSafe/sygma-relayer/chains/btc/config" "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/sygmaprotocol/sygma-core/relayer/message" ) type Deposit struct { - // ResourceID used to find address of handler to be used for deposit + // ID of the resource that is transferred ResourceID [32]byte // Address of sender (msg.sender: user) SenderAddress string @@ -38,16 +40,18 @@ type DepositHandler interface { type FungibleTransferEventHandler struct { depositHandler DepositHandler domainID uint8 + feeAddress btcutil.Address log zerolog.Logger conn Connection msgChan chan []*message.Message resource config.Resource } -func NewFungibleTransferEventHandler(logC zerolog.Context, domainID uint8, depositHandler DepositHandler, msgChan chan []*message.Message, conn Connection, resource config.Resource) *FungibleTransferEventHandler { +func NewFungibleTransferEventHandler(logC zerolog.Context, domainID uint8, depositHandler DepositHandler, msgChan chan []*message.Message, conn Connection, resource config.Resource, feeAddress
btcutil.Address) *FungibleTransferEventHandler { return &FungibleTransferEventHandler{ depositHandler: depositHandler, domainID: domainID, + feeAddress: feeAddress, log: logC.Logger(), conn: conn, msgChan: msgChan, @@ -62,7 +66,7 @@ func (eh *FungibleTransferEventHandler) HandleEvents(blockNumber *big.Int) error eh.log.Error().Err(err).Msg("Error fetching events") return err } - for evtNumber, evt := range evts { + for _, evt := range evts { err := func(evt btcjson.TxRawResult) error { defer func() { if r := recover(); r != nil { @@ -70,7 +74,7 @@ func (eh *FungibleTransferEventHandler) HandleEvents(blockNumber *big.Int) error } }() - d, isDeposit, err := DecodeDepositEvent(evt, eh.resource) + d, isDeposit, err := DecodeDepositEvent(evt, eh.resource, eh.feeAddress) if err != nil { return err } @@ -78,7 +82,7 @@ func (eh *FungibleTransferEventHandler) HandleEvents(blockNumber *big.Int) error if !isDeposit { return nil } - nonce, err := eh.CalculateNonce(blockNumber, evtNumber) + nonce, err := eh.CalculateNonce(blockNumber, evt.Hash) if err != nil { return err } @@ -119,23 +123,23 @@ func (eh *FungibleTransferEventHandler) FetchEvents(startBlock *big.Int) ([]btcj return block.Tx, nil } -func (eh *FungibleTransferEventHandler) CalculateNonce(blockNumber *big.Int, evtNumber int) (uint64, error) { +func (eh *FungibleTransferEventHandler) CalculateNonce(blockNumber *big.Int, transactionHash string) (uint64, error) { // Convert blockNumber to string blockNumberStr := blockNumber.String() - // Convert evtNumber to *big.Int - evtNumberBigInt := big.NewInt(int64(evtNumber)) + // Concatenate blockNumberStr and transactionHash with a separator + concatenatedStr := blockNumberStr + "-" + transactionHash - // Convert evtNumberBigInt to string - evtNumberStr := evtNumberBigInt.String() + // Calculate SHA-256 hash of the concatenated string + hash := sha256.New() + hash.Write([]byte(concatenatedStr)) + hashBytes := hash.Sum(nil) - // Concatenate blockNumberStr and evtNumberStr - concatenatedStr := blockNumberStr + evtNumberStr - - // Parse the concatenated string to uint64 - result, err := strconv.ParseUint(concatenatedStr, 10, 64) - if err != nil { - return 0, err + // XOR fold the hash to get a 64-bit value + var result uint64 + for i := 0; i < 4; i++ { + part := binary.BigEndian.Uint64(hashBytes[i*8 : (i+1)*8]) + result ^= part } return result, nil diff --git a/chains/btc/listener/event-handlers_test.go b/chains/btc/listener/event-handlers_test.go index e408411e..61f1ab20 100644 --- a/chains/btc/listener/event-handlers_test.go +++ b/chains/btc/listener/event-handlers_test.go @@ -34,6 +34,7 @@ type DepositHandlerTestSuite struct { resource config.Resource msgChan chan []*message.Message mockConn *mock_listener.MockConnection + feeAddress btcutil.Address } func TestRunDepositHandlerTestSuite(t *testing.T) { @@ -43,12 +44,14 @@ func TestRunDepositHandlerTestSuite(t *testing.T) { func (s *DepositHandlerTestSuite) SetupTest() { ctrl := gomock.NewController(s.T()) s.domainID = 1 - address, _ := btcutil.DecodeAddress("tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", &chaincfg.TestNet3Params) - s.resource = config.Resource{Address: address, ResourceID: [32]byte{}} + address, _ := btcutil.DecodeAddress("tb1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2slafjvv", &chaincfg.TestNet3Params) + s.feeAddress, _ = btcutil.DecodeAddress("tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", &chaincfg.TestNet3Params) + + s.resource = config.Resource{Address: address, ResourceID: [32]byte{}, FeeAmount: big.NewInt(10000)} 
s.mockDepositHandler = mock_listener.NewMockDepositHandler(ctrl) s.msgChan = make(chan []*message.Message, 2) s.mockConn = mock_listener.NewMockConnection(ctrl) - s.fungibleTransferEventHandler = listener.NewFungibleTransferEventHandler(zerolog.Context{}, s.domainID, s.mockDepositHandler, s.msgChan, s.mockConn, s.resource) + s.fungibleTransferEventHandler = listener.NewFungibleTransferEventHandler(zerolog.Context{}, s.domainID, s.mockDepositHandler, s.msgChan, s.mockConn, s.resource, s.feeAddress) } func (s *DepositHandlerTestSuite) Test_FetchDepositFails_GetBlockHashError() { @@ -67,26 +70,18 @@ func (s *DepositHandlerTestSuite) Test_FetchDepositFails_GetBlockVerboseTxError( s.NotNil(err) } -func (s *DepositHandlerTestSuite) Test_CalculateNonceFail_BlockNumberOverflow() { - - blockNumber := new(big.Int) - blockNumber.SetString("18446744073709551616", 10) - nonce, err := s.fungibleTransferEventHandler.CalculateNonce(blockNumber, 5) - s.Equal(nonce, uint64(0)) - s.NotNil(err) -} - func (s *DepositHandlerTestSuite) Test_CalculateNonce() { - blockNumber := big.NewInt(123) - nonce, err := s.fungibleTransferEventHandler.CalculateNonce(blockNumber, 4) - s.Equal(nonce, uint64(1234)) + blockNumber := big.NewInt(850000) + nonce, err := s.fungibleTransferEventHandler.CalculateNonce(blockNumber, "a3f1e4d8b3c5e2a1f6d3c7e4b8a9f3e2c1d4a6b7c8e3f1d2c4b5a6e7") + fmt.Println(nonce) + s.Equal(nonce, uint64(12849897320021645821)) s.Nil(err) } func (s *DepositHandlerTestSuite) Test_HandleDepositFails_ExecutionContinue() { blockNumber := big.NewInt(100) data2 := map[string]any{ - "deposit_nonce": uint64(1001), + "deposit_nonce": uint64(8228687738678474667), "resource_id": [32]byte{0}, "amount": big.NewInt(19000), "deposit_data": "0xe9f23A8289764280697a03aC06795eA92a170e42_1", @@ -129,10 +124,17 @@ func (s *DepositHandlerTestSuite) Test_HandleDepositFails_ExecutionContinue() { { ScriptPubKey: btcjson.ScriptPubKeyResult{ Type: "witness_v1_taproot", - Address: "tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", + Address: "tb1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2slafjvv", }, Value: float64(0.00019), }, + { + ScriptPubKey: btcjson.ScriptPubKeyResult{ + Type: "witness_v1_taproot", + Address: "tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", + }, + Value: float64(0.0002), + }, }, } diff --git a/chains/btc/listener/util.go b/chains/btc/listener/util.go index ab4012f8..d41a5146 100644 --- a/chains/btc/listener/util.go +++ b/chains/btc/listener/util.go @@ -6,6 +6,7 @@ import ( "github.com/ChainSafe/sygma-relayer/chains/btc/config" "github.com/btcsuite/btcd/btcjson" + "github.com/btcsuite/btcd/btcutil" ) const ( @@ -13,8 +14,10 @@ const ( OP_RETURN = "nulldata" ) -func DecodeDepositEvent(evt btcjson.TxRawResult, resource config.Resource) (Deposit, bool, error) { +func DecodeDepositEvent(evt btcjson.TxRawResult, resource config.Resource, feeAddress btcutil.Address) (Deposit, bool, error) { amount := big.NewInt(0) + feeAmount := big.NewInt(0) + isBridgeDeposit := false sender := "" data := "" @@ -37,9 +40,13 @@ func DecodeDepositEvent(evt btcjson.TxRawResult, resource config.Resource) (Depo amount.Add(amount, big.NewInt(int64(vout.Value*1e8))) } } + + if feeAddress.String() == vout.ScriptPubKey.Address { + feeAmount.Add(feeAmount, big.NewInt(int64(vout.Value*1e8))) + } } - if !isBridgeDeposit { + if !isBridgeDeposit || (feeAmount.Cmp(resource.FeeAmount) == -1) { return Deposit{}, false, nil } diff --git a/chains/btc/listener/util_test.go b/chains/btc/listener/util_test.go index 25aba540..4dd12ed0 100644 --- 
a/chains/btc/listener/util_test.go +++ b/chains/btc/listener/util_test.go @@ -16,8 +16,9 @@ import ( type DecodeEventsSuite struct { suite.Suite - mockConn *mock_listener.MockConnection - resource config.Resource + mockConn *mock_listener.MockConnection + resource config.Resource + feeAddress btcutil.Address } func TestRunDecodeDepositEventsSuite(t *testing.T) { @@ -27,7 +28,8 @@ func TestRunDecodeDepositEventsSuite(t *testing.T) { func (s *DecodeEventsSuite) SetupTest() { ctrl := gomock.NewController(s.T()) address, _ := btcutil.DecodeAddress("tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", &chaincfg.TestNet3Params) - s.resource = config.Resource{Address: address, ResourceID: [32]byte{}} + s.resource = config.Resource{Address: address, ResourceID: [32]byte{}, FeeAmount: big.NewInt(100000000)} + s.feeAddress, _ = btcutil.DecodeAddress("tb1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2slafjvv", &chaincfg.TestNet3Params) s.mockConn = mock_listener.NewMockConnection(ctrl) } @@ -56,7 +58,7 @@ func (s *DecodeEventsSuite) Test_DecodeDepositEvent_ErrorDecodingOPRETURNData() }, } - deposit, isDeposit, err := listener.DecodeDepositEvent(d1, s.resource) + deposit, isDeposit, err := listener.DecodeDepositEvent(d1, s.resource, s.feeAddress) s.Equal(isDeposit, true) s.NotNil(err) s.Equal(deposit, listener.Deposit{}) @@ -84,9 +86,16 @@ func (s *DecodeEventsSuite) Test_DecodeDepositEvent() { }, Value: float64(0.00019), }, + { + ScriptPubKey: btcjson.ScriptPubKeyResult{ + Type: "witness_v1_taproot", + Address: "tb1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2slafjvv", + }, + Value: float64(1), + }, }, } - deposit, isDeposit, err := listener.DecodeDepositEvent(d1, s.resource) + deposit, isDeposit, err := listener.DecodeDepositEvent(d1, s.resource, s.feeAddress) s.Equal(isDeposit, true) s.Nil(err) s.Equal(deposit, listener.Deposit{ @@ -96,6 +105,71 @@ func (s *DecodeEventsSuite) Test_DecodeDepositEvent() { }) } +func (s *DecodeEventsSuite) Test_DecodeDepositEvent_FeeNotSent() { + d1 := btcjson.TxRawResult{ + Vin: []btcjson.Vin{ + { + Txid: "00000000000000000008bba5a6ff31fdb9bb1d4147905b5b3c47a07a07235bfc", + }, + }, + Vout: []btcjson.Vout{ + { + ScriptPubKey: btcjson.ScriptPubKeyResult{ + Type: "nulldata", + Hex: "6a2c3078653966323341383238393736343238303639376130336143303637393565413932613137306534325f31", + }, + }, + { + ScriptPubKey: btcjson.ScriptPubKeyResult{ + Type: "witness_v1_taproot", + Address: "tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", + }, + Value: float64(0.00019), + }, + }, + } + deposit, isDeposit, err := listener.DecodeDepositEvent(d1, s.resource, s.feeAddress) + s.Equal(isDeposit, false) + s.Nil(err) + s.Equal(deposit, listener.Deposit{}) +} + +func (s *DecodeEventsSuite) Test_DecodeDepositEvent_NotEnoughFeeSent() { + d1 := btcjson.TxRawResult{ + Vin: []btcjson.Vin{ + { + Txid: "00000000000000000008bba5a6ff31fdb9bb1d4147905b5b3c47a07a07235bfc", + }, + }, + Vout: []btcjson.Vout{ + { + ScriptPubKey: btcjson.ScriptPubKeyResult{ + Type: "nulldata", + Hex: "6a2c3078653966323341383238393736343238303639376130336143303637393565413932613137306534325f31", + }, + }, + { + ScriptPubKey: btcjson.ScriptPubKeyResult{ + Type: "witness_v1_taproot", + Address: "tb1qln69zuhdunc9stwfh6t7adexxrcr04ppy6thgm", + }, + Value: float64(0.00019), + }, + { + ScriptPubKey: btcjson.ScriptPubKeyResult{ + Type: "witness_v1_taproot", + Address: "tb1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2slafjvv", + }, + Value: float64(0.9), + }, + }, + } + deposit, isDeposit, err := listener.DecodeDepositEvent(d1, 
s.resource, s.feeAddress) + s.Equal(isDeposit, false) + s.Nil(err) + s.Equal(deposit, listener.Deposit{}) +} + func (s *DecodeEventsSuite) Test_DecodeDepositEvent_NotBridgeDepositTx() { d1 := btcjson.TxRawResult{ Vin: []btcjson.Vin{ @@ -118,9 +192,16 @@ func (s *DecodeEventsSuite) Test_DecodeDepositEvent_NotBridgeDepositTx() { }, Value: float64(0.00019), }, + { + ScriptPubKey: btcjson.ScriptPubKeyResult{ + Type: "witness_v1_taproot", + Address: "tb1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2slafjvv", + }, + Value: float64(1), + }, }, } - deposit, isDeposit, err := listener.DecodeDepositEvent(d1, s.resource) + deposit, isDeposit, err := listener.DecodeDepositEvent(d1, s.resource, s.feeAddress) s.Equal(isDeposit, false) s.Nil(err) s.Equal(deposit, listener.Deposit{}) diff --git a/chains/btc/mempool/mempool.go b/chains/btc/mempool/mempool.go index 6692e171..814bfeb0 100644 --- a/chains/btc/mempool/mempool.go +++ b/chains/btc/mempool/mempool.go @@ -8,18 +8,26 @@ import ( "sort" ) +type Status struct { + Confirmed bool + BlockHeight uint64 `json:"block_height"` + BlockHash string `json:"block_hash"` + BlockTime uint64 `json:"block_time"` +} + type Utxo struct { - TxID string `json:"txid"` - Vout uint32 `json:"vout"` - Value uint64 `json:"value"` + TxID string `json:"txid"` + Vout uint32 `json:"vout"` + Value uint64 `json:"value"` + Status Status `json:"status"` } type Fee struct { - FastestFee int64 - HalfHourFee int64 - MinimumFee int64 - EconomyFee int64 - HourFee int64 + FastestFee uint64 + HalfHourFee uint64 + MinimumFee uint64 + EconomyFee uint64 + HourFee uint64 } type MempoolAPI struct { @@ -69,7 +77,11 @@ func (a *MempoolAPI) Utxos(address string) ([]Utxo, error) { return nil, err } sort.Slice(utxos, func(i int, j int) bool { - return utxos[i].TxID < utxos[j].TxID + if utxos[i].Status.BlockTime == utxos[j].Status.BlockTime { + return utxos[i].TxID < utxos[j].TxID + } else { + return utxos[i].Status.BlockTime < utxos[j].Status.BlockTime + } }) return utxos, nil diff --git a/chains/btc/mempool/mempool_test.go b/chains/btc/mempool/mempool_test.go index b6aecc4c..20c068b1 100644 --- a/chains/btc/mempool/mempool_test.go +++ b/chains/btc/mempool/mempool_test.go @@ -52,12 +52,35 @@ func (s *MempoolTestSuite) Test_Utxo_SuccessfulFetch() { { TxID: "28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe1", Vout: 0, - Value: 11198, + Value: 11197, + Status: mempool.Status{ + Confirmed: true, + BlockHeight: 2812826, + BlockHash: "000000000000001a01d4058773384f2c23aed5a7e5ede252f99e290fa58324a3", + BlockTime: 1715083122, + }, }, { - TxID: "28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe9", + TxID: "28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe2", Vout: 0, Value: 11197, + Status: mempool.Status{ + Confirmed: true, + BlockHeight: 2812826, + BlockHash: "000000000000001a01d4058773384f2c23aed5a7e5ede252f99e290fa58324a3", + BlockTime: 1715083122, + }, + }, + { + TxID: "28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe9", + Vout: 0, + Value: 11198, + Status: mempool.Status{ + Confirmed: true, + BlockHeight: 2812827, + BlockHash: "000000000000001a01d4058773384f2c23aed5a7e5ede252f99e290fa58324a5", + BlockTime: 1715083123, + }, }, }) } diff --git a/chains/btc/mempool/test-data/successful-utxo.json b/chains/btc/mempool/test-data/successful-utxo.json index 4ae65107..d47aceb3 100644 --- a/chains/btc/mempool/test-data/successful-utxo.json +++ b/chains/btc/mempool/test-data/successful-utxo.json @@ -1,4 +1,5 @@ [ - 
{"txid":"28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe9","vout":0,"status":{"confirmed":true,"block_height":2812826,"block_hash":"000000000000001a01d4058773384f2c23aed5a7e5ede252f99e290fa58324a3","block_time":1715083122},"value":11197}, - {"txid":"28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe1","vout":0,"status":{"confirmed":true,"block_height":2812826,"block_hash":"000000000000001a01d4058773384f2c23aed5a7e5ede252f99e290fa58324a3","block_time":1715083122},"value":11198} + {"txid":"28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe9","vout":0,"status":{"confirmed":true,"block_height":2812827,"block_hash":"000000000000001a01d4058773384f2c23aed5a7e5ede252f99e290fa58324a5","block_time":1715083123},"value":11198}, + {"txid":"28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe2","vout":0,"status":{"confirmed":true,"block_height":2812826,"block_hash":"000000000000001a01d4058773384f2c23aed5a7e5ede252f99e290fa58324a3","block_time":1715083122},"value":11197}, + {"txid":"28154e2008912d27978225c096c22ffe2ea65e1d55bf440ee41c21f9489c7fe1","vout":0,"status":{"confirmed":true,"block_height":2812826,"block_hash":"000000000000001a01d4058773384f2c23aed5a7e5ede252f99e290fa58324a3","block_time":1715083122},"value":11197} ] \ No newline at end of file diff --git a/chains/evm/calls/contracts/bridge/bridge.go b/chains/evm/calls/contracts/bridge/bridge.go index bc1f6804..47eab036 100644 --- a/chains/evm/calls/contracts/bridge/bridge.go +++ b/chains/evm/calls/contracts/bridge/bridge.go @@ -106,7 +106,7 @@ func (c *BridgeContract) IsProposalExecuted(p *transfer.TransferProposal) (bool, Str("depositNonce", strconv.FormatUint(p.Data.DepositNonce, 10)). Str("resourceID", hexutil.Encode(p.Data.ResourceId[:])). Msg("Getting is proposal executed") - res, err := c.CallContract("isProposalExecuted", p.Source, big.NewInt(int64(p.Data.DepositNonce))) + res, err := c.CallContract("isProposalExecuted", p.Source, new(big.Int).SetUint64(p.Data.DepositNonce)) if err != nil { return false, err } diff --git a/chains/evm/config.go b/chains/evm/config.go index 25c5e968..666e7129 100644 --- a/chains/evm/config.go +++ b/chains/evm/config.go @@ -81,7 +81,7 @@ func (c *RawEVMConfig) Validate() error { if c.Bridge == "" { return fmt.Errorf("required field chain.Bridge empty for chain %v", *c.Id) } - if c.BlockConfirmations != 0 && c.BlockConfirmations < 1 { + if c.BlockConfirmations < 1 { return fmt.Errorf("blockConfirmations has to be >=1") } return nil diff --git a/chains/evm/executor/executor.go b/chains/evm/executor/executor.go index 348ada6a..6b283454 100644 --- a/chains/evm/executor/executor.go +++ b/chains/evm/executor/executor.go @@ -87,6 +87,7 @@ func (e *Executor) Execute(proposals []*proposal.Proposal) error { if len(batch.proposals) == 0 { continue } + messageID := batch.proposals[0].MessageID b := batch p.Go(func() error { @@ -95,13 +96,14 @@ func (e *Executor) Execute(proposals []*proposal.Proposal) error { return err } - sessionID := fmt.Sprintf("%s-%d", batch.proposals[0].MessageID, i) + sessionID := fmt.Sprintf("%s-%d", messageID, i) log.Info().Str("messageID", batch.proposals[0].MessageID).Msgf("Starting session with ID: %s", sessionID) msg := big.NewInt(0) msg.SetBytes(propHash) signing, err := signing.NewSigning( msg, + messageID, sessionID, e.host, e.comm, @@ -115,21 +117,27 @@ func (e *Executor) Execute(proposals []*proposal.Proposal) error { watchContext, cancelWatch := context.WithCancel(context.Background()) ep := pool.New().WithErrors() ep.Go(func() 
error { - err := e.coordinator.Execute(executionContext, signing, sigChn) + err := e.coordinator.Execute(executionContext, []tss.TssProcess{signing}, sigChn) if err != nil { cancelWatch() } return err }) - ep.Go(func() error { return e.watchExecution(watchContext, cancelExecution, b, sigChn, sessionID) }) + ep.Go(func() error { return e.watchExecution(watchContext, cancelExecution, b, sigChn, sessionID, messageID) }) return ep.Wait() }) } return p.Wait() } -func (e *Executor) watchExecution(ctx context.Context, cancelExecution context.CancelFunc, batch *Batch, sigChn chan interface{}, sessionID string) error { +func (e *Executor) watchExecution( + ctx context.Context, + cancelExecution context.CancelFunc, + batch *Batch, + sigChn chan interface{}, + sessionID string, + messageID string) error { ticker := time.NewTicker(executionCheckPeriod) timeout := time.NewTicker(signingTimeout) defer ticker.Stop() @@ -152,7 +160,7 @@ func (e *Executor) watchExecution(ctx context.Context, cancelExecution context.C return err } - log.Info().Str("messageID", sessionID).Msgf("Sent proposals execution with hash: %s", hash) + log.Info().Str("messageID", messageID).Msgf("Sent proposals execution with hash: %s", hash) } case <-ticker.C: { @@ -160,7 +168,7 @@ func (e *Executor) watchExecution(ctx context.Context, cancelExecution context.C continue } - log.Info().Str("messageID", sessionID).Msgf("Successfully executed proposals") + log.Info().Str("messageID", messageID).Msgf("Successfully executed proposals") return nil } case <-timeout.C: diff --git a/chains/evm/listener/eventHandlers/event-handler.go b/chains/evm/listener/eventHandlers/event-handler.go index 0c4ba628..e1ee2e92 100644 --- a/chains/evm/listener/eventHandlers/event-handler.go +++ b/chains/evm/listener/eventHandlers/event-handler.go @@ -13,6 +13,8 @@ import ( "github.com/rs/zerolog/log" "github.com/ChainSafe/sygma-relayer/chains/evm/calls/consts" + "github.com/ChainSafe/sygma-relayer/relayer/transfer" + "github.com/ChainSafe/sygma-relayer/store" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/sygmaprotocol/sygma-core/relayer/message" @@ -24,6 +26,7 @@ import ( "github.com/ChainSafe/sygma-relayer/tss/ecdsa/keygen" "github.com/ChainSafe/sygma-relayer/tss/ecdsa/resharing" frostKeygen "github.com/ChainSafe/sygma-relayer/tss/frost/keygen" + frostResharing "github.com/ChainSafe/sygma-relayer/tss/frost/resharing" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/libp2p/go-libp2p/core/host" @@ -38,10 +41,16 @@ type EventListener interface { FetchDeposits(ctx context.Context, address common.Address, startBlock *big.Int, endBlock *big.Int) ([]*events.Deposit, error) } +type PropStorer interface { + StorePropStatus(source, destination uint8, depositNonce uint64, status store.PropStatus) error + PropStatus(source, destination uint8, depositNonce uint64) (store.PropStatus, error) +} + type RetryEventHandler struct { log zerolog.Logger eventListener EventListener depositHandler DepositHandler + propStorer PropStorer bridgeAddress common.Address bridgeABI abi.ABI domainID uint8 @@ -53,6 +62,7 @@ func NewRetryEventHandler( logC zerolog.Context, eventListener EventListener, depositHandler DepositHandler, + propStorer PropStorer, bridgeAddress common.Address, domainID uint8, blockConfirmations *big.Int, @@ -63,6 +73,7 @@ func NewRetryEventHandler( log: logC.Logger(), eventListener: eventListener, depositHandler: depositHandler, + propStorer: propStorer, bridgeAddress: bridgeAddress, bridgeABI: 
bridgeABI, domainID: domainID, @@ -105,6 +116,15 @@ func (eh *RetryEventHandler) HandleEvents( eh.log.Err(err).Str("messageID", msg.ID).Msgf("Failed handling deposit %+v", d) continue } + isExecuted, err := eh.isExecuted(msg) + if err != nil { + eh.log.Err(err).Str("messageID", msg.ID).Msgf("Failed checking if deposit executed %+v", d) + continue + } + if isExecuted { + eh.log.Debug().Str("messageID", msg.ID).Msgf("Deposit marked as executed %+v", d) + continue + } eh.log.Info().Str("messageID", msg.ID).Msgf( "Resolved retry message %+v in block range: %s-%s", msg, startBlock.String(), endBlock.String(), @@ -121,6 +141,31 @@ func (eh *RetryEventHandler) HandleEvents( return nil } +func (eh *RetryEventHandler) isExecuted(msg *message.Message) (bool, error) { + var err error + propStatus, err := eh.propStorer.PropStatus( + msg.Source, + msg.Destination, + msg.Data.(transfer.TransferMessageData).DepositNonce) + if err != nil { + return true, err + } + + if propStatus == store.ExecutedProp { + return true, nil + } + + // change the status to failed if proposal is stuck to be able to retry it + if propStatus == store.PendingProp { + err = eh.propStorer.StorePropStatus( + msg.Source, + msg.Destination, + msg.Data.(transfer.TransferMessageData).DepositNonce, + store.FailedProp) + } + return false, err +} + type KeygenEventHandler struct { log zerolog.Logger eventListener EventListener @@ -179,7 +224,7 @@ func (eh *KeygenEventHandler) HandleEvents( keygenBlockNumber := big.NewInt(0).SetUint64(keygenEvents[0].BlockNumber) keygen := keygen.NewKeygen(eh.sessionID(keygenBlockNumber), eh.threshold, eh.host, eh.communication, eh.storer) - err = eh.coordinator.Execute(context.Background(), keygen, make(chan interface{}, 1)) + err = eh.coordinator.Execute(context.Background(), []tss.TssProcess{keygen}, make(chan interface{}, 1)) if err != nil { log.Err(err).Msgf("Failed executing keygen") } @@ -244,7 +289,7 @@ func (eh *FrostKeygenEventHandler) HandleEvents( keygenBlockNumber := big.NewInt(0).SetUint64(keygenEvents[0].BlockNumber) keygen := frostKeygen.NewKeygen(eh.sessionID(keygenBlockNumber), eh.threshold, eh.host, eh.communication, eh.storer) - err = eh.coordinator.Execute(context.Background(), keygen, make(chan interface{}, 1)) + err = eh.coordinator.Execute(context.Background(), []tss.TssProcess{keygen}, make(chan interface{}, 1)) if err != nil { log.Err(err).Msgf("Failed executing keygen") } @@ -265,7 +310,8 @@ type RefreshEventHandler struct { host host.Host communication comm.Communication connectionGate *p2p.ConnectionGate - storer resharing.SaveDataStorer + ecdsaStorer resharing.SaveDataStorer + frostStorer frostResharing.FrostKeyshareStorer } func NewRefreshEventHandler( @@ -277,7 +323,8 @@ func NewRefreshEventHandler( host host.Host, communication comm.Communication, connectionGate *p2p.ConnectionGate, - storer resharing.SaveDataStorer, + ecdsaStorer resharing.SaveDataStorer, + frostStorer frostResharing.FrostKeyshareStorer, bridgeAddress common.Address, ) *RefreshEventHandler { return &RefreshEventHandler{ @@ -288,7 +335,8 @@ func NewRefreshEventHandler( coordinator: coordinator, host: host, communication: communication, - storer: storer, + ecdsaStorer: ecdsaStorer, + frostStorer: frostStorer, connectionGate: connectionGate, bridgeAddress: bridgeAddress, } @@ -312,15 +360,18 @@ func (eh *RefreshEventHandler) HandleEvents( hash := refreshEvents[len(refreshEvents)-1].Hash if hash == "" { - return fmt.Errorf("hash cannot be empty string") + log.Error().Msgf("Hash cannot be empty string") + 
return nil } topology, err := eh.topologyProvider.NetworkTopology(hash) if err != nil { - return err + log.Error().Err(err).Msgf("Failed fetching network topology") + return nil } err = eh.topologyStore.StoreTopology(topology) if err != nil { - return err + log.Error().Err(err).Msgf("Failed storing network topology") + return nil } eh.connectionGate.SetTopology(topology) @@ -331,11 +382,20 @@ func (eh *RefreshEventHandler) HandleEvents( ) resharing := resharing.NewResharing( - eh.sessionID(startBlock), topology.Threshold, eh.host, eh.communication, eh.storer, + eh.sessionID(startBlock), topology.Threshold, eh.host, eh.communication, eh.ecdsaStorer, ) - err = eh.coordinator.Execute(context.Background(), resharing, make(chan interface{}, 1)) + err = eh.coordinator.Execute(context.Background(), []tss.TssProcess{resharing}, make(chan interface{}, 1)) if err != nil { - log.Err(err).Msgf("Failed executing key refresh") + log.Err(err).Msgf("Failed executing ecdsa key refresh") + return nil + } + frostResharing := frostResharing.NewResharing( + eh.sessionID(startBlock), topology.Threshold, eh.host, eh.communication, eh.frostStorer, + ) + err = eh.coordinator.Execute(context.Background(), []tss.TssProcess{frostResharing}, make(chan interface{}, 1)) + if err != nil { + log.Err(err).Msgf("Failed executing frost key refresh") + return nil } return nil } diff --git a/chains/evm/listener/eventHandlers/event-handler_test.go b/chains/evm/listener/eventHandlers/event-handler_test.go index ec064219..dd547c0b 100644 --- a/chains/evm/listener/eventHandlers/event-handler_test.go +++ b/chains/evm/listener/eventHandlers/event-handler_test.go @@ -18,6 +18,7 @@ import ( "github.com/ChainSafe/sygma-relayer/chains/evm/listener/eventHandlers" mock_listener "github.com/ChainSafe/sygma-relayer/chains/evm/listener/eventHandlers/mock" "github.com/ChainSafe/sygma-relayer/relayer/transfer" + "github.com/ChainSafe/sygma-relayer/store" "github.com/sygmaprotocol/sygma-core/relayer/message" ) @@ -25,6 +26,7 @@ type RetryEventHandlerTestSuite struct { suite.Suite retryEventHandler *eventHandlers.RetryEventHandler mockDepositHandler *mock_listener.MockDepositHandler + mockPropStorer *mock_listener.MockPropStorer mockEventListener *mock_listener.MockEventListener domainID uint8 msgChan chan []*message.Message @@ -39,8 +41,17 @@ func (s *RetryEventHandlerTestSuite) SetupTest() { s.domainID = 1 s.mockEventListener = mock_listener.NewMockEventListener(ctrl) s.mockDepositHandler = mock_listener.NewMockDepositHandler(ctrl) + s.mockPropStorer = mock_listener.NewMockPropStorer(ctrl) s.msgChan = make(chan []*message.Message, 1) - s.retryEventHandler = eventHandlers.NewRetryEventHandler(log.With(), s.mockEventListener, s.mockDepositHandler, common.Address{}, s.domainID, big.NewInt(5), s.msgChan) + s.retryEventHandler = eventHandlers.NewRetryEventHandler( + log.With(), + s.mockEventListener, + s.mockDepositHandler, + s.mockPropStorer, + common.Address{}, + s.domainID, + big.NewInt(5), + s.msgChan) } func (s *RetryEventHandlerTestSuite) Test_FetchDepositFails() { @@ -79,6 +90,7 @@ func (s *RetryEventHandlerTestSuite) Test_FetchDepositFails_ExecutionContinues() DepositNonce: 2, }, }, nil) + s.mockPropStorer.EXPECT().PropStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(store.MissingProp, nil) err := s.retryEventHandler.HandleEvents(big.NewInt(0), big.NewInt(5)) msgs := <-s.msgChan @@ -132,6 +144,7 @@ func (s *RetryEventHandlerTestSuite) Test_HandleDepositFails_ExecutionContinues( ).Return(&message.Message{Data: 
transfer.TransferMessageData{ DepositNonce: 2, }}, nil) + s.mockPropStorer.EXPECT().PropStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(store.MissingProp, nil) err := s.retryEventHandler.HandleEvents(big.NewInt(0), big.NewInt(5)) msgs := <-s.msgChan @@ -183,6 +196,7 @@ func (s *RetryEventHandlerTestSuite) Test_HandlingRetryPanics_ExecutionContinue( ).Return(&message.Message{Data: transfer.TransferMessageData{ DepositNonce: 2, }}, nil) + s.mockPropStorer.EXPECT().PropStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(store.MissingProp, nil) err := s.retryEventHandler.HandleEvents(big.NewInt(0), big.NewInt(5)) msgs := <-s.msgChan @@ -235,6 +249,7 @@ func (s *RetryEventHandlerTestSuite) Test_MultipleDeposits() { ).Return(&message.Message{Data: transfer.TransferMessageData{ DepositNonce: 2, }}, nil) + s.mockPropStorer.EXPECT().PropStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(store.MissingProp, nil).Times(2) err := s.retryEventHandler.HandleEvents(big.NewInt(0), big.NewInt(5)) msgs := <-s.msgChan @@ -247,6 +262,65 @@ func (s *RetryEventHandlerTestSuite) Test_MultipleDeposits() { }}}) } +func (s *RetryEventHandlerTestSuite) Test_MultipleDeposits_ExecutedIgnored() { + d1 := events.Deposit{ + DepositNonce: 1, + DestinationDomainID: 2, + ResourceID: [32]byte{}, + HandlerResponse: []byte{}, + Data: []byte{}, + } + d2 := events.Deposit{ + DepositNonce: 2, + DestinationDomainID: 2, + ResourceID: [32]byte{}, + HandlerResponse: []byte{}, + Data: []byte{}, + } + s.mockEventListener.EXPECT().FetchRetryEvents( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return([]events.RetryEvent{{TxHash: "event1"}}, nil) + s.mockEventListener.EXPECT().FetchRetryDepositEvents(events.RetryEvent{TxHash: "event1"}, gomock.Any(), big.NewInt(5)).Return([]events.Deposit{d1, d2}, nil) + msgID := fmt.Sprintf("retry-%d-%d-%d-%d", 1, 2, 0, 5) + s.mockDepositHandler.EXPECT().HandleDeposit( + s.domainID, + d1.DestinationDomainID, + d1.DepositNonce, + d1.ResourceID, + d1.Data, + d1.HandlerResponse, + msgID, + ).Return(&message.Message{Data: transfer.TransferMessageData{ + DepositNonce: 1, + }}, nil) + s.mockPropStorer.EXPECT().PropStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(store.ExecutedProp, nil) + s.mockDepositHandler.EXPECT().HandleDeposit( + s.domainID, + d2.DestinationDomainID, + d2.DepositNonce, + d2.ResourceID, + d2.Data, + d2.HandlerResponse, + msgID, + ).Return(&message.Message{Data: transfer.TransferMessageData{ + DepositNonce: 2, + }}, nil) + s.mockPropStorer.EXPECT().PropStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(store.PendingProp, nil) + s.mockPropStorer.EXPECT().StorePropStatus(gomock.Any(), gomock.Any(), gomock.Any(), store.FailedProp).Return(nil) + + err := s.retryEventHandler.HandleEvents(big.NewInt(0), big.NewInt(5)) + msgs := <-s.msgChan + + s.Nil(err) + s.Equal(msgs, []*message.Message{ + { + Data: transfer.TransferMessageData{ + DepositNonce: 2, + }, + }, + }) +} + type DepositHandlerTestSuite struct { suite.Suite depositEventHandler *eventHandlers.DepositEventHandler diff --git a/chains/evm/listener/eventHandlers/mock/listener.go b/chains/evm/listener/eventHandlers/mock/listener.go index 954575ad..1616326c 100644 --- a/chains/evm/listener/eventHandlers/mock/listener.go +++ b/chains/evm/listener/eventHandlers/mock/listener.go @@ -10,6 +10,7 @@ import ( reflect "reflect" events "github.com/ChainSafe/sygma-relayer/chains/evm/calls/events" + store "github.com/ChainSafe/sygma-relayer/store" common "github.com/ethereum/go-ethereum/common" types 
"github.com/ethereum/go-ethereum/core/types" gomock "github.com/golang/mock/gomock" @@ -129,6 +130,58 @@ func (mr *MockEventListenerMockRecorder) FetchRetryEvents(ctx, contractAddress, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchRetryEvents", reflect.TypeOf((*MockEventListener)(nil).FetchRetryEvents), ctx, contractAddress, startBlock, endBlock) } +// MockPropStorer is a mock of PropStorer interface. +type MockPropStorer struct { + ctrl *gomock.Controller + recorder *MockPropStorerMockRecorder +} + +// MockPropStorerMockRecorder is the mock recorder for MockPropStorer. +type MockPropStorerMockRecorder struct { + mock *MockPropStorer +} + +// NewMockPropStorer creates a new mock instance. +func NewMockPropStorer(ctrl *gomock.Controller) *MockPropStorer { + mock := &MockPropStorer{ctrl: ctrl} + mock.recorder = &MockPropStorerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPropStorer) EXPECT() *MockPropStorerMockRecorder { + return m.recorder +} + +// PropStatus mocks base method. +func (m *MockPropStorer) PropStatus(source, destination uint8, depositNonce uint64) (store.PropStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PropStatus", source, destination, depositNonce) + ret0, _ := ret[0].(store.PropStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PropStatus indicates an expected call of PropStatus. +func (mr *MockPropStorerMockRecorder) PropStatus(source, destination, depositNonce interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PropStatus", reflect.TypeOf((*MockPropStorer)(nil).PropStatus), source, destination, depositNonce) +} + +// StorePropStatus mocks base method. +func (m *MockPropStorer) StorePropStatus(source, destination uint8, depositNonce uint64, status store.PropStatus) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorePropStatus", source, destination, depositNonce, status) + ret0, _ := ret[0].(error) + return ret0 +} + +// StorePropStatus indicates an expected call of StorePropStatus. +func (mr *MockPropStorerMockRecorder) StorePropStatus(source, destination, depositNonce, status interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorePropStatus", reflect.TypeOf((*MockPropStorer)(nil).StorePropStatus), source, destination, depositNonce, status) +} + // MockDepositHandler is a mock of DepositHandler interface. 
type MockDepositHandler struct { ctrl *gomock.Controller diff --git a/chains/proposal.go b/chains/proposal.go index 6652d7c7..0a8a72eb 100644 --- a/chains/proposal.go +++ b/chains/proposal.go @@ -5,6 +5,7 @@ package chains import ( "fmt" + "math/big" "github.com/ChainSafe/sygma-relayer/relayer/transfer" "github.com/ethereum/go-ethereum/common/hexutil" @@ -17,8 +18,8 @@ func ProposalsHash(proposals []*transfer.TransferProposal, chainID int64, verifC formattedProps := make([]interface{}, len(proposals)) for i, prop := range proposals { formattedProps[i] = map[string]interface{}{ - "originDomainID": math.NewHexOrDecimal256(int64(prop.Source)), - "depositNonce": math.NewHexOrDecimal256(int64(prop.Data.DepositNonce)), + "originDomainID": big.NewInt(int64(prop.Source)), + "depositNonce": new(big.Int).SetUint64(prop.Data.DepositNonce), "resourceID": hexutil.Encode(prop.Data.ResourceId[:]), "data": prop.Data.Data, } diff --git a/chains/proposal_test.go b/chains/proposal_test.go index afa8921c..9b685686 100644 --- a/chains/proposal_test.go +++ b/chains/proposal_test.go @@ -4,6 +4,8 @@ package chains import ( + "testing" + "github.com/ChainSafe/sygma-relayer/relayer/transfer" "github.com/stretchr/testify/suite" ) @@ -15,6 +17,10 @@ type ProposalTestSuite struct { suite.Suite } +func TestRunProposalTestSuite(t *testing.T) { + suite.Run(t, new(ProposalTestSuite)) +} + func (s *ProposalTestSuite) Test_ProposalsHash() { data := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 90, 243, 16, 122, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 36, 0, 1, 1, 0, 212, 53, 147, 199, 21, 253, 211, 28, 97, 20, 26, 189, 4, 169, 159, 214, 130, 44, 133, 88, 133, 76, 205, 227, 154, 86, 132, 231, 165, 109, 162, 125} @@ -22,13 +28,13 @@ func (s *ProposalTestSuite) Test_ProposalsHash() { Source: 1, Destination: 2, Data: transfer.TransferProposalData{ - DepositNonce: 3, + DepositNonce: 15078986465725403975, ResourceId: [32]byte{3}, Metadata: nil, Data: data, }, }} - correctRes := []byte{253, 216, 81, 25, 46, 239, 181, 138, 51, 225, 165, 111, 156, 95, 27, 239, 160, 87, 89, 84, 50, 22, 97, 185, 132, 200, 201, 210, 204, 99, 94, 131} + correctRes := []byte{0xde, 0x7b, 0x5c, 0x9e, 0x8, 0x7a, 0xb4, 0xf5, 0xfb, 0xe, 0x9f, 0x73, 0xa7, 0xe5, 0xbd, 0xb, 0xdf, 0x9e, 0xeb, 0x4, 0xaa, 0xbb, 0xd0, 0xe8, 0xf8, 0xde, 0x58, 0xa2, 0x4, 0xa3, 0x3e, 0x55} res, err := ProposalsHash(prop, 5, verifyingContract, bridgeVersion) s.Nil(err) diff --git a/chains/substrate/executor/executor.go b/chains/substrate/executor/executor.go index d3f8252f..de22429a 100644 --- a/chains/substrate/executor/executor.go +++ b/chains/substrate/executor/executor.go @@ -105,12 +105,13 @@ func (e *Executor) Execute(proposals []*proposal.Proposal) error { return err } - sessionID := transferProposals[0].MessageID + messageID := transferProposals[0].MessageID msg := big.NewInt(0) msg.SetBytes(propHash) signing, err := signing.NewSigning( msg, - sessionID, + messageID, + messageID, e.host, e.comm, e.fetcher) @@ -124,7 +125,7 @@ func (e *Executor) Execute(proposals []*proposal.Proposal) error { pool := pool.New().WithErrors() pool.Go(func() error { - err := e.coordinator.Execute(executionContext, signing, sigChn) + err := e.coordinator.Execute(executionContext, []tss.TssProcess{signing}, sigChn) if err != nil { cancelWatch() } @@ -132,7 +133,7 @@ func (e *Executor) Execute(proposals []*proposal.Proposal) error { return err }) pool.Go(func() error { - return 
e.watchExecution(watchContext, cancelExecution, transferProposals, sigChn, sessionID) + return e.watchExecution(watchContext, cancelExecution, transferProposals, sigChn, messageID) }) return pool.Wait() } diff --git a/docs/general/Arhitecture.md b/docs/general/Arhitecture.md index ef5a10cb..9242eca6 100644 --- a/docs/general/Arhitecture.md +++ b/docs/general/Arhitecture.md @@ -5,6 +5,7 @@ ## Components - **[CLI commands](/docs/general/CLI.md)** - overview of CLI commands +- **[Deposit](/docs/general/Deposit.md)** - Deposit data overview - **[Fees](/docs/general/Fees.md)** - high-level overview of handling fees - **[Relayers](/docs/Home.md)** - relayer technical documentation - **[Topology Map](/docs/general/Topology.md)** - overview of topology map usage diff --git a/docs/general/Deposit.md b/docs/general/Deposit.md new file mode 100644 index 00000000..404dbddf --- /dev/null +++ b/docs/general/Deposit.md @@ -0,0 +1,17 @@ +# Deposit +This document describes the expected format of the deposit for different networks +## BTC Deposit +## Format + +### OP_RETURN Output + +- **Purpose**: Stores arbitrary data within the transaction. +- **Requirements**: + - There should be at most one output with a `ScriptPubKey.Type` of `OP_RETURN`. + - The `OP_RETURN` data must be formatted as `receiverEVMAddress_destinationDomainID`. + + +### Amount Calculation + +- The total deposit amount is calculated by summing the values of the outputs that match the resource address. +- Only outputs with script types of `witness_v1_taproot` are considered for the amount calculation. diff --git a/e2e/btc/btc_test.go b/e2e/btc/btc_test.go index 5cfa8f61..52f9b8d5 100644 --- a/e2e/btc/btc_test.go +++ b/e2e/btc/btc_test.go @@ -152,6 +152,9 @@ func (s *IntegrationTestSuite) Test_Erc20Deposit_Btc_to_EVM() { recipientAddress, err := btcutil.DecodeAddress("bcrt1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2sjyr5ek", &chaincfg.RegressionNetParams) s.Nil(err) + feeAddress, err := btcutil.DecodeAddress("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt", &chaincfg.RegressionNetParams) + s.Nil(err) + // Define the private key as a hexadecimal string privateKeyHex := "ccfa495d2ae193eeec53db12971bdedfe500603ec53f98a6138f0abe932be84f" @@ -170,6 +173,10 @@ func (s *IntegrationTestSuite) Test_Erc20Deposit_Btc_to_EVM() { recipientPkScript, err := txscript.PayToAddrScript(recipientAddress) s.Nil(err) + // Create the PkScript for the recipient address + feeAddressPkScript, err := txscript.PayToAddrScript(feeAddress) + s.Nil(err) + // Define data for the OP_RETURN output opReturnData := []byte("0x1c3A03D04c026b1f4B4208D2ce053c5686E6FB8d_01") opReturnScript, err := txscript.NullDataScript(opReturnData) @@ -183,7 +190,14 @@ func (s *IntegrationTestSuite) Test_Erc20Deposit_Btc_to_EVM() { Index: unspent[0].Vout, }, nil, nil) + hash2, _ := chainhash.NewHashFromStr(unspent[1].TxID) + txInput2 := wire.NewTxIn(&wire.OutPoint{ + Hash: *hash2, + Index: unspent[1].Vout, + }, nil, nil) + txInputs = append(txInputs, txInput) + txInputs = append(txInputs, txInput2) // Create transaction outputs txOutputs := []*wire.TxOut{ @@ -191,6 +205,10 @@ func (s *IntegrationTestSuite) Test_Erc20Deposit_Btc_to_EVM() { Value: int64(unspent[0].Amount*math.Pow(10, 8)) - 10000000, PkScript: recipientPkScript, }, + { + Value: int64(unspent[1].Amount*math.Pow(10, 8)) - 10000000, + PkScript: feeAddressPkScript, + }, { Value: 0, PkScript: opReturnScript, @@ -206,14 +224,13 @@ func (s *IntegrationTestSuite) Test_Erc20Deposit_Btc_to_EVM() { tx.AddTxOut(txOut) } - subscript, err := 
hex.DecodeString(unspent[0].ScriptPubKey) - s.Nil(err) - - // Sign the transaction - sigScript, err := txscript.SignatureScript(tx, 0, subscript, txscript.SigHashAll, privateKey, true) - s.Nil(err) - - tx.TxIn[0].SignatureScript = sigScript + for i, txIn := range tx.TxIn { + subscript, err := hex.DecodeString(unspent[i].ScriptPubKey) + s.Nil(err) + sigScript, err := txscript.SignatureScript(tx, i, subscript, txscript.SigHashAll, privateKey, true) + s.Nil(err) + txIn.SignatureScript = sigScript + } _, err = conn.Client.SendRawTransaction(tx, true) s.Nil(err) diff --git a/example/app/app.go b/example/app/app.go index 0b00a140..86e4ba9d 100644 --- a/example/app/app.go +++ b/example/app/app.go @@ -196,8 +196,8 @@ func Run() error { eventHandlers = append(eventHandlers, hubEventHandlers.NewDepositEventHandler(depositListener, depositHandler, bridgeAddress, *config.GeneralChainConfig.Id, msgChan)) eventHandlers = append(eventHandlers, hubEventHandlers.NewKeygenEventHandler(l, tssListener, coordinator, host, communication, keyshareStore, bridgeAddress, networkTopology.Threshold)) eventHandlers = append(eventHandlers, hubEventHandlers.NewFrostKeygenEventHandler(l, tssListener, coordinator, host, communication, frostKeyshareStore, frostAddress, networkTopology.Threshold)) - eventHandlers = append(eventHandlers, hubEventHandlers.NewRefreshEventHandler(l, nil, nil, tssListener, coordinator, host, communication, connectionGate, keyshareStore, bridgeAddress)) - eventHandlers = append(eventHandlers, hubEventHandlers.NewRetryEventHandler(l, tssListener, depositHandler, bridgeAddress, *config.GeneralChainConfig.Id, config.BlockConfirmations, msgChan)) + eventHandlers = append(eventHandlers, hubEventHandlers.NewRefreshEventHandler(l, nil, nil, tssListener, coordinator, host, communication, connectionGate, keyshareStore, frostKeyshareStore, bridgeAddress)) + eventHandlers = append(eventHandlers, hubEventHandlers.NewRetryEventHandler(l, tssListener, depositHandler, propStore, bridgeAddress, *config.GeneralChainConfig.Id, config.BlockConfirmations, msgChan)) evmListener := listener.NewEVMListener(client, eventHandlers, blockstore, sygmaMetrics, *config.GeneralChainConfig.Id, config.BlockRetryInterval, config.BlockConfirmations, config.BlockInterval) executor := executor.NewExecutor(host, communication, coordinator, bridgeContract, keyshareStore, exitLock, config.GasLimit.Uint64()) @@ -268,7 +268,7 @@ func Run() error { resources := make(map[[32]byte]btcConfig.Resource) for _, resource := range config.Resources { resources[resource.ResourceID] = resource - eventHandlers = append(eventHandlers, btcListener.NewFungibleTransferEventHandler(l, *config.GeneralChainConfig.Id, depositHandler, msgChan, conn, resource)) + eventHandlers = append(eventHandlers, btcListener.NewFungibleTransferEventHandler(l, *config.GeneralChainConfig.Id, depositHandler, msgChan, conn, resource, config.FeeAddress)) } listener := btcListener.NewBtcListener(conn, eventHandlers, config, blockstore) diff --git a/example/cfg/config_evm-evm_1.json b/example/cfg/config_evm-evm_1.json index f681fd53..57fead43 100644 --- a/example/cfg/config_evm-evm_1.json +++ b/example/cfg/config_evm-evm_1.json @@ -98,6 +98,7 @@ "name": "bitcoin", "type": "btc", "startBlock": 100, + "feeAddress": "mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt", "endpoint": "bitcoin:18443", "mempoolUrl": "http://mempool-stub:8882", "blockConfirmations": 1, @@ -109,7 +110,8 @@ "address": "bcrt1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2sjyr5ek", "resourceId": 
"0x0000000000000000000000000000000000000000000000000000000000001000", "script": "51206a698882348433b57d549d6344f74500fcd13ad8d2200cdf89f8e39e5cafa7d5", - "tweak": "c82aa6ae534bb28aaafeb3660c31d6a52e187d8f05d48bb6bdb9b733a9b42212" + "tweak": "c82aa6ae534bb28aaafeb3660c31d6a52e187d8f05d48bb6bdb9b733a9b42212", + "feeAmount": "10000000" } ] } diff --git a/example/cfg/config_evm-evm_2.json b/example/cfg/config_evm-evm_2.json index 6ff0ebf4..153f1341 100644 --- a/example/cfg/config_evm-evm_2.json +++ b/example/cfg/config_evm-evm_2.json @@ -100,6 +100,7 @@ "name": "bitcoin", "type": "btc", "startBlock": 100, + "feeAddress": "mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt", "endpoint": "bitcoin:18443", "mempoolUrl": "http://mempool-stub:8882", "blockConfirmations": 1, @@ -112,7 +113,8 @@ "address": "bcrt1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2sjyr5ek", "resourceId": "0x0000000000000000000000000000000000000000000000000000000000001000", "script": "51206a698882348433b57d549d6344f74500fcd13ad8d2200cdf89f8e39e5cafa7d5", - "tweak": "c82aa6ae534bb28aaafeb3660c31d6a52e187d8f05d48bb6bdb9b733a9b42212" + "tweak": "c82aa6ae534bb28aaafeb3660c31d6a52e187d8f05d48bb6bdb9b733a9b42212", + "feeAmount": "10000000" } ] } diff --git a/example/cfg/config_evm-evm_3.json b/example/cfg/config_evm-evm_3.json index 72884292..a40f31b3 100644 --- a/example/cfg/config_evm-evm_3.json +++ b/example/cfg/config_evm-evm_3.json @@ -99,6 +99,7 @@ "name": "bitcoin", "type": "btc", "startBlock": 100, + "feeAddress": "mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt", "endpoint": "bitcoin:18443", "mempoolUrl": "http://mempool-stub:8882", "blockConfirmations": 1, @@ -111,7 +112,8 @@ "address": "bcrt1pdf5c3q35ssem2l25n435fa69qr7dzwkc6gsqehuflr3euh905l2sjyr5ek", "resourceId": "0x0000000000000000000000000000000000000000000000000000000000001000", "script": "51206a698882348433b57d549d6344f74500fcd13ad8d2200cdf89f8e39e5cafa7d5", - "tweak": "c82aa6ae534bb28aaafeb3660c31d6a52e187d8f05d48bb6bdb9b733a9b42212" + "tweak": "c82aa6ae534bb28aaafeb3660c31d6a52e187d8f05d48bb6bdb9b733a9b42212", + "feeAmount": "10000000" } ] } diff --git a/go.mod b/go.mod index c015c8c5..c6b391ab 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/binance-chain/tss-lib v0.0.0-00010101000000-000000000000 github.com/btcsuite/btcd/btcutil v1.1.5 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 - github.com/centrifuge/go-substrate-rpc-client/v4 v4.1.0 + github.com/centrifuge/go-substrate-rpc-client/v4 v4.2.1 github.com/creasty/defaults v1.6.0 github.com/deckarep/golang-set/v2 v2.1.0 github.com/ethereum/go-ethereum v1.13.4 @@ -20,7 +20,7 @@ require ( github.com/spf13/cobra v1.5.0 github.com/spf13/viper v1.9.0 github.com/stretchr/testify v1.8.4 - github.com/sygmaprotocol/sygma-core v0.0.0-20240411120252-bf0131a81565 + github.com/sygmaprotocol/sygma-core v0.0.0-20240710140025-258996be150b github.com/taurusgroup/multi-party-sig v0.6.0-alpha-2021-09-21.0.20230619131919-9c7c6ffd7217 go.opentelemetry.io/otel v1.16.0 go.opentelemetry.io/otel/metric v1.16.0 @@ -47,6 +47,7 @@ require ( github.com/holiman/uint256 v1.2.3 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/supranational/blst v0.3.11 // indirect + github.com/vedhavyas/go-subkey/v2 v2.0.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/blake3 v0.2.3 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect @@ -61,7 +62,7 @@ require ( github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect 
github.com/btcsuite/btcd v0.24.0 - github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.2.0 github.com/btcsuite/btcutil v1.0.3-0.20211129182920-9c4bbabe7acd // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.0.4 // indirect @@ -163,7 +164,6 @@ require ( github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect - github.com/vedhavyas/go-subkey v1.0.4 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.39.0 // indirect go.opentelemetry.io/otel/sdk v1.16.0 // indirect diff --git a/go.sum b/go.sum index 3d12dec0..d4bd1121 100644 --- a/go.sum +++ b/go.sum @@ -124,8 +124,8 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/centrifuge/go-substrate-rpc-client/v4 v4.1.0 h1:GEvub7kU5YFAcn5A2uOo4AZSM1/cWZCOvfu7E3gQmK8= -github.com/centrifuge/go-substrate-rpc-client/v4 v4.1.0/go.mod h1:szA5wf9suAIcNg/1S3rGeFITHqrnqH5TC6b+O0SEQ94= +github.com/centrifuge/go-substrate-rpc-client/v4 v4.2.1 h1:io49TJ8IOIlzipioJc9pJlrjgdJvqktpUWYxVY5AUjE= +github.com/centrifuge/go-substrate-rpc-client/v4 v4.2.1/go.mod h1:k61SBXqYmnZO4frAJyH3iuqjolYrYsq79r8EstmklDY= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -759,8 +759,8 @@ github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbe github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/sygmaprotocol/multi-party-sig v0.0.0-20240523153754-9377ba09c35e h1:IzidTxe1CT6O5qy2gK9NQO/hrC+2wjDGzCxCF+kjCKU= github.com/sygmaprotocol/multi-party-sig v0.0.0-20240523153754-9377ba09c35e/go.mod h1:roZI3gaKCo15PUSB4LdJpTLTjq8TFsJiOH5kpcN1HpQ= -github.com/sygmaprotocol/sygma-core v0.0.0-20240411120252-bf0131a81565 h1:oEd8KmRDSyGvQGg/A0cd4JbWNiVP8GGjK672JWQ0QAk= -github.com/sygmaprotocol/sygma-core v0.0.0-20240411120252-bf0131a81565/go.mod h1:b4RZCyYr20Mp4WAAj4TkC6gU2KZ0ZWcpSGmKc6n8NKc= +github.com/sygmaprotocol/sygma-core v0.0.0-20240710140025-258996be150b h1:gaXOurg/0X/iKS7uED0RLTieilJymQUNoyXOTwI1710= +github.com/sygmaprotocol/sygma-core v0.0.0-20240710140025-258996be150b/go.mod h1:b4RZCyYr20Mp4WAAj4TkC6gU2KZ0ZWcpSGmKc6n8NKc= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= @@ -774,8 +774,8 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.25.7 
h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= -github.com/vedhavyas/go-subkey v1.0.4 h1:QwjBZx4w7qXC2lmqol2jJfhaNXPI9BsgLZiMiCwqGDU= -github.com/vedhavyas/go-subkey v1.0.4/go.mod h1:aOIil/KS9hJlnr9ZSQKSoXdu/MbnkCxG4x9IOlLsMtI= +github.com/vedhavyas/go-subkey/v2 v2.0.0 h1:LemDIsrVtRSOkp0FA8HxP6ynfKjeOj3BY2U9UNfeDMA= +github.com/vedhavyas/go-subkey/v2 v2.0.0/go.mod h1:95aZ+XDCWAUUynjlmi7BtPExjXgXxByE0WfBwbmIRH4= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= diff --git a/tss/coordinator.go b/tss/coordinator.go index 8b510267..ac0802b1 100644 --- a/tss/coordinator.go +++ b/tss/coordinator.go @@ -30,9 +30,9 @@ var ( type TssProcess interface { Run(ctx context.Context, coordinator bool, resultChn chan interface{}, params []byte) error Stop() - Ready(readyMap map[peer.ID]bool, excludedPeers []peer.ID) (bool, error) + Ready(readyPeers []peer.ID, excludedPeers []peer.ID) (bool, error) Retryable() bool - StartParams(readyMap map[peer.ID]bool) []byte + StartParams(readyPeers []peer.ID) []byte SessionID() string ValidCoordinators() []peer.ID } @@ -69,8 +69,10 @@ func NewCoordinator( } // Execute calculates process leader and coordinates party readiness and start the tss processes. -func (c *Coordinator) Execute(ctx context.Context, tssProcess TssProcess, resultChn chan interface{}) error { - sessionID := tssProcess.SessionID() +// Array of processes can be passed if all the processes have to have the same peer subset and +// the result of all of them is needed. The processes should have an unique session ID for each one. +func (c *Coordinator) Execute(ctx context.Context, tssProcesses []TssProcess, resultChn chan interface{}) error { + sessionID := tssProcesses[0].SessionID() value, ok := c.pendingProcesses[sessionID] if ok && value { log.Warn().Str("SessionID", sessionID).Msgf("Process already pending") @@ -89,71 +91,74 @@ func (c *Coordinator) Execute(ctx context.Context, tssProcess TssProcess, result c.processLock.Lock() c.pendingProcesses[sessionID] = false c.processLock.Unlock() - tssProcess.Stop() + for _, process := range tssProcesses { + process.Stop() + } }() coordinatorElector := c.electorFactory.CoordinatorElector(sessionID, elector.Static) - coordinator, _ := coordinatorElector.Coordinator(ctx, tssProcess.ValidCoordinators()) + coordinator, _ := coordinatorElector.Coordinator(ctx, tssProcesses[0].ValidCoordinators()) log.Info().Str("SessionID", sessionID).Msgf("Starting process with coordinator %s", coordinator.Pretty()) p.Go(func(ctx context.Context) error { - err := c.start(ctx, tssProcess, coordinator, resultChn, []peer.ID{}) + err := c.start(ctx, tssProcesses, coordinator, resultChn, []peer.ID{}) if err == nil { cancel() } return err }) p.Go(func(ctx context.Context) error { - return c.watchExecution(ctx, tssProcess, coordinator) + return c.watchExecution(ctx, tssProcesses[0], coordinator) }) err := p.Wait() if err == nil { return nil } - if !tssProcess.Retryable() { + if !tssProcesses[0].Retryable() { return err } - return c.handleError(ctx, err, tssProcess, resultChn) + return c.handleError(ctx, err, tssProcesses, resultChn) } -func (c *Coordinator) handleError(ctx context.Context, err error, tssProcess TssProcess, resultChn chan interface{}) error { +func (c *Coordinator) handleError(ctx context.Context, err error, tssProcesses []TssProcess, resultChn chan interface{}) error 
{ ctx, cancel := context.WithCancel(ctx) defer cancel() rp := pool.New().WithContext(ctx).WithCancelOnError() rp.Go(func(ctx context.Context) error { - return c.watchExecution(ctx, tssProcess, peer.ID("")) + return c.watchExecution(ctx, tssProcesses[0], peer.ID("")) }) + sessionID := tssProcesses[0].SessionID() switch err := err.(type) { case *CoordinatorError: { - log.Err(err).Str("SessionID", tssProcess.SessionID()).Msgf("Tss process failed with error %+v", err) + log.Err(err).Str("SessionID", sessionID).Msgf("Tss process failed with error %+v", err) excludedPeers := []peer.ID{err.Peer} - rp.Go(func(ctx context.Context) error { return c.retry(ctx, tssProcess, resultChn, excludedPeers) }) + rp.Go(func(ctx context.Context) error { return c.retry(ctx, tssProcesses, resultChn, excludedPeers) }) } case *comm.CommunicationError: { - log.Err(err).Str("SessionID", tssProcess.SessionID()).Msgf("Tss process failed with error %+v", err) - rp.Go(func(ctx context.Context) error { return c.retry(ctx, tssProcess, resultChn, []peer.ID{}) }) + log.Err(err).Str("SessionID", sessionID).Msgf("Tss process failed with error %+v", err) + rp.Go(func(ctx context.Context) error { return c.retry(ctx, tssProcesses, resultChn, []peer.ID{}) }) } case *tss.Error: { - log.Err(err).Str("SessionID", tssProcess.SessionID()).Msgf("Tss process failed with error %+v", err) + log.Err(err).Str("SessionID", sessionID).Msgf("Tss process failed with error %+v", err) excludedPeers, err := common.PeersFromParties(err.Culprits()) if err != nil { return err } - rp.Go(func(ctx context.Context) error { return c.retry(ctx, tssProcess, resultChn, excludedPeers) }) + rp.Go(func(ctx context.Context) error { return c.retry(ctx, tssProcesses, resultChn, excludedPeers) }) } case *SubsetError: { // wait for start message if existing singing process fails rp.Go(func(ctx context.Context) error { - return c.waitForStart(ctx, tssProcess, resultChn, peer.ID(""), c.TssTimeout) + return c.waitForStart(ctx, tssProcesses, resultChn, peer.ID(""), c.TssTimeout) }) } default: @@ -197,24 +202,24 @@ func (c *Coordinator) watchExecution(ctx context.Context, tssProcess TssProcess, } // start initiates listeners for coordinator and participants with static calculated coordinator -func (c *Coordinator) start(ctx context.Context, tssProcess TssProcess, coordinator peer.ID, resultChn chan interface{}, excludedPeers []peer.ID) error { +func (c *Coordinator) start(ctx context.Context, tssProcesses []TssProcess, coordinator peer.ID, resultChn chan interface{}, excludedPeers []peer.ID) error { if coordinator.Pretty() == c.host.ID().Pretty() { - return c.initiate(ctx, tssProcess, resultChn, excludedPeers) + return c.initiate(ctx, tssProcesses, resultChn, excludedPeers) } else { - return c.waitForStart(ctx, tssProcess, resultChn, coordinator, c.CoordinatorTimeout) + return c.waitForStart(ctx, tssProcesses, resultChn, coordinator, c.CoordinatorTimeout) } } // retry initiates full bully process to calculate coordinator and starts a new tss process after // an expected error ocurred during regular tss execution -func (c *Coordinator) retry(ctx context.Context, tssProcess TssProcess, resultChn chan interface{}, excludedPeers []peer.ID) error { - coordinatorElector := c.electorFactory.CoordinatorElector(tssProcess.SessionID(), elector.Bully) - coordinator, err := coordinatorElector.Coordinator(ctx, common.ExcludePeers(tssProcess.ValidCoordinators(), excludedPeers)) +func (c *Coordinator) retry(ctx context.Context, tssProcesses []TssProcess, resultChn chan interface{}, 
excludedPeers []peer.ID) error { + coordinatorElector := c.electorFactory.CoordinatorElector(tssProcesses[0].SessionID(), elector.Bully) + coordinator, err := coordinatorElector.Coordinator(ctx, common.ExcludePeers(tssProcesses[0].ValidCoordinators(), excludedPeers)) if err != nil { return err } - return c.start(ctx, tssProcess, coordinator, resultChn, excludedPeers) + return c.start(ctx, tssProcesses, coordinator, resultChn, excludedPeers) } // broadcastInitiateMsg sends TssInitiateMsg to all peers @@ -228,11 +233,12 @@ func (c *Coordinator) broadcastInitiateMsg(sessionID string) { // initiate sends initiate message to all peers and waits // for ready response. After tss process declares that enough // peers are ready, start message is broadcasted and tss process is started. -func (c *Coordinator) initiate(ctx context.Context, tssProcess TssProcess, resultChn chan interface{}, excludedPeers []peer.ID) error { +func (c *Coordinator) initiate(ctx context.Context, tssProcesses []TssProcess, resultChn chan interface{}, excludedPeers []peer.ID) error { readyChan := make(chan *comm.WrappedMessage) - readyMap := make(map[peer.ID]bool) - readyMap[c.host.ID()] = true + readyPeers := make([]peer.ID, 0) + readyPeers = append(readyPeers, c.host.ID()) + tssProcess := tssProcesses[0] subID := c.communication.Subscribe(tssProcess.SessionID(), comm.TssReadyMsg, readyChan) defer c.communication.UnSubscribe(subID) @@ -244,10 +250,10 @@ func (c *Coordinator) initiate(ctx context.Context, tssProcess TssProcess, resul case wMsg := <-readyChan: { log.Debug().Str("SessionID", tssProcess.SessionID()).Msgf("received ready message from %s", wMsg.From) - if !slices.Contains(excludedPeers, wMsg.From) { - readyMap[wMsg.From] = true + if !slices.Contains(excludedPeers, wMsg.From) && !slices.Contains(readyPeers, wMsg.From) { + readyPeers = append(readyPeers, wMsg.From) } - ready, err := tssProcess.Ready(readyMap, excludedPeers) + ready, err := tssProcess.Ready(readyPeers, excludedPeers) if err != nil { return err } @@ -255,14 +261,21 @@ func (c *Coordinator) initiate(ctx context.Context, tssProcess TssProcess, resul continue } - startParams := tssProcess.StartParams(readyMap) + startParams := tssProcess.StartParams(readyPeers) startMsgBytes, err := message.MarshalStartMessage(startParams) if err != nil { return err } _ = c.communication.Broadcast(c.host.Peerstore().Peers(), startMsgBytes, comm.TssStartMsg, tssProcess.SessionID()) - return tssProcess.Run(ctx, true, resultChn, startParams) + p := pool.New().WithContext(ctx).WithCancelOnError() + for _, process := range tssProcesses { + tssProcess := process + p.Go(func(ctx context.Context) error { + return tssProcess.Run(ctx, true, resultChn, startParams) + }) + } + return p.Wait() } case <-ticker.C: { @@ -280,7 +293,7 @@ func (c *Coordinator) initiate(ctx context.Context, tssProcess TssProcess, resul // when it receives the start message. 
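Execute now receives a slice of processes: single-process call sites wrap their process in a one-element slice, while callers that need several results over the same peer subset pass the processes together, each with its own session ID (see the frost signing test further below). A sketch of both call shapes against the updated API, with process construction left out:

package coordinatorsketch

import (
	"context"

	"github.com/ChainSafe/sygma-relayer/tss"
)

// executeProcesses shows the two call shapes of the updated Coordinator.Execute.
func executeProcesses(ctx context.Context, coordinator *tss.Coordinator, single tss.TssProcess, batch []tss.TssProcess, resultChn chan interface{}) error {
	// The previous single-process behaviour: wrap it in a one-element slice.
	if err := coordinator.Execute(ctx, []tss.TssProcess{single}, resultChn); err != nil {
		return err
	}
	// Several processes: coordinator election, readiness gossip and the start
	// message are shared, then every process runs concurrently to completion.
	return coordinator.Execute(ctx, batch, resultChn)
}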
func (c *Coordinator) waitForStart( ctx context.Context, - tssProcess TssProcess, + tssProcesses []TssProcess, resultChn chan interface{}, coordinator peer.ID, timeout time.Duration, @@ -288,6 +301,7 @@ func (c *Coordinator) waitForStart( msgChan := make(chan *comm.WrappedMessage) startMsgChn := make(chan *comm.WrappedMessage) + tssProcess := tssProcesses[0] initSubID := c.communication.Subscribe(tssProcess.SessionID(), comm.TssInitiateMsg, msgChan) defer c.communication.UnSubscribe(initSubID) startSubID := c.communication.Subscribe(tssProcess.SessionID(), comm.TssStartMsg, startMsgChn) @@ -327,7 +341,14 @@ func (c *Coordinator) waitForStart( return err } - return tssProcess.Run(ctx, false, resultChn, msg.Params) + p := pool.New().WithContext(ctx).WithCancelOnError() + for _, process := range tssProcesses { + tssProcess := process + p.Go(func(ctx context.Context) error { + return tssProcess.Run(ctx, false, resultChn, msg.Params) + }) + } + return p.Wait() } case <-coordinatorTimeoutTicker.C: { diff --git a/tss/ecdsa/common/utils.go b/tss/ecdsa/common/utils.go index f9fac5e6..e8cb49fa 100644 --- a/tss/ecdsa/common/utils.go +++ b/tss/ecdsa/common/utils.go @@ -66,3 +66,15 @@ func ExcludePeers(peers peer.IDSlice, excludedPeers peer.IDSlice) peer.IDSlice { } return includedPeers } + +func PeersIntersection(oldPeers peer.IDSlice, newPeers peer.IDSlice) peer.IDSlice { + includedPeers := make(peer.IDSlice, 0) + for _, peer := range oldPeers { + if !slices.Contains(newPeers, peer) { + continue + } + + includedPeers = append(includedPeers, peer) + } + return includedPeers +} diff --git a/tss/ecdsa/common/utils_test.go b/tss/ecdsa/common/utils_test.go index bf6b565a..8a7d2395 100644 --- a/tss/ecdsa/common/utils_test.go +++ b/tss/ecdsa/common/utils_test.go @@ -106,10 +106,10 @@ type ExcludedPeersTestSuite struct { } func TestRunExcludedPeersTestSuite(t *testing.T) { - suite.Run(t, new(PartiesFromPeersTestSuite)) + suite.Run(t, new(ExcludedPeersTestSuite)) } -func (s *PartiesFromPeersTestSuite) Test_ExcludePeers_Excluded() { +func (s *ExcludedPeersTestSuite) Test_ExcludePeers_Excluded() { peerID1, _ := peer.Decode("QmcW3oMdSqoEcjbyd51auqC23vhKX6BqfcZcY2HJ3sKAZR") peerID2, _ := peer.Decode("QmZHPnN3CKiTAp8VaJqszbf8m7v4mPh15M421KpVdYHF54") peerID3, _ := peer.Decode("QmYayosTHxL2xa4jyrQ2PmbhGbrkSxsGM1kzXLTT8SsLVy") @@ -120,3 +120,23 @@ func (s *PartiesFromPeersTestSuite) Test_ExcludePeers_Excluded() { s.Equal(includedPeers, peer.IDSlice{peerID1}) } + +type PeersIntersectionTestSuite struct { + suite.Suite +} + +func TestRunPeersIntersectionTestSuite(t *testing.T) { + suite.Run(t, new(PeersIntersectionTestSuite)) +} + +func (s *PeersIntersectionTestSuite) Test_PeersIntersection() { + peerID1, _ := peer.Decode("QmcW3oMdSqoEcjbyd51auqC23vhKX6BqfcZcY2HJ3sKAZR") + peerID2, _ := peer.Decode("QmZHPnN3CKiTAp8VaJqszbf8m7v4mPh15M421KpVdYHF54") + peerID3, _ := peer.Decode("QmYayosTHxL2xa4jyrQ2PmbhGbrkSxsGM1kzXLTT8SsLVy") + oldPeers := []peer.ID{peerID1, peerID2, peerID3} + newPeers := []peer.ID{peerID3, peerID2} + + includedPeers := common.PeersIntersection(oldPeers, newPeers) + + s.Equal(includedPeers, peer.IDSlice{peerID2, peerID3}) +} diff --git a/tss/ecdsa/keygen/keygen.go b/tss/ecdsa/keygen/keygen.go index 6ca3c16d..245111c5 100644 --- a/tss/ecdsa/keygen/keygen.go +++ b/tss/ecdsa/keygen/keygen.go @@ -114,12 +114,12 @@ func (k *Keygen) Stop() { // Ready returns true if all parties from the peerstore are ready. // Error is returned if excluded peers exist as we need all peers to participate // in keygen process. 
-func (k *Keygen) Ready(readyMap map[peer.ID]bool, excludedPeers []peer.ID) (bool, error) { +func (k *Keygen) Ready(readyPeers []peer.ID, excludedPeers []peer.ID) (bool, error) { if len(excludedPeers) > 0 { return false, errors.New("error") } - return len(readyMap) == len(k.Host.Peerstore().Peers()), nil + return len(readyPeers) == len(k.Host.Peerstore().Peers()), nil } // ValidCoordinators returns all peers in peerstore @@ -127,7 +127,7 @@ func (k *Keygen) ValidCoordinators() []peer.ID { return k.Host.Peerstore().Peers() } -func (k *Keygen) StartParams(readyMap map[peer.ID]bool) []byte { +func (k *Keygen) StartParams(readyPeers []peer.ID) []byte { return []byte{} } diff --git a/tss/ecdsa/keygen/keygen_test.go b/tss/ecdsa/keygen/keygen_test.go index 2ccb6770..2e88e5b5 100644 --- a/tss/ecdsa/keygen/keygen_test.go +++ b/tss/ecdsa/keygen/keygen_test.go @@ -50,7 +50,7 @@ func (s *KeygenTestSuite) Test_ValidKeygenProcess() { s.MockECDSAStorer.EXPECT().StoreKeyshare(gomock.Any()).Times(3) pool := pool.New().WithContext(context.Background()).WithCancelOnError() for i, coordinator := range coordinators { - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], nil) }) + pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, nil) }) } err := pool.Wait() @@ -81,7 +81,7 @@ func (s *KeygenTestSuite) Test_KeygenTimeout() { s.MockECDSAStorer.EXPECT().StoreKeyshare(gomock.Any()).Times(0) pool := pool.New().WithContext(context.Background()) for i, coordinator := range coordinators { - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], nil) }) + pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, nil) }) } err := pool.Wait() diff --git a/tss/ecdsa/resharing/resharing.go b/tss/ecdsa/resharing/resharing.go index 9b303767..f84b7dd2 100644 --- a/tss/ecdsa/resharing/resharing.go +++ b/tss/ecdsa/resharing/resharing.go @@ -136,15 +136,15 @@ func (r *Resharing) Run( // Stop ends all subscriptions created when starting the tss process and unlocks keyshare. func (r *Resharing) Stop() { - log.Info().Str("sessionID", r.SessionID()).Msgf("Stopping tss process.") + r.Log.Info().Msgf("Stopping tss process.") r.Communication.UnSubscribe(r.subscriptionID) r.storer.UnlockKeyshare() r.Cancel() } // Ready returns true if all parties from peerstore are ready -func (r *Resharing) Ready(readyMap map[peer.ID]bool, excludedPeers []peer.ID) (bool, error) { - return len(readyMap) == len(r.Host.Peerstore().Peers()), nil +func (r *Resharing) Ready(readyPeers []peer.ID, excludedPeers []peer.ID) (bool, error) { + return len(readyPeers) == len(r.Host.Peerstore().Peers()), nil } // ValidCoordinators returns only peers that have a valid keyshare from the previous resharing @@ -164,10 +164,11 @@ func (r *Resharing) ValidCoordinators() []peer.ID { } // StartParams returns threshold and peer subset from the old key to share with new parties. 
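The new PeersIntersection helper in tss/ecdsa/common above is what lets resharing proceed after a peer is removed: the old subset sent in the start params is narrowed to keyshare holders still present in the peerstore, matching the StartParams, validateStartParams and Test_ValidResharingProcess_RemovePeer changes just below. A small usage example, reusing the peer IDs from the unit test:

package main

import (
	"fmt"

	"github.com/ChainSafe/sygma-relayer/tss/ecdsa/common"
	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	p1, _ := peer.Decode("QmcW3oMdSqoEcjbyd51auqC23vhKX6BqfcZcY2HJ3sKAZR")
	p2, _ := peer.Decode("QmZHPnN3CKiTAp8VaJqszbf8m7v4mPh15M421KpVdYHF54")
	p3, _ := peer.Decode("QmYayosTHxL2xa4jyrQ2PmbhGbrkSxsGM1kzXLTT8SsLVy")

	oldKeyshareHolders := peer.IDSlice{p1, p2, p3} // peers recorded in the keyshare
	currentPeerstore := peer.IDSlice{p3, p2}       // p1 was removed from the topology

	// Only holders that are still present act as the "old" parties in resharing.
	oldSubset := common.PeersIntersection(oldKeyshareHolders, currentPeerstore)
	fmt.Println(oldSubset) // p2 and p3, in the order of the old keyshare peers
}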
-func (r *Resharing) StartParams(readyMap map[peer.ID]bool) []byte { +func (r *Resharing) StartParams(readyPeers []peer.ID) []byte { + oldSubset := common.PeersIntersection(r.key.Peers, r.Host.Peerstore().Peers()) startParams := &startParams{ OldThreshold: r.key.Threshold, - OldSubset: r.key.Peers, + OldSubset: oldSubset, } paramBytes, _ := json.Marshal(startParams) return paramBytes @@ -200,7 +201,7 @@ func (r *Resharing) validateStartParams(params startParams) error { slices.Sort(r.key.Peers) // if relayer is already part of the active subset, check if peer subset // in starting params is same as one saved in keyshare - if len(r.key.Peers) != 0 && !slices.Equal(params.OldSubset, r.key.Peers) { + if len(r.key.Peers) != 0 && !slices.Equal(params.OldSubset, common.PeersIntersection(r.key.Peers, r.Host.Peerstore().Peers())) { return errors.New("invalid peers subset in start params") } diff --git a/tss/ecdsa/resharing/resharing_test.go b/tss/ecdsa/resharing/resharing_test.go index 5f2c1495..737f813f 100644 --- a/tss/ecdsa/resharing/resharing_test.go +++ b/tss/ecdsa/resharing/resharing_test.go @@ -68,7 +68,56 @@ func (s *ResharingTestSuite) Test_ValidResharingProcess_OldAndNewSubset() { resultChn := make(chan interface{}) pool := pool.New().WithContext(context.Background()).WithCancelOnError() for i, coordinator := range coordinators { - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], resultChn) }) + pool.Go(func(ctx context.Context) error { + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) + }) + } + + err := pool.Wait() + s.Nil(err) +} + +func (s *ResharingTestSuite) Test_ValidResharingProcess_RemovePeer() { + communicationMap := make(map[peer.ID]*tsstest.TestCommunication) + coordinators := []*tss.Coordinator{} + processes := []tss.TssProcess{} + + hosts := []host.Host{} + for i := 0; i < s.PartyNumber-1; i++ { + host, _ := tsstest.NewHost(i) + hosts = append(hosts, host) + } + for _, host := range hosts { + for _, peer := range hosts { + host.Peerstore().AddAddr(peer.ID(), peer.Addrs()[0], peerstore.PermanentAddrTTL) + } + } + + for i, host := range hosts { + communication := tsstest.TestCommunication{ + Host: host, + Subscriptions: make(map[comm.SubscriptionID]chan *comm.WrappedMessage), + } + communicationMap[host.ID()] = &communication + storer := keyshare.NewECDSAKeyshareStore(fmt.Sprintf("../../test/keyshares/%d.keyshare", i)) + share, _ := storer.GetKeyshare() + s.MockECDSAStorer.EXPECT().LockKeyshare() + s.MockECDSAStorer.EXPECT().UnlockKeyshare() + s.MockECDSAStorer.EXPECT().GetKeyshare().Return(share, nil) + s.MockECDSAStorer.EXPECT().StoreKeyshare(gomock.Any()).Return(nil) + resharing := resharing.NewResharing("resharing2", 1, host, &communication, s.MockECDSAStorer) + electorFactory := elector.NewCoordinatorElectorFactory(host, s.BullyConfig) + coordinators = append(coordinators, tss.NewCoordinator(host, &communication, electorFactory)) + processes = append(processes, resharing) + } + tsstest.SetupCommunication(communicationMap) + + resultChn := make(chan interface{}) + pool := pool.New().WithContext(context.Background()).WithCancelOnError() + for i, coordinator := range coordinators { + pool.Go(func(ctx context.Context) error { + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) + }) } err := pool.Wait() @@ -117,7 +166,7 @@ func (s *ResharingTestSuite) Test_InvalidResharingProcess_InvalidOldThreshold_Le pool := pool.New().WithContext(context.Background()) for i, coordinator := range 
coordinators { pool.Go(func(ctx context.Context) error { - return coordinator.Execute(ctx, processes[i], resultChn) + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) }) } err := pool.Wait() @@ -165,7 +214,9 @@ func (s *ResharingTestSuite) Test_InvalidResharingProcess_InvalidOldThreshold_Bi resultChn := make(chan interface{}) pool := pool.New().WithContext(context.Background()) for i, coordinator := range coordinators { - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], resultChn) }) + pool.Go(func(ctx context.Context) error { + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) + }) } err := pool.Wait() diff --git a/tss/ecdsa/signing/signing.go b/tss/ecdsa/signing/signing.go index 76888b20..1241d334 100644 --- a/tss/ecdsa/signing/signing.go +++ b/tss/ecdsa/signing/signing.go @@ -44,6 +44,7 @@ type Signing struct { func NewSigning( msg *big.Int, + messageID string, sessionID string, host host.Host, comm comm.Communication, @@ -64,7 +65,7 @@ func NewSigning( Communication: comm, Peers: key.Peers, SID: sessionID, - Log: log.With().Str("SessionID", sessionID).Str("Process", "signing").Logger(), + Log: log.With().Str("SessionID", sessionID).Str("messageID", messageID).Str("Process", "signing").Logger(), Cancel: func() {}, }, key: key, @@ -126,7 +127,7 @@ func (s *Signing) Run( p.Go(func(ctx context.Context) error { return s.processEndMessage(ctx, sigChn) }) p.Go(func(ctx context.Context) error { return s.monitorSigning(ctx) }) - s.Log.Info().Msgf("Started signing process") + s.Log.Info().Msgf("Started signing process for message %s", s.msg.Text(16)) tssError := s.Party.Start() if tssError != nil { @@ -138,15 +139,15 @@ func (s *Signing) Run( // Stop ends all subscriptions created when starting the tss process. func (s *Signing) Stop() { - log.Info().Str("sessionID", s.SessionID()).Msgf("Stopping tss process.") + s.Log.Info().Msgf("Stopping tss process.") s.Communication.UnSubscribe(s.subscriptionID) s.Cancel() } // Ready returns true if threshold+1 parties are ready to start the signing process. -func (s *Signing) Ready(readyMap map[peer.ID]bool, excludedPeers []peer.ID) (bool, error) { - readyMap = s.readyParticipants(readyMap) - return len(readyMap) == s.key.Threshold+1, nil +func (s *Signing) Ready(readyPeers []peer.ID, excludedPeers []peer.ID) (bool, error) { + readyPeers = s.readyParticipants(readyPeers) + return len(readyPeers) == s.key.Threshold+1, nil } // ValidCoordinators returns only peers that have a valid keyshare @@ -157,12 +158,10 @@ func (s *Signing) ValidCoordinators() []peer.ID { // StartParams returns peer subset for this tss process. It is calculated // by sorting hashes of peer IDs and session ID and chosing ready peers alphabetically // until threshold is satisfied. -func (s *Signing) StartParams(readyMap map[peer.ID]bool) []byte { - readyMap = s.readyParticipants(readyMap) +func (s *Signing) StartParams(readyPeers []peer.ID) []byte { + readyPeers = s.readyParticipants(readyPeers) peers := []peer.ID{} - for peer := range readyMap { - peers = append(peers, peer) - } + peers = append(peers, readyPeers...) 
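StartParams, as its comment says, derives a deterministic peer subset by hashing peer IDs together with the session ID, sorting, and keeping ready peers until the signing threshold is satisfied; the relayer uses util.SortPeersForSession for this. The following is a self-contained illustration of the same idea only, not the actual util implementation, and it assumes SHA-256 as the hash:

package subsetsketch

import (
	"bytes"
	"crypto/sha256"
	"sort"

	"github.com/libp2p/go-libp2p/core/peer"
)

// peerSubset hashes every ready peer ID together with the session ID, sorts by
// the hashes and keeps the first threshold+1 peers, so every relayer derives
// the same subset for a given session without extra communication.
func peerSubset(readyPeers []peer.ID, sessionID string, threshold int) []peer.ID {
	sorted := append([]peer.ID{}, readyPeers...)
	sort.Slice(sorted, func(i, j int) bool {
		hi := sha256.Sum256([]byte(sorted[i].String() + sessionID))
		hj := sha256.Sum256([]byte(sorted[j].String() + sessionID))
		return bytes.Compare(hi[:], hj[:]) < 0
	})
	if len(sorted) > threshold+1 {
		sorted = sorted[:threshold+1]
	}
	return sorted
}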
sortedPeers := util.SortPeersForSession(peers, s.SessionID()) peerSubset := []peer.ID{} @@ -214,18 +213,15 @@ func (s *Signing) processEndMessage(ctx context.Context, endChn chan tssCommon.S } // readyParticipants returns all ready peers that contain a valid key share -func (s *Signing) readyParticipants(readyMap map[peer.ID]bool) map[peer.ID]bool { - readyParticipants := make(map[peer.ID]bool) - for peer, ready := range readyMap { - if !ready { - continue - } +func (s *Signing) readyParticipants(readyPeers []peer.ID) []peer.ID { + readyParticipants := make([]peer.ID, 0) + for _, peer := range readyPeers { if !slices.Contains(s.key.Peers, peer) { continue } - readyParticipants[peer] = true + readyParticipants = append(readyParticipants, peer) } return readyParticipants diff --git a/tss/ecdsa/signing/signing_test.go b/tss/ecdsa/signing/signing_test.go index 1ebc6e70..68e327ac 100644 --- a/tss/ecdsa/signing/signing_test.go +++ b/tss/ecdsa/signing/signing_test.go @@ -46,7 +46,7 @@ func (s *SigningTestSuite) Test_ValidSigningProcess() { msgBytes := []byte("Message") msg := big.NewInt(0) msg.SetBytes(msgBytes) - signing, err := signing.NewSigning(msg, "signing1", host, &communication, fetcher) + signing, err := signing.NewSigning(msg, "signing1", "signing1", host, &communication, fetcher) if err != nil { panic(err) } @@ -56,14 +56,14 @@ func (s *SigningTestSuite) Test_ValidSigningProcess() { } tsstest.SetupCommunication(communicationMap) - resultChn := make(chan interface{}) + resultChn := make(chan interface{}, 2) ctx, cancel := context.WithCancel(context.Background()) pool := pool.New().WithContext(ctx) for i, coordinator := range coordinators { coordinator := coordinator pool.Go(func(ctx context.Context) error { - return coordinator.Execute(ctx, processes[i], resultChn) + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) }) } @@ -96,7 +96,7 @@ func (s *SigningTestSuite) Test_SigningTimeout() { msgBytes := []byte("Message") msg := big.NewInt(0) msg.SetBytes(msgBytes) - signing, err := signing.NewSigning(msg, "signing2", host, &communication, fetcher) + signing, err := signing.NewSigning(msg, "signing2", "signing2", host, &communication, fetcher) if err != nil { panic(err) } @@ -112,7 +112,9 @@ func (s *SigningTestSuite) Test_SigningTimeout() { pool := pool.New().WithContext(context.Background()) for i, coordinator := range coordinators { coordinator := coordinator - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], resultChn) }) + pool.Go(func(ctx context.Context) error { + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) + }) } err := pool.Wait() @@ -140,8 +142,8 @@ func (s *SigningTestSuite) Test_PendingProcessExists() { s.MockECDSAStorer.EXPECT().UnlockKeyshare().AnyTimes() pool := pool.New().WithContext(context.Background()).WithCancelOnError() for i, coordinator := range coordinators { - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], nil) }) - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], nil) }) + pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, nil) }) + pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, nil) }) } err := pool.Wait() diff --git a/tss/frost/common/base.go b/tss/frost/common/base.go index 21a0a458..5ed4362c 100644 --- a/tss/frost/common/base.go +++ b/tss/frost/common/base.go @@ -108,7 +108,7 
@@ func (k *BaseFrostTss) BroadcastPeers(msg *protocol.Message) ([]peer.ID, error) return k.Peers, nil } else { if string(msg.To) == "" { - return []peer.ID{}, nil + return k.Peers, nil } p, err := peer.Decode(string(msg.To)) diff --git a/tss/frost/common/util.go b/tss/frost/common/util.go index 135c3438..22de1fdb 100644 --- a/tss/frost/common/util.go +++ b/tss/frost/common/util.go @@ -4,19 +4,16 @@ package common import ( - "sort" - mapset "github.com/deckarep/golang-set/v2" "github.com/libp2p/go-libp2p/core/peer" "github.com/taurusgroup/multi-party-sig/pkg/party" ) func PartyIDSFromPeers(peers peer.IDSlice) []party.ID { - sort.Sort(peers) peerSet := mapset.NewSet[peer.ID](peers...) idSlice := make([]party.ID, len(peerSet.ToSlice())) for i, peer := range peerSet.ToSlice() { - idSlice[i] = party.ID(peer.Pretty()) + idSlice[i] = party.ID(peer.String()) } return idSlice } diff --git a/tss/frost/keygen/keygen.go b/tss/frost/keygen/keygen.go index e297b4ac..fbd9801d 100644 --- a/tss/frost/keygen/keygen.go +++ b/tss/frost/keygen/keygen.go @@ -76,7 +76,7 @@ func (k *Keygen) Run( var err error k.Handler, err = protocol.NewMultiHandler( frost.KeygenTaproot( - party.ID(k.Host.ID().Pretty()), + party.ID(k.Host.ID().String()), common.PartyIDSFromPeers(append(k.Host.Peerstore().Peers(), k.Host.ID())), k.threshold), []byte(k.SessionID())) @@ -101,12 +101,12 @@ func (k *Keygen) Stop() { // Ready returns true if all parties from the peerstore are ready. // Error is returned if excluded peers exist as we need all peers to participate // in keygen process. -func (k *Keygen) Ready(readyMap map[peer.ID]bool, excludedPeers []peer.ID) (bool, error) { +func (k *Keygen) Ready(readyPeers []peer.ID, excludedPeers []peer.ID) (bool, error) { if len(excludedPeers) > 0 { return false, errors.New("error") } - return len(readyMap) == len(k.Host.Peerstore().Peers()), nil + return len(readyPeers) == len(k.Host.Peerstore().Peers()), nil } // ValidCoordinators returns all peers in peerstore @@ -114,7 +114,7 @@ func (k *Keygen) ValidCoordinators() []peer.ID { return k.Peers } -func (k *Keygen) StartParams(readyMap map[peer.ID]bool) []byte { +func (k *Keygen) StartParams(readyPeers []peer.ID) []byte { return []byte{} } diff --git a/tss/frost/keygen/keygen_test.go b/tss/frost/keygen/keygen_test.go index 6d307630..3b80fe44 100644 --- a/tss/frost/keygen/keygen_test.go +++ b/tss/frost/keygen/keygen_test.go @@ -49,7 +49,7 @@ func (s *KeygenTestSuite) Test_ValidKeygenProcess() { pool := pool.New().WithContext(context.Background()).WithCancelOnError() for i, coordinator := range coordinators { - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], nil) }) + pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, nil) }) } err := pool.Wait() diff --git a/tss/frost/resharing/resharing.go b/tss/frost/resharing/resharing.go index 964b639a..2810d422 100644 --- a/tss/frost/resharing/resharing.go +++ b/tss/frost/resharing/resharing.go @@ -57,6 +57,7 @@ func NewResharing( }, } } + key.Key.Threshold = threshold return &Resharing{ BaseFrostTss: common.BaseFrostTss{ @@ -110,22 +111,22 @@ func (r *Resharing) Run( // Stop ends all subscriptions created when starting the tss process and unlocks keyshare. 
func (r *Resharing) Stop() { - log.Info().Str("sessionID", r.SessionID()).Msgf("Stopping tss process.") + r.Log.Info().Msgf("Stopping tss process.") r.Communication.UnSubscribe(r.subscriptionID) r.storer.UnlockKeyshare() r.Cancel() } // Ready returns true if all parties from peerstore are ready -func (r *Resharing) Ready(readyMap map[peer.ID]bool, excludedPeers []peer.ID) (bool, error) { - return len(readyMap) == len(r.Host.Peerstore().Peers()), nil +func (r *Resharing) Ready(readyPeers []peer.ID, excludedPeers []peer.ID) (bool, error) { + return len(readyPeers) == len(r.Host.Peerstore().Peers()), nil } func (r *Resharing) ValidCoordinators() []peer.ID { return r.key.Peers } -func (r *Resharing) StartParams(readyMap map[peer.ID]bool) []byte { +func (r *Resharing) StartParams(readyPeers []peer.ID) []byte { return r.key.Key.PublicKey } diff --git a/tss/frost/resharing/resharing_test.go b/tss/frost/resharing/resharing_test.go index 7657db2c..177b3896 100644 --- a/tss/frost/resharing/resharing_test.go +++ b/tss/frost/resharing/resharing_test.go @@ -68,7 +68,56 @@ func (s *ResharingTestSuite) Test_ValidResharingProcess_OldAndNewSubset() { resultChn := make(chan interface{}) pool := pool.New().WithContext(context.Background()).WithCancelOnError() for i, coordinator := range coordinators { - pool.Go(func(ctx context.Context) error { return coordinator.Execute(ctx, processes[i], resultChn) }) + pool.Go(func(ctx context.Context) error { + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) + }) + } + + err := pool.Wait() + s.Nil(err) +} + +func (s *ResharingTestSuite) Test_ValidResharingProcess_RemovePeer() { + communicationMap := make(map[peer.ID]*tsstest.TestCommunication) + coordinators := []*tss.Coordinator{} + processes := []tss.TssProcess{} + + hosts := []host.Host{} + for i := 0; i < s.PartyNumber-1; i++ { + host, _ := tsstest.NewHost(i) + hosts = append(hosts, host) + } + for _, host := range hosts { + for _, peer := range hosts { + host.Peerstore().AddAddr(peer.ID(), peer.Addrs()[0], peerstore.PermanentAddrTTL) + } + } + + for i, host := range hosts { + communication := tsstest.TestCommunication{ + Host: host, + Subscriptions: make(map[comm.SubscriptionID]chan *comm.WrappedMessage), + } + communicationMap[host.ID()] = &communication + storer := keyshare.NewFrostKeyshareStore(fmt.Sprintf("../../test/keyshares/%d-frost.keyshare", i)) + share, err := storer.GetKeyshare() + s.MockFrostStorer.EXPECT().LockKeyshare() + s.MockFrostStorer.EXPECT().UnlockKeyshare() + s.MockFrostStorer.EXPECT().GetKeyshare().Return(share, err) + s.MockFrostStorer.EXPECT().StoreKeyshare(gomock.Any()).Return(nil) + resharing := resharing.NewResharing("resharing2", 1, host, &communication, s.MockFrostStorer) + electorFactory := elector.NewCoordinatorElectorFactory(host, s.BullyConfig) + coordinators = append(coordinators, tss.NewCoordinator(host, &communication, electorFactory)) + processes = append(processes, resharing) + } + tsstest.SetupCommunication(communicationMap) + + resultChn := make(chan interface{}) + pool := pool.New().WithContext(context.Background()).WithCancelOnError() + for i, coordinator := range coordinators { + pool.Go(func(ctx context.Context) error { + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) + }) } err := pool.Wait() diff --git a/tss/frost/signing/signing.go b/tss/frost/signing/signing.go index b1d3544c..40699999 100644 --- a/tss/frost/signing/signing.go +++ b/tss/frost/signing/signing.go @@ -7,7 +7,6 @@ import ( "context" "encoding/hex" 
"encoding/json" - "math/big" errors "github.com/ChainSafe/sygma-relayer/tss" "github.com/binance-chain/tss-lib/tss" @@ -43,15 +42,16 @@ type Signing struct { id int coordinator bool key keyshare.FrostKeyshare - msg *big.Int + msg []byte resultChn chan interface{} subscriptionID comm.SubscriptionID } func NewSigning( id int, - msg *big.Int, + msg []byte, tweak string, + messageID string, sessionID string, host host.Host, comm comm.Communication, @@ -85,7 +85,7 @@ func NewSigning( Communication: comm, Peers: key.Peers, SID: sessionID, - Log: log.With().Str("SessionID", sessionID).Str("Process", "signing").Logger(), + Log: log.With().Str("SessionID", sessionID).Str("messageID", messageID).Str("Process", "signing").Logger(), Cancel: func() {}, Done: make(chan bool), }, @@ -122,7 +122,7 @@ func (s *Signing) Run( frost.SignTaproot( s.key.Key, common.PartyIDSFromPeers(peerSubset), - s.msg.Bytes(), + s.msg, ), []byte(s.SessionID())) if err != nil { @@ -135,21 +135,21 @@ func (s *Signing) Run( p.Go(func(ctx context.Context) error { return s.processEndMessage(ctx) }) p.Go(func(ctx context.Context) error { return s.ProcessOutboundMessages(ctx, outChn, comm.TssKeySignMsg) }) - s.Log.Info().Msgf("Started signing process") + s.Log.Info().Msgf("Started signing process for message %s", hex.EncodeToString(s.msg)) return p.Wait() } // Stop ends all subscriptions created when starting the tss process. func (s *Signing) Stop() { - log.Info().Str("sessionID", s.SessionID()).Msgf("Stopping tss process.") + s.Log.Info().Msgf("Stopping tss process.") s.Communication.UnSubscribe(s.subscriptionID) s.Cancel() } // Ready returns true if threshold+1 parties are ready to start the signing process. -func (s *Signing) Ready(readyMap map[peer.ID]bool, excludedPeers []peer.ID) (bool, error) { - readyMap = s.readyParticipants(readyMap) - return len(readyMap) == s.key.Threshold+1, nil +func (s *Signing) Ready(readyPeers []peer.ID, excludedPeers []peer.ID) (bool, error) { + readyPeers = s.readyParticipants(readyPeers) + return len(readyPeers) == s.key.Threshold+1, nil } // ValidCoordinators returns only peers that have a valid keyshare @@ -160,12 +160,10 @@ func (s *Signing) ValidCoordinators() []peer.ID { // StartParams returns peer subset for this tss process. It is calculated // by sorting hashes of peer IDs and session ID and chosing ready peers alphabetically // until threshold is satisfied. -func (s *Signing) StartParams(readyMap map[peer.ID]bool) []byte { - readyMap = s.readyParticipants(readyMap) +func (s *Signing) StartParams(readyPeers []peer.ID) []byte { + readyPeers = s.readyParticipants(readyPeers) peers := []peer.ID{} - for peer := range readyMap { - peers = append(peers, peer) - } + peers = append(peers, readyPeers...) 
sortedPeers := util.SortPeersForSession(peers, s.SessionID()) peerSubset := []peer.ID{} @@ -221,18 +219,15 @@ func (s *Signing) processEndMessage(ctx context.Context) error { } // readyParticipants returns all ready peers that contain a valid key share -func (s *Signing) readyParticipants(readyMap map[peer.ID]bool) map[peer.ID]bool { - readyParticipants := make(map[peer.ID]bool) - for peer, ready := range readyMap { - if !ready { - continue - } +func (s *Signing) readyParticipants(readyPeers []peer.ID) []peer.ID { + readyParticipants := make([]peer.ID, 0) + for _, peer := range readyPeers { if !slices.Contains(s.key.Peers, peer) { continue } - readyParticipants[peer] = true + readyParticipants = append(readyParticipants, peer) } return readyParticipants diff --git a/tss/frost/signing/signing_test.go b/tss/frost/signing/signing_test.go index b7feb435..d90178fc 100644 --- a/tss/frost/signing/signing_test.go +++ b/tss/frost/signing/signing_test.go @@ -7,7 +7,6 @@ import ( "context" "encoding/hex" "fmt" - "math/big" "testing" "time" @@ -50,8 +49,6 @@ func (s *SigningTestSuite) Test_ValidSigningProcess() { s.Nil(err) msgBytes := []byte("Message") - msg := big.NewInt(0) - msg.SetBytes(msgBytes) for i, host := range s.Hosts { communication := tsstest.TestCommunication{ Host: host, @@ -60,7 +57,7 @@ func (s *SigningTestSuite) Test_ValidSigningProcess() { communicationMap[host.ID()] = &communication fetcher := keyshare.NewFrostKeyshareStore(fmt.Sprintf("../../test/keyshares/%d-frost.keyshare", i)) - signing, err := signing.NewSigning(1, msg, tweak, "signing1", host, &communication, fetcher) + signing, err := signing.NewSigning(1, msgBytes, tweak, "signing1", "signing1", host, &communication, fetcher) if err != nil { panic(err) } @@ -70,14 +67,14 @@ func (s *SigningTestSuite) Test_ValidSigningProcess() { } tsstest.SetupCommunication(communicationMap) - resultChn := make(chan interface{}) + resultChn := make(chan interface{}, 2) ctx, cancel := context.WithCancel(context.Background()) pool := pool.New().WithContext(ctx) for i, coordinator := range coordinators { coordinator := coordinator pool.Go(func(ctx context.Context) error { - return coordinator.Execute(ctx, processes[i], resultChn) + return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn) }) } @@ -85,13 +82,79 @@ func (s *SigningTestSuite) Test_ValidSigningProcess() { sig2 := <-resultChn tSig1 := sig1.(signing.Signature) tSig2 := sig2.(signing.Signature) - s.Equal(tweakedKeyshare.PublicKey.Verify(tSig1.Signature, msg.Bytes()), true) - s.Equal(tweakedKeyshare.PublicKey.Verify(tSig2.Signature, msg.Bytes()), true) + s.Equal(tweakedKeyshare.PublicKey.Verify(tSig1.Signature, msgBytes), true) + s.Equal(tweakedKeyshare.PublicKey.Verify(tSig2.Signature, msgBytes), true) cancel() err = pool.Wait() s.Nil(err) } +func (s *SigningTestSuite) Test_MultipleProcesses() { + communicationMap := make(map[peer.ID]*tsstest.TestCommunication) + coordinators := []*tss.Coordinator{} + processes := [][]tss.TssProcess{} + + tweak := "c82aa6ae534bb28aaafeb3660c31d6a52e187d8f05d48bb6bdb9b733a9b42212" + tweakBytes, err := hex.DecodeString(tweak) + s.Nil(err) + h := &curve.Secp256k1Scalar{} + err = h.UnmarshalBinary(tweakBytes) + s.Nil(err) + + msgBytes := []byte("Message") + for i, host := range s.Hosts { + communication := tsstest.TestCommunication{ + Host: host, + Subscriptions: make(map[comm.SubscriptionID]chan *comm.WrappedMessage), + } + communicationMap[host.ID()] = &communication + fetcher := 
+
+		signing1, err := signing.NewSigning(1, msgBytes, tweak, "signing1", "signing1", host, &communication, fetcher)
+		if err != nil {
+			panic(err)
+		}
+		signing2, err := signing.NewSigning(1, msgBytes, tweak, "signing1", "signing2", host, &communication, fetcher)
+		if err != nil {
+			panic(err)
+		}
+		signing3, err := signing.NewSigning(1, msgBytes, tweak, "signing1", "signing3", host, &communication, fetcher)
+		if err != nil {
+			panic(err)
+		}
+		electorFactory := elector.NewCoordinatorElectorFactory(host, s.BullyConfig)
+		coordinator := tss.NewCoordinator(host, &communication, electorFactory)
+		coordinators = append(coordinators, coordinator)
+		processes = append(processes, []tss.TssProcess{signing1, signing2, signing3})
+	}
+	tsstest.SetupCommunication(communicationMap)
+
+	resultChn := make(chan interface{}, 6)
+	ctx, cancel := context.WithCancel(context.Background())
+	pool := pool.New().WithContext(ctx)
+	for i, coordinator := range coordinators {
+		coordinator := coordinator
+
+		pool.Go(func(ctx context.Context) error {
+			return coordinator.Execute(ctx, processes[i], resultChn)
+		})
+	}
+
+	results := make([]signing.Signature, 6)
+	i := 0
+	for result := range resultChn {
+		sig := result.(signing.Signature)
+		results[i] = sig
+		i++
+		if i == 5 {
+			break
+		}
+	}
+	err = pool.Wait()
+	s.NotNil(err)
+	cancel()
+}
+
 func (s *SigningTestSuite) Test_ProcessTimeout() {
 	communicationMap := make(map[peer.ID]*tsstest.TestCommunication)
 	coordinators := []*tss.Coordinator{}
@@ -105,8 +168,6 @@
 	s.Nil(err)

 	msgBytes := []byte("Message")
-	msg := big.NewInt(0)
-	msg.SetBytes(msgBytes)
 	for i, host := range s.Hosts {
 		communication := tsstest.TestCommunication{
 			Host:          host,
@@ -115,7 +176,7 @@
 		communicationMap[host.ID()] = &communication
 		fetcher := keyshare.NewFrostKeyshareStore(fmt.Sprintf("../../test/keyshares/%d-frost.keyshare", i))

-		signing, err := signing.NewSigning(1, msg, tweak, "signing1", host, &communication, fetcher)
+		signing, err := signing.NewSigning(1, msgBytes, tweak, "signing1", "signing1", host, &communication, fetcher)
 		if err != nil {
 			panic(err)
 		}
@@ -134,7 +195,7 @@
 	for i, coordinator := range coordinators {
 		coordinator := coordinator
 		pool.Go(func(ctx context.Context) error {
-			return coordinator.Execute(ctx, processes[i], resultChn)
+			return coordinator.Execute(ctx, []tss.TssProcess{processes[i]}, resultChn)
 		})
 	}

diff --git a/tss/mock/coordinator.go b/tss/mock/coordinator.go
index 66a60e17..8268dd2b 100644
--- a/tss/mock/coordinator.go
+++ b/tss/mock/coordinator.go
@@ -36,18 +36,18 @@ func (m *MockTssProcess) EXPECT() *MockTssProcessMockRecorder {
 }

 // Ready mocks base method.
-func (m *MockTssProcess) Ready(readyMap map[peer.ID]bool, excludedPeers []peer.ID) (bool, error) {
+func (m *MockTssProcess) Ready(readyPeers, excludedPeers []peer.ID) (bool, error) {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Ready", readyMap, excludedPeers)
+	ret := m.ctrl.Call(m, "Ready", readyPeers, excludedPeers)
 	ret0, _ := ret[0].(bool)
 	ret1, _ := ret[1].(error)
 	return ret0, ret1
 }

 // Ready indicates an expected call of Ready.
-func (mr *MockTssProcessMockRecorder) Ready(readyMap, excludedPeers interface{}) *gomock.Call {
+func (mr *MockTssProcessMockRecorder) Ready(readyPeers, excludedPeers interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ready", reflect.TypeOf((*MockTssProcess)(nil).Ready), readyMap, excludedPeers)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ready", reflect.TypeOf((*MockTssProcess)(nil).Ready), readyPeers, excludedPeers)
 }

 // Retryable mocks base method.
@@ -93,17 +93,17 @@ func (mr *MockTssProcessMockRecorder) SessionID() *gomock.Call {
 }

 // StartParams mocks base method.
-func (m *MockTssProcess) StartParams(readyMap map[peer.ID]bool) []byte {
+func (m *MockTssProcess) StartParams(readyPeers []peer.ID) []byte {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "StartParams", readyMap)
+	ret := m.ctrl.Call(m, "StartParams", readyPeers)
 	ret0, _ := ret[0].([]byte)
 	return ret0
 }

 // StartParams indicates an expected call of StartParams.
-func (mr *MockTssProcessMockRecorder) StartParams(readyMap interface{}) *gomock.Call {
+func (mr *MockTssProcessMockRecorder) StartParams(readyPeers interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartParams", reflect.TypeOf((*MockTssProcess)(nil).StartParams), readyMap)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartParams", reflect.TypeOf((*MockTssProcess)(nil).StartParams), readyPeers)
 }

 // Stop mocks base method.
diff --git a/tss/test/communication.go b/tss/test/communication.go
index b01faeca..0f694408 100644
--- a/tss/test/communication.go
+++ b/tss/test/communication.go
@@ -6,6 +6,7 @@ package tsstest
 import (
 	"fmt"
 	"sync"
+	"time"

 	"github.com/ChainSafe/sygma-relayer/comm"
 	"github.com/libp2p/go-libp2p/core/host"
@@ -35,6 +36,8 @@ func (tc *TestCommunication) Broadcast(
 		Payload:     msg,
 		From:        tc.Host.ID(),
 	}
+
+	time.Sleep(100 * time.Millisecond)
 	for _, peer := range peers {
 		if tc.PeerCommunications[peer.Pretty()] == nil {
 			continue