From 1af1141173d59826c0f0559b49303b5256156de1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Toni=20Ram=C3=ADrez?=
Date: Tue, 17 Sep 2024 13:06:56 +0200
Subject: [PATCH] fix: linter

---
 sequencesender/ethtx.go                       | 33 +++++++++++--------
 sequencesender/rpc.go                         | 13 ++++----
 .../seqsendertypes/rpcbatch/rpcbatch.go       | 17 ++++++++--
 sequencesender/sequencesender.go              | 29 +++++++++++-----
 4 files changed, 62 insertions(+), 30 deletions(-)

diff --git a/sequencesender/ethtx.go b/sequencesender/ethtx.go
index 6d3f4053..054fb64f 100644
--- a/sequencesender/ethtx.go
+++ b/sequencesender/ethtx.go
@@ -36,7 +36,8 @@ type ethTxAdditionalData struct {
 }
 
 // sendTx adds transaction to the ethTxManager to send it to L1
-func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *common.Hash, to *common.Address, fromBatch uint64, toBatch uint64, data []byte, gas uint64) error {
+func (s *SequenceSender) sendTx(ctx context.Context, resend bool, txOldHash *common.Hash, to *common.Address,
+	fromBatch uint64, toBatch uint64, data []byte, gas uint64) error {
 	// Params if new tx to send or resend a previous tx
 	var paramTo *common.Address
 	var paramNonce *uint64
@@ -164,7 +165,7 @@ func (s *SequenceSender) purgeEthTx(ctx context.Context) {
 }
 
 // syncEthTxResults syncs results from L1 for transactions in the memory structure
-func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) {
+func (s *SequenceSender) syncEthTxResults(ctx context.Context) (uint64, error) { //nolint:unparam
 	s.mutexEthTx.Lock()
 	var txPending uint64
 	var txSync uint64
@@ -240,7 +241,9 @@ func (s *SequenceSender) syncAllEthTxResults(ctx context.Context) error {
 }
 
 // copyTxData copies tx data in the internal structure
-func (s *SequenceSender) copyTxData(txHash common.Hash, txData []byte, txsResults map[common.Hash]ethtxmanager.TxResult) {
+func (s *SequenceSender) copyTxData(
+	txHash common.Hash, txData []byte, txsResults map[common.Hash]ethtxmanager.TxResult,
+) {
 	s.ethTxData[txHash] = make([]byte, len(txData))
 	copy(s.ethTxData[txHash], txData)
 
@@ -264,12 +267,14 @@ func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmana
 	if txData.Status != txResult.Status.String() {
 		log.Infof("update transaction %v to state %s", txResult.ID, txResult.Status.String())
 		txData.StatusTimestamp = time.Now()
-		stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") + ", " + txData.Status + ", " + txResult.Status.String()
+		stTrans := txData.StatusTimestamp.Format("2006-01-02T15:04:05.000-07:00") +
+			", " + txData.Status + ", " + txResult.Status.String()
 		txData.Status = txResult.Status.String()
 		txData.StateHistory = append(txData.StateHistory, stTrans)
 
 		// Manage according to the state
-		statusConsolidated := txData.Status == ethtxmanager.MonitoredTxStatusSafe.String() || txData.Status == ethtxmanager.MonitoredTxStatusFinalized.String()
+		statusConsolidated := txData.Status == ethtxmanager.MonitoredTxStatusSafe.String() ||
+			txData.Status == ethtxmanager.MonitoredTxStatusFinalized.String()
 		if txData.Status == ethtxmanager.MonitoredTxStatusFailed.String() {
 			s.logFatalf("transaction %v result failed!")
 		} else if statusConsolidated && txData.ToBatch >= s.latestVirtualBatchNumber {
@@ -292,24 +297,26 @@ func (s *SequenceSender) updateEthTxResult(txData *ethTxData, txResult ethtxmana
 func (s *SequenceSender) getResultAndUpdateEthTx(ctx context.Context, txHash common.Hash) error {
 	txData, exists := s.ethTransactions[txHash]
 	if !exists {
-		log.Infof("transaction %v not found in memory", txHash)
-		return nil
+		s.logger.Errorf("transaction %v not found in memory", txHash)
+		return errors.New("transaction not found in memory structure")
 	}
 
 	txResult, err := s.ethTxManager.Result(ctx, txHash)
-	if err == ethtxmanager.ErrNotFound {
-		log.Infof("transaction %v does not exist in ethtxmanager. Marking it", txHash)
+	switch {
+	case errors.Is(err, ethtxmanager.ErrNotFound):
+		s.logger.Infof("transaction %v does not exist in ethtxmanager. Marking it", txHash)
 		txData.OnMonitor = false
 		// Resend tx
 		errSend := s.sendTx(ctx, true, &txHash, nil, 0, 0, nil, txData.Gas)
 		if errSend == nil {
 			txData.OnMonitor = false
 		}
-		return errSend
-	} else if err != nil {
-		log.Errorf("error getting result for tx %v: %v", txHash, err)
+
+	case err != nil:
+		s.logger.Errorf("error getting result for tx %v: %v", txHash, err)
 		return err
-	} else {
+
+	default:
 		s.updateEthTxResult(txData, txResult)
 	}
 
diff --git a/sequencesender/rpc.go b/sequencesender/rpc.go
index b87ab84a..90fe648d 100644
--- a/sequencesender/rpc.go
+++ b/sequencesender/rpc.go
@@ -38,24 +38,25 @@ func (s *SequenceSender) getBatchFromRPC(batchNumber uint64) (*rpcbatch.RPCBatch
 
 	// Check if the response is an error
 	if response.Error != nil {
-		return nil, fmt.Errorf("error in the response calling zkevm_getBatchByNumber: %v", response.Error)
+		return nil, fmt.Errorf("error in the response calling zkevm_getBatchByNumber: %w", response.Error)
 	}
 
 	// Get the batch number from the response hex string
 	err = json.Unmarshal(response.Result, &zkEVMBatchData)
 	if err != nil {
-		return nil, fmt.Errorf("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: %v", err)
+		return nil, fmt.Errorf("error unmarshalling the batch from the response calling zkevm_getBatchByNumber: %w", err)
 	}
 
-	rpcBatch, err := rpcbatch.New(batchNumber, zkEVMBatchData.Blocks, common.Hex2Bytes(zkEVMBatchData.BatchL2Data), common.HexToHash(zkEVMBatchData.GlobalExitRoot), common.HexToAddress(zkEVMBatchData.Coinbase), zkEVMBatchData.Closed)
+	rpcBatch, err := rpcbatch.New(batchNumber, zkEVMBatchData.Blocks, common.Hex2Bytes(zkEVMBatchData.BatchL2Data),
+		common.HexToHash(zkEVMBatchData.GlobalExitRoot), common.HexToAddress(zkEVMBatchData.Coinbase), zkEVMBatchData.Closed)
 	if err != nil {
-		return nil, fmt.Errorf("error creating the rpc batch: %v", err)
+		return nil, fmt.Errorf("error creating the rpc batch: %w", err)
 	}
 
 	if len(zkEVMBatchData.Blocks) > 0 {
 		lastL2BlockTimestamp, err := s.getL2BlockTimestampFromRPC(zkEVMBatchData.Blocks[len(zkEVMBatchData.Blocks)-1])
 		if err != nil {
-			return nil, fmt.Errorf("error getting the last l2 block timestamp from the rpc: %v", err)
+			return nil, fmt.Errorf("error getting the last l2 block timestamp from the rpc: %w", err)
 		}
 		rpcBatch.SetLastL2BLockTimestamp(lastL2BlockTimestamp)
 	} else {
@@ -87,7 +88,7 @@ func (s *SequenceSender) getL2BlockTimestampFromRPC(blockHash string) (uint64, e
 	l2Block := zkeEVML2Block{}
 	err = json.Unmarshal(response.Result, &l2Block)
 	if err != nil {
-		return 0, fmt.Errorf("error unmarshalling the l2 block from the response calling eth_getBlockByHash: %v", err)
+		return 0, fmt.Errorf("error unmarshalling the l2 block from the response calling eth_getBlockByHash: %w", err)
 	}
 
 	return new(big.Int).SetBytes(common.FromHex(l2Block.Timestamp)).Uint64(), nil
diff --git a/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go b/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go
index 32f4d18c..fafc1841 100644
--- a/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go
+++ b/sequencesender/seqsendertypes/rpcbatch/rpcbatch.go
@@ -18,7 +18,8 @@ type RPCBatch struct {
 	l1InfoTreeIndex uint32 `json:"l1InfoTreeIndex"`
 }
 
-func New(batchNumber uint64, blockHashes []string, batchL2Data []byte, globalExitRoot common.Hash, coinbase common.Address, closed bool) (*RPCBatch, error) {
+func New(batchNumber uint64, blockHashes []string, batchL2Data []byte, globalExitRoot common.Hash,
+	coinbase common.Address, closed bool) (*RPCBatch, error) {
 	return &RPCBatch{
 		batchNumber: batchNumber,
 		blockHashes: blockHashes,
@@ -110,8 +111,18 @@ func (b *RPCBatch) SetL1InfoTreeIndex(index uint32) {
 
 // String
 func (b *RPCBatch) String() string {
-	return fmt.Sprintf("Batch/RPC: LastCoinbase: %s, ForcedBatchTimestamp: %d, ForcedGlobalExitRoot: %x, ForcedBlockHashL1: %x, L2Data: %x, LastL2BLockTimestamp: %d, BatchNumber: %d, GlobalExitRoot: %x, L1InfoTreeIndex: %d",
-		b.LastCoinbase().String(), b.ForcedBatchTimestamp(), b.ForcedGlobalExitRoot().String(), b.ForcedBlockHashL1().String(), b.L2Data(), b.LastL2BLockTimestamp(), b.BatchNumber(), b.GlobalExitRoot().String(), b.L1InfoTreeIndex(),
+	return fmt.Sprintf(
+		"Batch/RPC: LastCoinbase: %s, ForcedBatchTimestamp: %d, ForcedGlobalExitRoot: %x, ForcedBlockHashL1: %x"+
+			", L2Data: %x, LastL2BLockTimestamp: %d, BatchNumber: %d, GlobalExitRoot: %x, L1InfoTreeIndex: %d",
+		b.LastCoinbase().String(),
+		b.ForcedBatchTimestamp(),
+		b.ForcedGlobalExitRoot().String(),
+		b.ForcedBlockHashL1().String(),
+		b.L2Data(),
+		b.LastL2BLockTimestamp(),
+		b.BatchNumber(),
+		b.GlobalExitRoot().String(),
+		b.L1InfoTreeIndex(),
 	)
 }
 
diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go
index c1378bb9..4005c991 100644
--- a/sequencesender/sequencesender.go
+++ b/sequencesender/sequencesender.go
@@ -19,6 +19,8 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 )
 
+const ten = 10
+
 // SequenceSender represents a sequence sender
 type SequenceSender struct {
 	cfg Config
@@ -148,7 +150,7 @@ func (s *SequenceSender) batchRetrieval(ctx context.Context) error {
 		// Try to retrieve batch from RPC
 		rpcBatch, err := s.getBatchFromRPC(currentBatchNumber)
 		if err != nil {
-			if err == state.ErrNotFound {
+			if errors.Is(err, ethtxmanager.ErrNotFound) {
 				s.logger.Infof("batch %d not found in RPC", currentBatchNumber)
 			} else {
 				s.logger.Errorf("error getting batch %d from RPC: %v", currentBatchNumber, err)
@@ -307,12 +309,21 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 		elapsed, waitTime := s.marginTimeElapsed(lastL2BlockTimestamp, lastL1BlockHeader.Time, timeMargin)
 
 		if !elapsed {
-			s.logger.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
-				waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time, lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin)
+			s.logger.Infof("waiting at least %d seconds to send sequences, time difference between last L1 block %d (ts: %d) "+
+				"and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
+				waitTime, lastL1BlockHeader.Number, lastL1BlockHeader.Time,
+				lastBatch.BatchNumber(), lastL2BlockTimestamp, timeMargin,
+			)
 			time.Sleep(time.Duration(waitTime) * time.Second)
 		} else {
-			s.logger.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) in the sequence is greater than %d seconds",
-				lastL1BlockHeader.Number, lastL1BlockHeader.Time, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin)
+			s.logger.Infof("continuing, time difference between last L1 block %d (ts: %d) and last L2 block %d (ts: %d) "+
+				"in the sequence is greater than %d seconds",
+				lastL1BlockHeader.Number,
+				lastL1BlockHeader.Time,
+				lastBatch.BatchNumber,
+				lastL2BlockTimestamp,
+				timeMargin,
+			)
 			break
 		}
 	}
@@ -326,11 +337,13 @@ func (s *SequenceSender) tryToSendSequence(ctx context.Context) {
 
 		// Wait if the time difference is less than L1BlockTimestampMargin
 		if !elapsed {
-			s.logger.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
+			s.logger.Infof("waiting at least %d seconds to send sequences, time difference between now (ts: %d) "+
+				"and last L2 block %d (ts: %d) in the sequence is lower than %d seconds",
 				waitTime, currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin)
 			time.Sleep(time.Duration(waitTime) * time.Second)
 		} else {
-			s.logger.Infof("[SeqSender]sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) in the sequence is also greater than %d seconds",
+			s.logger.Infof("sending sequences now, time difference between now (ts: %d) and last L2 block %d (ts: %d) "+
+				"in the sequence is also greater than %d seconds",
 				currentTime, lastBatch.BatchNumber, lastL2BlockTimestamp, timeMargin)
 			break
 		}
@@ -500,6 +513,6 @@ func (s *SequenceSender) logFatalf(template string, args ...interface{}) {
 	for {
 		s.logger.Errorf(template, args...)
 		s.logger.Errorf("sequence sending stopped.")
-		time.Sleep(10 * time.Second)
+		time.Sleep(ten * time.Second)
 	}
 }