remove l1iforoot from stream (0xPolygonHermez#3064)
ToniRamirezM authored Jan 12, 2024
1 parent dac5335 commit 1205c7f
Showing 6 changed files with 103 additions and 157 deletions.
1 change: 0 additions & 1 deletion sequencer/datastreamer.go
@@ -14,7 +14,6 @@ func (f *finalizer) DSSendL2Block(batchNumber uint64, blockResponse *state.Proce
L2BlockNumber: blockResponse.BlockNumber,
Timestamp: int64(blockResponse.Timestamp),
L1BlockHash: blockResponse.BlockHashL1,
- L1InfoRoot: blockResponse.BlockInfoRoot,
GlobalExitRoot: blockResponse.GlobalExitRoot,
Coinbase: f.sequencerAddress,
ForkID: uint16(forkID),
1 change: 0 additions & 1 deletion sequencer/sequencer.go
@@ -264,7 +264,6 @@ func (s *Sequencer) sendDataToStreamer() {
Timestamp: l2Block.Timestamp,
L1BlockHash: l2Block.L1BlockHash,
GlobalExitRoot: l2Block.GlobalExitRoot,
- L1InfoRoot: l2Block.L1InfoRoot,
Coinbase: l2Block.Coinbase,
ForkID: l2Block.ForkID,
}
239 changes: 97 additions & 142 deletions state/datastream.go
@@ -60,7 +60,6 @@ type DSL2Block struct {
Timestamp int64 // 8 bytes
L1BlockHash common.Hash // 32 bytes
GlobalExitRoot common.Hash // 32 bytes
- L1InfoRoot common.Hash // 32 bytes
Coinbase common.Address // 20 bytes
ForkID uint16 // 2 bytes
BlockHash common.Hash // 32 bytes
@@ -74,7 +73,6 @@ type DSL2BlockStart struct {
Timestamp int64 // 8 bytes
L1BlockHash common.Hash // 32 bytes
GlobalExitRoot common.Hash // 32 bytes
- L1InfoRoot common.Hash // 32 bytes
Coinbase common.Address // 20 bytes
ForkID uint16 // 2 bytes
}
@@ -87,7 +85,6 @@ func (b DSL2BlockStart) Encode() []byte {
bytes = binary.LittleEndian.AppendUint64(bytes, uint64(b.Timestamp))
bytes = append(bytes, b.L1BlockHash.Bytes()...)
bytes = append(bytes, b.GlobalExitRoot.Bytes()...)
- bytes = append(bytes, b.L1InfoRoot.Bytes()...)
bytes = append(bytes, b.Coinbase.Bytes()...)
bytes = binary.LittleEndian.AppendUint16(bytes, b.ForkID)
return bytes
@@ -100,9 +97,8 @@ func (b DSL2BlockStart) Decode(data []byte) DSL2BlockStart {
b.Timestamp = int64(binary.LittleEndian.Uint64(data[16:24]))
b.L1BlockHash = common.BytesToHash(data[24:56])
b.GlobalExitRoot = common.BytesToHash(data[56:88])
- b.L1InfoRoot = common.BytesToHash(data[88:120])
- b.Coinbase = common.BytesToAddress(data[120:140])
- b.ForkID = binary.LittleEndian.Uint16(data[140:142])
+ b.Coinbase = common.BytesToAddress(data[88:108])
+ b.ForkID = binary.LittleEndian.Uint16(data[108:110])
return b
}

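With the 32-byte L1InfoRoot gone, every later offset in the Decode above shifts down by 32 and the encoded block-start record shrinks from 142 to 110 bytes. The sketch below only tabulates that layout; the names and the program are illustrative, not repository code, with field sizes taken from the struct comments above.

```go
package main

import "fmt"

func main() {
	// Hypothetical summary of the DSL2BlockStart wire layout after this commit,
	// derived from the Decode offsets shown in the diff.
	fields := []struct {
		name string
		size int
	}{
		{"BatchNumber", 8},
		{"L2BlockNumber", 8},
		{"Timestamp", 8},
		{"L1BlockHash", 32},
		{"GlobalExitRoot", 32},
		// L1InfoRoot (32 bytes) used to sit here; it has been removed.
		{"Coinbase", 20},
		{"ForkID", 2},
	}

	offset := 0
	for _, f := range fields {
		fmt.Printf("%-14s data[%d:%d]\n", f.name, offset, offset+f.size)
		offset += f.size
	}
	fmt.Println("total encoded length:", offset) // 110 bytes, previously 142
}
```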
@@ -273,7 +269,6 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
BatchNumber: genesisL2Block.BatchNumber,
L2BlockNumber: genesisL2Block.L2BlockNumber,
Timestamp: genesisL2Block.Timestamp,
- L1InfoRoot: genesisL2Block.L1InfoRoot,
GlobalExitRoot: genesisL2Block.GlobalExitRoot,
Coinbase: genesisL2Block.Coinbase,
ForkID: genesisL2Block.ForkID,
@@ -384,28 +379,23 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
currentBatchNumber += limit

for _, batch := range fullBatches {
if len(batch.L2Blocks) == 0 {
// Empty batch
err = streamServer.StartAtomicOp()
if err != nil {
return err
}

bookMark := DSBookMark{
Type: BookMarkTypeBatch,
Value: batch.BatchNumber,
}
err = streamServer.StartAtomicOp()
if err != nil {
return err
}

_, err = streamServer.AddStreamBookmark(bookMark.Encode())
if err != nil {
return err
}
bookMark := DSBookMark{
Type: BookMarkTypeBatch,
Value: batch.BatchNumber,
}

err = streamServer.CommitAtomicOp()
if err != nil {
return err
}
_, err = streamServer.AddStreamBookmark(bookMark.Encode())
if err != nil {
return err
}

if len(batch.L2Blocks) == 0 {
// Empty batch
// Check if there is a GER update
if batch.GlobalExitRoot != currentGER && batch.GlobalExitRoot != (common.Hash{}) {
updateGer := DSUpdateGER{
@@ -426,150 +416,115 @@ func GenerateDataStreamerFile(ctx context.Context, streamServer *datastreamer.St
if err != nil {
return err
}

err = streamServer.CommitAtomicOp()
if err != nil {
return err
}

currentGER = batch.GlobalExitRoot
}
continue
}

err = streamServer.StartAtomicOp()
if err != nil {
return err
}

bookMark := DSBookMark{
Type: BookMarkTypeBatch,
Value: batch.BatchNumber,
}

_, err = streamServer.AddStreamBookmark(bookMark.Encode())
if err != nil {
return err
}

for blockIndex, l2block := range batch.L2Blocks {
if l2block.L2BlockNumber <= lastAddedL2Block && lastAddedL2Block != 0 {
continue
} else {
lastAddedL2Block = l2block.L2BlockNumber
}

l1BlockHash := common.Hash{}

vb, err := stateDB.GetVirtualBatch(ctx, batch.BatchNumber, nil)
if err != nil {
log.Errorf("Failed getting virtualBatch %d, err: %v", batch.BatchNumber, err)
return err
}
l1InfoRoot := vb.L1InfoRoot

// Get L1 block hash
if l2block.ForkID >= FORKID_ETROG {
isForcedBatch := false
batchRawData := &BatchRawV2{}

if batch.BatchNumber == 1 || batch.ForcedBatchNum != nil {
isForcedBatch = true
} else {
for blockIndex, l2block := range batch.L2Blocks {
if l2block.L2BlockNumber <= lastAddedL2Block && lastAddedL2Block != 0 {
continue
} else {
batchRawData, err = DecodeBatchV2(batch.BatchL2Data)
if err != nil {
log.Errorf("Failed to decode batch data, err: %v", err)
return err
}
lastAddedL2Block = l2block.L2BlockNumber
}

if !isForcedBatch {
// Get current block by index
l2blockRaw := batchRawData.Blocks[blockIndex]
if l2blockRaw.IndexL1InfoTree != 0 {
l1InfoTreeExitRootStorageEntry, err := stateDB.GetL1InfoRootLeafByIndex(ctx, l2blockRaw.IndexL1InfoTree, nil)
l1BlockHash := common.Hash{}

// Get L1 block hash
if l2block.ForkID >= FORKID_ETROG {
isForcedBatch := false
batchRawData := &BatchRawV2{}

if batch.BatchNumber == 1 || batch.ForcedBatchNum != nil {
isForcedBatch = true
} else {
batchRawData, err = DecodeBatchV2(batch.BatchL2Data)
if err != nil {
log.Errorf("Failed to decode batch data, err: %v", err)
return err
}
l1InfoRoot = &l1InfoTreeExitRootStorageEntry.L1InfoTreeRoot
l1BlockHash = l1InfoTreeExitRootStorageEntry.L1InfoTreeLeaf.PreviousBlockHash
}
} else {
// Initial batch must be handled differently
if batch.BatchNumber == 1 {
l1Aux := batch.GlobalExitRoot
l1InfoRoot = &l1Aux
l1BlockHash, err = stateDB.GetVirtualBatchParentHash(ctx, batch.BatchNumber, nil)
if err != nil {
return err

if !isForcedBatch {
// Get current block by index
l2blockRaw := batchRawData.Blocks[blockIndex]
if l2blockRaw.IndexL1InfoTree != 0 {
l1InfoTreeExitRootStorageEntry, err := stateDB.GetL1InfoRootLeafByIndex(ctx, l2blockRaw.IndexL1InfoTree, nil)
if err != nil {
return err
}
l1BlockHash = l1InfoTreeExitRootStorageEntry.L1InfoTreeLeaf.PreviousBlockHash
}
} else {
l1BlockHash, err = stateDB.GetForcedBatchParentHash(ctx, *batch.ForcedBatchNum, nil)
if err != nil {
return err
// Initial batch must be handled differently
if batch.BatchNumber == 1 {
l1BlockHash, err = stateDB.GetVirtualBatchParentHash(ctx, batch.BatchNumber, nil)
if err != nil {
return err
}
} else {
l1BlockHash, err = stateDB.GetForcedBatchParentHash(ctx, *batch.ForcedBatchNum, nil)
if err != nil {
return err
}
}
}
}
}

blockStart := DSL2BlockStart{
BatchNumber: l2block.BatchNumber,
L2BlockNumber: l2block.L2BlockNumber,
Timestamp: l2block.Timestamp,
L1BlockHash: l1BlockHash,
GlobalExitRoot: l2block.GlobalExitRoot,
Coinbase: l2block.Coinbase,
ForkID: l2block.ForkID,
}
if l1InfoRoot != nil {
blockStart.L1InfoRoot = *l1InfoRoot
}
blockStart := DSL2BlockStart{
BatchNumber: l2block.BatchNumber,
L2BlockNumber: l2block.L2BlockNumber,
Timestamp: l2block.Timestamp,
L1BlockHash: l1BlockHash,
GlobalExitRoot: l2block.GlobalExitRoot,
Coinbase: l2block.Coinbase,
ForkID: l2block.ForkID,
}

bookMark = DSBookMark{
Type: BookMarkTypeL2Block,
Value: blockStart.L2BlockNumber,
}
bookMark = DSBookMark{
Type: BookMarkTypeL2Block,
Value: blockStart.L2BlockNumber,
}

_, err = streamServer.AddStreamBookmark(bookMark.Encode())
if err != nil {
return err
}
_, err = streamServer.AddStreamBookmark(bookMark.Encode())
if err != nil {
return err
}

_, err = streamServer.AddStreamEntry(EntryTypeL2BlockStart, blockStart.Encode())
if err != nil {
return err
}
_, err = streamServer.AddStreamEntry(EntryTypeL2BlockStart, blockStart.Encode())
if err != nil {
return err
}

for _, tx := range l2block.Txs {
// Populate intermediate state root
if imStateRoots == nil || (*imStateRoots)[blockStart.L2BlockNumber] == nil {
position := GetSystemSCPosition(l2block.L2BlockNumber)
imStateRoot, err := stateDB.GetStorageAt(ctx, common.HexToAddress(SystemSC), big.NewInt(0).SetBytes(position), l2block.StateRoot)
if err != nil {
return err
}
tx.StateRoot = common.BigToHash(imStateRoot)
} else {
tx.StateRoot = common.BytesToHash((*imStateRoots)[blockStart.L2BlockNumber])
}

for _, tx := range l2block.Txs {
// Populate intermediate state root
if imStateRoots == nil || (*imStateRoots)[blockStart.L2BlockNumber] == nil {
position := GetSystemSCPosition(l2block.L2BlockNumber)
imStateRoot, err := stateDB.GetStorageAt(ctx, common.HexToAddress(SystemSC), big.NewInt(0).SetBytes(position), l2block.StateRoot)
entry, err = streamServer.AddStreamEntry(EntryTypeL2Tx, tx.Encode())
if err != nil {
return err
}
tx.StateRoot = common.BigToHash(imStateRoot)
} else {
tx.StateRoot = common.BytesToHash((*imStateRoots)[blockStart.L2BlockNumber])
}

entry, err = streamServer.AddStreamEntry(EntryTypeL2Tx, tx.Encode())
blockEnd := DSL2BlockEnd{
L2BlockNumber: l2block.L2BlockNumber,
BlockHash: l2block.BlockHash,
StateRoot: l2block.StateRoot,
}

_, err = streamServer.AddStreamEntry(EntryTypeL2BlockEnd, blockEnd.Encode())
if err != nil {
return err
}
currentGER = l2block.GlobalExitRoot
}

blockEnd := DSL2BlockEnd{
L2BlockNumber: l2block.L2BlockNumber,
BlockHash: l2block.BlockHash,
StateRoot: l2block.StateRoot,
}

_, err = streamServer.AddStreamEntry(EntryTypeL2BlockEnd, blockEnd.Encode())
if err != nil {
return err
}
currentGER = l2block.GlobalExitRoot
}
// Commit at the end of each batch group
err = streamServer.CommitAtomicOp()
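The two large hunks above are mostly a control-flow reshuffle: GenerateDataStreamerFile now opens the atomic operation and writes the batch bookmark once per batch, before checking whether the batch is empty, and the per-block L1InfoRoot lookup via GetVirtualBatch disappears. Below is a minimal sketch of that flow under those assumptions, with stand-in types and print statements instead of the real streamer API; details in the repository may differ.

```go
package main

import "fmt"

// Stand-ins for the real batch data; not the repository's types.
type l2Block struct{ number uint64 }

type batch struct {
	number   uint64
	ger      string
	l2Blocks []l2Block
}

func streamBatches(batches []batch) {
	currentGER := ""
	for _, b := range batches {
		fmt.Println("StartAtomicOp")                      // now opened once, up front
		fmt.Println("AddStreamBookmark(batch)", b.number) // batch bookmark before the empty check

		if len(b.l2Blocks) == 0 {
			// Empty batch: only a GER update entry, and only when the GER changed.
			if b.ger != currentGER && b.ger != "" {
				fmt.Println("AddStreamEntry(UpdateGER)", b.ger)
				currentGER = b.ger
			}
		} else {
			for _, blk := range b.l2Blocks {
				// Block start entries no longer carry an L1InfoRoot.
				fmt.Println("AddStreamBookmark(l2block)", blk.number)
				fmt.Println("AddStreamEntry(L2BlockStart)", blk.number)
				fmt.Println("AddStreamEntry(L2Tx) ...")
				fmt.Println("AddStreamEntry(L2BlockEnd)", blk.number)
				currentGER = b.ger
			}
		}

		fmt.Println("CommitAtomicOp") // commit at the end of the batch group
	}
}

func main() {
	streamBatches([]batch{
		{number: 1, ger: "ger-1", l2Blocks: []l2Block{{1}, {2}}},
		{number: 2, ger: "ger-2"}, // empty batch
	})
}
```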
7 changes: 2 additions & 5 deletions state/pgstatestorage/datastream.go
@@ -11,7 +11,7 @@ import (

// GetDSGenesisBlock returns the genesis block
func (p *PostgresStorage) GetDSGenesisBlock(ctx context.Context, dbTx pgx.Tx) (*state.DSL2Block, error) {
- const genesisL2BlockSQL = `SELECT 0 as batch_num, l2b.block_num, l2b.received_at, '0x0000000000000000000000000000000000000000' as global_exit_root, l2b.header->>'miner' AS coinbase, 0 as fork_id, l2b.block_hash, l2b.state_root, '' AS info_root
+ const genesisL2BlockSQL = `SELECT 0 as batch_num, l2b.block_num, l2b.received_at, '0x0000000000000000000000000000000000000000' as global_exit_root, l2b.header->>'miner' AS coinbase, 0 as fork_id, l2b.block_hash, l2b.state_root
FROM state.l2block l2b
WHERE l2b.block_num = 0`

@@ -29,7 +29,7 @@ func (p *PostgresStorage) GetDSGenesisBlock(ctx context.Context, dbTx pgx.Tx) (*

// GetDSL2Blocks returns the L2 blocks
func (p *PostgresStorage) GetDSL2Blocks(ctx context.Context, firstBatchNumber, lastBatchNumber uint64, dbTx pgx.Tx) ([]*state.DSL2Block, error) {
- const l2BlockSQL = `SELECT l2b.batch_num, l2b.block_num, l2b.received_at, b.global_exit_root, l2b.header->>'miner' AS coinbase, f.fork_id, l2b.block_hash, l2b.state_root, coalesce(l2b.header->>'blockInfoRoot', '') AS info_root
+ const l2BlockSQL = `SELECT l2b.batch_num, l2b.block_num, l2b.received_at, b.global_exit_root, l2b.header->>'miner' AS coinbase, f.fork_id, l2b.block_hash, l2b.state_root
FROM state.l2block l2b, state.batch b, state.fork_id f
WHERE l2b.batch_num BETWEEN $1 AND $2 AND l2b.batch_num = b.batch_num AND l2b.batch_num between f.from_batch_num AND f.to_batch_num
ORDER BY l2b.block_num ASC`
@@ -61,7 +61,6 @@ func scanL2Block(row pgx.Row) (*state.DSL2Block, error) {
timestamp time.Time
blockHashStr string
stateRootStr string
- infoRootStr string
)
if err := row.Scan(
&l2Block.BatchNumber,
@@ -72,12 +71,10 @@ func scanL2Block(row pgx.Row) (*state.DSL2Block, error) {
&l2Block.ForkID,
&blockHashStr,
&stateRootStr,
- &infoRootStr,
); err != nil {
return &l2Block, err
}
l2Block.GlobalExitRoot = common.HexToHash(gerStr)
- l2Block.L1InfoRoot = common.HexToHash(infoRootStr)
l2Block.Coinbase = common.HexToAddress(coinbaseStr)
l2Block.Timestamp = timestamp.Unix()
l2Block.BlockHash = common.HexToHash(blockHashStr)
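The SQL statements and scanL2Block change in lockstep because pgx's row.Scan matches destinations to selected columns purely by position: dropping info_root from the query and &infoRootStr from Scan go together. A small illustrative check follows; the column list is copied from the query above, but the snippet itself is not repository code.

```go
package main

import "fmt"

func main() {
	// Columns selected by the updated l2BlockSQL, in query order; info_root is gone.
	columns := []string{
		"batch_num", "block_num", "received_at", "global_exit_root",
		"coinbase", "fork_id", "block_hash", "state_root",
	}

	// scanL2Block now passes exactly one Scan destination per selected column.
	const scanDestinations = 8

	fmt.Println(len(columns) == scanDestinations) // true: the lists stay aligned
}
```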
6 changes: 2 additions & 4 deletions state/test/datastream_test.go
@@ -17,8 +17,7 @@ func TestL2BlockStartEncode(t *testing.T) {
Timestamp: 3, // 8 bytes
L1BlockHash: common.HexToHash("0x04"), // 32 bytes
GlobalExitRoot: common.HexToHash("0x05"), // 32 bytes
- L1InfoRoot: common.HexToHash("0x06"), // 32 bytes
- Coinbase: common.HexToAddress("0x07"), // 20 bytes
+ Coinbase: common.HexToAddress("0x06"), // 20 bytes
ForkID: 5,
}

@@ -28,8 +27,7 @@ func TestL2BlockStartEncode(t *testing.T) {
3, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6,
5, 0}

assert.Equal(t, expected, encoded)
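For completeness, a hedged round-trip sketch using the Encode/Decode shown earlier in this commit, with the same values as the test above; the import paths are assumed from the repository layout and may differ.

```go
package main

import (
	"fmt"

	"github.com/0xPolygonHermez/zkevm-node/state"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// L1InfoRoot is no longer a field of DSL2BlockStart.
	start := state.DSL2BlockStart{
		BatchNumber:    1,
		L2BlockNumber:  2,
		Timestamp:      3,
		L1BlockHash:    common.HexToHash("0x04"),
		GlobalExitRoot: common.HexToHash("0x05"),
		Coinbase:       common.HexToAddress("0x06"),
		ForkID:         5,
	}

	encoded := start.Encode()
	decoded := state.DSL2BlockStart{}.Decode(encoded)

	fmt.Println(len(encoded))     // expected 110 bytes after this change
	fmt.Println(decoded == start) // expected true: Decode reverses Encode
}
```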