sharding: sync with master

Former-commit-id: 53c92a9cbae6c604d0edf429e0fd0c8d22f70825 [formerly 02d09912b46393fd29930ae7d04ecf7ff6e2861c]
Former-commit-id: c1d5b8f21d9f7bc35820a231fa74a87456fb6360
This commit is contained in:
Raul Jordan
2018-06-06 11:06:01 -04:00
4 changed files with 435 additions and 173 deletions

View File

@@ -112,61 +112,65 @@ func (c *Collation) CalculateChunkRoot() {
c.header.data.ChunkRoot = &chunkRoot
}
// ConvertBackToTx converts raw blobs back to their original transactions.
func ConvertBackToTx(rawBlobs []utils.RawBlob) ([]*types.Transaction, error) {
	blobs := make([]*types.Transaction, len(rawBlobs))
	for i := 0; i < len(rawBlobs); i++ {
		// Allocate an empty transaction shell for the blob decoder to populate.
		blobs[i] = types.NewTransaction(0, common.HexToAddress("0x"), nil, 0, nil, nil)
		if err := utils.ConvertFromRawBlob(&rawBlobs[i], blobs[i]); err != nil {
			// Error strings are lowercase per Go convention (matches the rest of the file).
			return nil, fmt.Errorf("creation of transactions from raw blobs failed: %v", err)
		}
	}
	return blobs, nil
}
// SerializeTxToBlob method serializes the input tx
// and returns the blobs in byte array.
func SerializeTxToBlob(txs []*types.Transaction) ([]byte, error) {
// convertTxToRawBlob transactions into RawBlobs. This step encodes transactions uses RLP encoding
func convertTxToRawBlob(txs []*types.Transaction) ([]*utils.RawBlob, error) {
blobs := make([]*utils.RawBlob, len(txs))
for i := 0; i < len(txs); i++ {
err := error(nil)
blobs[i], err = utils.NewRawBlob(txs[i], false)
if err != nil {
return nil, fmt.Errorf("%v", err)
return nil, err
}
}
return blobs, nil
}
// SerializeTxToBlob converts transactions using two steps. First performs RLP encoding, and then blob encoding.
func SerializeTxToBlob(txs []*types.Transaction) ([]byte, error) {
	blobs, err := convertTxToRawBlob(txs)
	if err != nil {
		return nil, err
	}
	serializedTx, err := utils.Serialize(blobs)
	if err != nil {
		return nil, err
	}
	// Reject bodies that would not fit into a single collation.
	if int64(len(serializedTx)) > collationSizelimit {
		return nil, fmt.Errorf("the serialized body size %d exceeded the collation size limit %d", len(serializedTx), collationSizelimit)
	}
	return serializedTx, nil
}
// convertRawBlobToTx converts raw blobs back to their original transactions.
func convertRawBlobToTx(rawBlobs []utils.RawBlob) ([]*types.Transaction, error) {
	txs := make([]*types.Transaction, len(rawBlobs))
	for idx := range rawBlobs {
		// Start from an empty transaction and let the blob decoder fill it in.
		tx := types.NewTransaction(0, common.HexToAddress("0x"), nil, 0, nil, nil)
		if err := utils.ConvertFromRawBlob(&rawBlobs[idx], tx); err != nil {
			return nil, fmt.Errorf("creation of transactions from raw blobs failed: %v", err)
		}
		txs[idx] = tx
	}
	return txs, nil
}
// DeserializeBlobToTx takes byte array blob and converts it back
// to original txs and returns the txs in tx array.
func DeserializeBlobToTx(serialisedBlob []byte) (*[]*types.Transaction, error) {
deserializedBlobs, err := utils.Deserialize(serialisedBlob)
if err != nil {
return nil, fmt.Errorf("%v", err)
return nil, err
}
txs, err := ConvertBackToTx(deserializedBlobs)
txs, err := convertRawBlobToTx(deserializedBlobs)
if err != nil {
return nil, fmt.Errorf("%v", err)
return nil, err
}
return &txs, nil

View File

@@ -9,6 +9,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/sharding/utils"
)
// fieldAccess is to access unexported fields in structs in another package
@@ -131,34 +132,160 @@ func makeTxWithGasLimit(gl uint64) *types.Transaction {
// BENCHMARK TESTS
// Helper function to generate test that completes round trip serialization tests for a specific number of transactions.
func runBenchTest(b *testing.B, numTransactions int) {
func makeRandomTransactions(numTransactions int) []*types.Transaction {
var txs []*types.Transaction
for i := 0; i < numTransactions; i++ {
data := make([]byte, 650)
// 150 is the current average tx size, based on recent blocks (i.e. tx size = block size / # txs)
// for example: https://etherscan.io/block/5722271
data := make([]byte, 150)
rand.Read(data)
txs = append(txs, types.NewTransaction(0 /*nonce*/, common.HexToAddress("0x0") /*to*/, nil /*amount*/, 0 /*gasLimit*/, nil /*gasPrice*/, data))
}
return txs
}
// runSerializeRoundtrip benchmarks serialization and deserialization of a set of transactions.
func runSerializeRoundtrip(b *testing.B, numTransactions int) {
	txs := makeRandomTransactions(numTransactions)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		blob, err := SerializeTxToBlob(txs)
		if err != nil {
			b.Errorf("SerializeTxToBlob failed: %v", err)
		}
		_, err = DeserializeBlobToTx(blob)
		if err != nil {
			b.Errorf("DeserializeBlobToTx failed: %v", err)
		}
	}
}
// runSerializeBenchmark benchmarks serialization of a set of transactions.
// Covers both RLP encoding and blob serialization.
func runSerializeBenchmark(b *testing.B, numTransactions int) {
	// Build the fixture outside the timed region.
	txs := makeRandomTransactions(numTransactions)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		if _, err := SerializeTxToBlob(txs); err != nil {
			b.Errorf("SerializeTxToBlob failed: %v", err)
		}
	}
}
// runSerializeNoRLPBenchmark benchmarks just the process of converting an RLP
// encoded set of transactions into serialized data.
func runSerializeNoRLPBenchmark(b *testing.B, numTransactions int) {
	txs := makeRandomTransactions(numTransactions)
	// RLP-encode outside the timed region; only utils.Serialize is measured.
	blobs, err := convertTxToRawBlob(txs)
	if err != nil {
		// Fixed: the message previously named a nonexistent "SerializeTxToRawBlock".
		b.Errorf("convertTxToRawBlob failed: %v", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := utils.Serialize(blobs); err != nil {
			b.Errorf("utils.Serialize failed: %v", err)
		}
	}
}
func BenchmarkSerialization10(b *testing.B) {
runBenchTest(b, 10)
// Benchmarks deserialization of a set of transactions. Does both deserialization of blob and RLP decoding.
func runDeserializeBenchmark(b *testing.B, numTransactions int) {
txs := makeRandomTransactions(numTransactions)
blob, err := SerializeTxToBlob(txs)
if err != nil {
b.Errorf("SerializeTxToRawBlock failed: %v", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := DeserializeBlobToTx(blob)
if err != nil {
b.Errorf("DeserializeBlobToTx failed: %v", err)
}
}
}
func BenchmarkSerialization100(b *testing.B) {
runBenchTest(b, 100)
// Benchmarks just the process of converting serialized data into a blob that's ready for RLP decoding
func runDeserializeNoRLPBenchmark(b *testing.B, numTransactions int) {
txs := makeRandomTransactions(numTransactions)
blob, err := SerializeTxToBlob(txs)
if err != nil {
b.Errorf("SerializeTxToBlob failed: %v", err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := utils.Deserialize(blob)
if err != nil {
b.Errorf("utils.Deserialize failed: %v", err)
}
}
}
func BenchmarkSerialization1000(b *testing.B) {
runBenchTest(b, 1000)
func BenchmarkSerializeNoRLP10(b *testing.B) {
runSerializeNoRLPBenchmark(b, 10)
}
func BenchmarkSerialization10000(b *testing.B) {
runBenchTest(b, 10000)
func BenchmarkSerializeNoRLP100(b *testing.B) {
runSerializeNoRLPBenchmark(b, 100)
}
func BenchmarkSerializeNoRLP1000(b *testing.B) {
runSerializeNoRLPBenchmark(b, 1000)
}
// BenchmarkSerialize10 benchmarks full serialization (RLP + blob encoding) of 10 transactions.
func BenchmarkSerialize10(b *testing.B) {
	runSerializeBenchmark(b, 10)
}

// BenchmarkSerialize100 benchmarks full serialization (RLP + blob encoding) of 100 transactions.
func BenchmarkSerialize100(b *testing.B) {
	runSerializeBenchmark(b, 100)
}

// BenchmarkSerialize1000 benchmarks full serialization (RLP + blob encoding) of 1000 transactions.
func BenchmarkSerialize1000(b *testing.B) {
	runSerializeBenchmark(b, 1000)
}

// BenchmarkDeserialize10 benchmarks full deserialization (blob decoding + RLP) of 10 transactions.
func BenchmarkDeserialize10(b *testing.B) {
	runDeserializeBenchmark(b, 10)
}

// BenchmarkDeserialize100 benchmarks full deserialization (blob decoding + RLP) of 100 transactions.
func BenchmarkDeserialize100(b *testing.B) {
	runDeserializeBenchmark(b, 100)
}

// BenchmarkDeserialize1000 benchmarks full deserialization (blob decoding + RLP) of 1000 transactions.
func BenchmarkDeserialize1000(b *testing.B) {
	runDeserializeBenchmark(b, 1000)
}

// BenchmarkDeserializeNoRLP10 benchmarks blob deserialization (excluding RLP) of 10 transactions.
func BenchmarkDeserializeNoRLP10(b *testing.B) {
	runDeserializeNoRLPBenchmark(b, 10)
}

// BenchmarkDeserializeNoRLP100 benchmarks blob deserialization (excluding RLP) of 100 transactions.
func BenchmarkDeserializeNoRLP100(b *testing.B) {
	runDeserializeNoRLPBenchmark(b, 100)
}

// BenchmarkDeserializeNoRLP1000 benchmarks blob deserialization (excluding RLP) of 1000 transactions.
func BenchmarkDeserializeNoRLP1000(b *testing.B) {
	runDeserializeNoRLPBenchmark(b, 1000)
}

// BenchmarkSerializeRoundtrip10 benchmarks a full serialize/deserialize round trip of 10 transactions.
func BenchmarkSerializeRoundtrip10(b *testing.B) {
	runSerializeRoundtrip(b, 10)
}

// BenchmarkSerializeRoundtrip100 benchmarks a full serialize/deserialize round trip of 100 transactions.
func BenchmarkSerializeRoundtrip100(b *testing.B) {
	runSerializeRoundtrip(b, 100)
}

// BenchmarkSerializeRoundtrip1000 benchmarks a full serialize/deserialize round trip of 1000 transactions.
func BenchmarkSerializeRoundtrip1000(b *testing.B) {
	runSerializeRoundtrip(b, 1000)
}

View File

@@ -4,14 +4,17 @@ package utils
import (
"fmt"
"math"
"github.com/ethereum/go-ethereum/rlp"
)
var (
	chunkSize      = int64(32)              // total bytes per chunk on the wire
	indicatorSize  = int64(1)               // leading indicator byte per chunk
	chunkDataSize  = chunkSize - indicatorSize // payload bytes per chunk (31)
	skipEvmBits    = byte(0x80)             // indicator flag bit: skip EVM execution
	dataLengthBits = byte(0x1F)             // indicator mask: terminal-chunk data length
)
// Flags to add to chunk delimiter.
@@ -47,150 +50,149 @@ func ConvertFromRawBlob(blob *RawBlob, i interface{}) error {
return nil
}
// SerializeBlob parses the blob and serializes it appropriately.
func SerializeBlob(cb RawBlob) ([]byte, error) {
// getNumChunks calculates the number of chunks that will be produced by a byte array of given length
func getNumChunks(dataSize int) int {
numChunks := math.Ceil(float64(dataSize) / float64(chunkDataSize))
return int(numChunks)
}
length := int64(len(cb.data))
terminalLength := length % chunkDataSize
chunksNumber := length / chunkDataSize
indicatorByte := make([]byte, 1)
indicatorByte[0] = 0
if cb.flags.skipEvmExecution {
indicatorByte[0] |= (1 << 7)
}
tempBody := []byte{}
// if blob is less than 31 bytes, adds the indicator chunk
// and pads the remaining empty bytes to the right.
if chunksNumber == 0 {
paddedBytes := make([]byte, (chunkDataSize - length))
indicatorByte[0] = byte(terminalLength)
if cb.flags.skipEvmExecution {
indicatorByte[0] |= (1 << 7)
}
tempBody = append(indicatorByte, append(cb.data, paddedBytes...)...)
return tempBody, nil
}
// if there is no need to pad empty bytes, then the indicator byte
// is added as 0001111, then this chunk is returned to the
// main Serialize function.
if terminalLength == 0 {
for i := int64(1); i < chunksNumber; i++ {
// This loop loops through all non-terminal chunks and add a indicator
// byte of 00000000, each chunk is created by appending the indicator
// byte to the data chunks. The data chunks are separated into sets of
// 31 bytes.
tempBody = append(tempBody,
append(indicatorByte,
cb.data[(i-1)*chunkDataSize:i*chunkDataSize]...)...)
}
indicatorByte[0] = byte(chunkDataSize)
if cb.flags.skipEvmExecution {
indicatorByte[0] |= (1 << 7)
}
// Terminal chunk has its indicator byte added, chunkDataSize*chunksNumber refers to the total size of the blob
tempBody = append(tempBody,
append(indicatorByte,
cb.data[(chunksNumber-1)*chunkDataSize:chunkDataSize*chunksNumber]...)...)
return tempBody, nil
}
// This loop loops through all non-terminal chunks and add a indicator byte
// of 00000000, each chunk is created by appending the indcator byte
// to the data chunks. The data chunks are separated into sets of 31.
for i := int64(1); i <= chunksNumber; i++ {
tempBody = append(tempBody,
append(indicatorByte,
cb.data[(i-1)*chunkDataSize:i*chunkDataSize]...)...)
}
// Appends indicator bytes to terminal-chunks , and if the index of the chunk
// delimiter is non-zero adds it to the chunk. Also pads empty bytes to
// the terminal chunk.chunkDataSize*chunksNumber refers to the total
// size of the blob. finalchunkIndex refers to the index of the last data byte.
indicatorByte[0] = byte(terminalLength)
if cb.flags.skipEvmExecution {
indicatorByte[0] |= (1 << 7)
}
tempBody = append(tempBody,
append(indicatorByte,
cb.data[chunkDataSize*chunksNumber:length]...)...)
emptyBytes := make([]byte, (chunkDataSize - terminalLength))
tempBody = append(tempBody, emptyBytes...)
return tempBody, nil
// getSerializedDatasize determines the number of bytes that will be produced by a byte array of given length.
func getSerializedDatasize(dataSize int) int {
	// Every chunk occupies a full chunkSize bytes on the wire, indicator included.
	chunks := getNumChunks(dataSize)
	return chunks * int(chunkSize)
}
// getTerminalLength determines the length of the final chunk for a byte array of given length.
func getTerminalLength(dataSize int) int {
	// Subtract the payload carried by all non-terminal chunks.
	fullChunks := getNumChunks(dataSize) - 1
	return dataSize - fullChunks*int(chunkDataSize)
}
// Serialize takes a set of blobs and converts them to a single byte array.
func Serialize(rawblobs []*RawBlob) ([]byte, error) {
length := int64(len(rawblobs))
serialisedData := []byte{}
//Loops through all the blobs and serializes them into chunks
for i := int64(0); i < length; i++ {
data := *rawblobs[i]
refinedData, err := SerializeBlob(data)
if err != nil {
return nil, fmt.Errorf("Index %v: %v", i, err)
}
serialisedData = append(serialisedData, refinedData...)
func Serialize(rawBlobs []*RawBlob) ([]byte, error) {
// Loop through all blobs and determine the amount of space that needs to be allocated
totalDataSize := 0
for i := 0; i < len(rawBlobs); i++ {
blobDataSize := len(rawBlobs[i].data)
totalDataSize += getSerializedDatasize(blobDataSize)
}
return serialisedData, nil
returnData := make([]byte, 0, totalDataSize)
// Loop through every blob and copy one chunk at a time
for i := 0; i < len(rawBlobs); i++ {
rawBlob := rawBlobs[i]
numChunks := getNumChunks(len(rawBlob.data))
for j := 0; j < numChunks; j++ {
var terminalLength int
// if non-terminal chunk
if j != numChunks-1 {
terminalLength = int(chunkDataSize)
// append indicating byte with just the length bits
returnData = append(returnData, byte(0))
} else {
terminalLength = getTerminalLength(len(rawBlob.data))
indicatorByte := byte(terminalLength)
// include skipEvm flag if true
if rawBlob.flags.skipEvmExecution {
indicatorByte = indicatorByte | skipEvmBits
}
returnData = append(returnData, indicatorByte)
}
// append data bytes
chunkStart := j * int(chunkDataSize)
chunkEnd := chunkStart + terminalLength
blobSlice := rawBlob.data[chunkStart:chunkEnd]
returnData = append(returnData, blobSlice...)
// append filler bytes, if necessary
if terminalLength != int(chunkDataSize) {
numFillerBytes := numChunks*int(chunkDataSize) - len(rawBlob.data)
fillerBytes := make([]byte, numFillerBytes)
returnData = append(returnData, fillerBytes...)
}
}
}
return returnData, nil
}
// isSkipEvm is true if the first bit of the indicator byte is 1.
func isSkipEvm(indicator byte) bool {
	// & and >> share precedence in Go and associate left-to-right, so the
	// original unparenthesized form already meant this; the parens make it explicit.
	return (indicator&skipEvmBits)>>7 == 1
}
// getDatabyteLength is calculated by looking at the last 5 bits.
// Therefore, mask the first 3 bits to 0.
func getDatabyteLength(indicator byte) int {
	masked := indicator & dataLengthBits
	return int(masked)
}
// SerializedBlob is a helper struct used by Deserialize to determine the total size of the data byte array.
type SerializedBlob struct {
	numNonTerminalChunks int // number of full (31-byte payload) chunks preceding the terminal chunk
	terminalLength       int // payload bytes carried by the terminal chunk
}
// Deserialize results in the byte array being deserialised and
// separated into its respective interfaces.
func Deserialize(data []byte) ([]RawBlob, error) {
chunksNumber := len(data) / int(chunkSize)
serializedBlobs := []SerializedBlob{}
numPartitions := 0
length := int64(len(data))
chunksNumber := length / chunkSize
indicatorByte := byte(0)
tempBody := RawBlob{}
var deserializedBlob []RawBlob
// This separates the byte array into its separate blobs.
for i := int64(1); i <= chunksNumber; i++ {
indicatorIndex := (i - 1) * chunkSize
// Tests if the chunk delimiter is zero, if it is it will append the data chunk
// to tempBody.
if data[indicatorIndex] == indicatorByte || data[indicatorIndex] == byte(128) {
tempBody.data = append(tempBody.data, data[(indicatorIndex+1):(i)*chunkSize]...)
} else if data[indicatorIndex] == byte(31) || data[indicatorIndex] == byte(159) {
if data[indicatorIndex] == byte(159) {
tempBody.flags.skipEvmExecution = true
}
tempBody.data = append(tempBody.data, data[(indicatorIndex+1):indicatorIndex+1+chunkDataSize]...)
deserializedBlob = append(deserializedBlob, tempBody)
tempBody = RawBlob{}
// first iterate through every chunk and identify blobs and their length
for i := 0; i < chunksNumber; i++ {
indicatorIndex := i * int(chunkSize)
databyteLength := getDatabyteLength(data[indicatorIndex])
// if indicator is non-terminal, increase partitions counter
if databyteLength == 0 {
numPartitions += 1
} else {
// Since the chunk delimiter in non-zero now we can infer that it is
// a terminal chunk and add it and append to the deserializedblob
// slice. The tempBody signifies a single deserialized blob.
terminalIndex := int64(data[indicatorIndex])
//Check if EVM flag is equal to 1
flagindex := data[indicatorIndex] >> 7
if flagindex == byte(1) {
terminalIndex = int64(data[indicatorIndex]) - 128
tempBody.flags.skipEvmExecution = true
// if indicator is terminal, append blob info and reset partitions counter
serializedBlob := SerializedBlob{
numNonTerminalChunks: numPartitions,
terminalLength: databyteLength,
}
tempBody.data = append(tempBody.data, data[(indicatorIndex+1):(indicatorIndex+1+terminalIndex)]...)
deserializedBlob = append(deserializedBlob, tempBody)
tempBody = RawBlob{}
serializedBlobs = append(serializedBlobs, serializedBlob)
numPartitions = 0
}
}
// for each block, construct the data byte array
deserializedBlob := make([]RawBlob, 0, len(serializedBlobs))
currentByte := 0
for i := 0; i < len(serializedBlobs); i++ {
numNonTerminalChunks := serializedBlobs[i].numNonTerminalChunks
terminalLength := serializedBlobs[i].terminalLength
blob := RawBlob{}
blob.data = make([]byte, 0, numNonTerminalChunks*31+terminalLength)
// append data from non-terminal chunks
for chunk := 0; chunk < numNonTerminalChunks; chunk++ {
dataBytes := data[currentByte+1 : currentByte+32]
blob.data = append(blob.data, dataBytes...)
currentByte += 32
}
if isSkipEvm(data[currentByte]) {
blob.flags.skipEvmExecution = true
}
// append data from terminal chunk
dataBytes := data[currentByte+1 : currentByte+terminalLength+1]
blob.data = append(blob.data, dataBytes...)
currentByte += 32
deserializedBlob = append(deserializedBlob, blob)
}
return deserializedBlob, nil
}

View File

@@ -46,7 +46,6 @@ func TestSize(t *testing.T) {
drefbody := make([]*RawBlob, len(blob))
for s := 0; s < len(blob); s++ {
drefbody[s] = &(blob[s])
}
serializedblob, err := Serialize(drefbody)
if err != nil {
@@ -57,8 +56,8 @@ func TestSize(t *testing.T) {
t.Errorf("Error Serializing blobs the lengths are not the same:\n %d \n %d", int64(len(serializedblob)), sizeafterSerialize)
}
}
}
func TestSerializeAndDeserializeblob(t *testing.T) {
for i := 1; i < 300; i++ {
@@ -84,5 +83,135 @@ func TestSerializeAndDeserializeblob(t *testing.T) {
t.Errorf("Error Serializing blobs at index %d, the serialized and deserialized versions are not the same:\n\n %v \n\n %v \n\n %v", i, blob, serializedblob, raw)
}
}
}
// TestDeserializeSkipEvm checks that the SKIP_EVM flag bit set in a terminal
// indicator byte is decoded into the resulting blob's flags.
func TestDeserializeSkipEvm(t *testing.T) {
	serialized := make([]byte, 64)
	// Second chunk's indicator: SKIP_EVM flag set, length bits equal 1.
	serialized[32] = 0x81
	blobs, err := Deserialize(serialized)
	if err != nil {
		t.Errorf("Deserialize failed: %v", err)
	}
	if len(blobs) != 1 {
		t.Errorf("Length of blobs incorrect: %d", len(blobs))
	}
	if !blobs[0].flags.skipEvmExecution {
		t.Errorf("SKIP_EVM flag is not true")
	}
	wantSize := 32
	if len(blobs[0].data) != wantSize {
		t.Errorf("blob size should be %d but is %d", wantSize, len(blobs[0].data))
	}
}
// TestDeserializeSkipEvmFalse checks that a terminal indicator byte without
// the flag bit leaves skipEvmExecution false.
func TestDeserializeSkipEvmFalse(t *testing.T) {
	serialized := make([]byte, 64)
	// Second chunk's indicator: no flags set, length bits equal 2.
	serialized[32] = 0x02
	blobs, err := Deserialize(serialized)
	if err != nil {
		t.Errorf("Deserialize failed: %v", err)
	}
	if len(blobs) != 1 {
		t.Errorf("Length of blobs incorrect: %d", len(blobs))
	}
	if blobs[0].flags.skipEvmExecution {
		t.Errorf("SKIP_EVM flag is true")
	}
	// 31 payload bytes from the first chunk plus 2 from the terminal chunk.
	wantSize := 33
	if len(blobs[0].data) != wantSize {
		t.Errorf("blob size should be %d but is %d", wantSize, len(blobs[0].data))
	}
}
// TestSerializeSkipEvm checks that a 32-byte blob with skipEvmExecution set
// serializes into two chunks whose terminal indicator carries the flag bit.
func TestSerializeSkipEvm(t *testing.T) {
	input := []*RawBlob{{data: make([]byte, 32)}}
	input[0].data[31] = byte(1)
	input[0].flags.skipEvmExecution = true
	serialized, err := Serialize(input)
	if err != nil {
		t.Errorf("Serialize failed: %v", err)
	}
	wantLen := 64
	if len(serialized) != wantLen {
		t.Errorf("Length of serialized data incorrect. Should be %d but is %d", wantLen, len(serialized))
	}
	if serialized[0] != 0 {
		t.Errorf("Indicating byte for first chunk should be %x but is %x", 0, serialized[0])
	}
	// Flag bit (0x80) plus terminal length 1.
	wantIndicator := byte(0x81)
	if serialized[32] != wantIndicator {
		t.Errorf("Indicating byte for second chunk should be %x but is %x", wantIndicator, serialized[32])
	}
}
// TestSerializeSkipEvmFalse checks that a 31-byte blob without any flags
// serializes into a single chunk with only length bits in the indicator.
func TestSerializeSkipEvmFalse(t *testing.T) {
	input := []*RawBlob{{data: make([]byte, 31)}}
	serialized, err := Serialize(input)
	if err != nil {
		t.Errorf("Serialize failed: %v", err)
	}
	wantLen := 32
	if len(serialized) != wantLen {
		t.Errorf("Length of serialized data incorrect. Should be %d but is %d", wantLen, len(serialized))
	}
	// Terminal length 31 = 0x1f, no flag bits.
	wantIndicator := byte(0x1f)
	if serialized[0] != wantIndicator {
		t.Errorf("Indicating byte for first chunk should be %x but is %x", wantIndicator, serialized[0])
	}
}
// TestSerializeTestData serializes a 60-byte patterned blob and verifies the
// terminal indicator byte and the payload bytes of both chunks.
func TestSerializeTestData(t *testing.T) {
	blob := &RawBlob{data: make([]byte, 60)}
	for i := range blob.data {
		blob.data[i] = byte(i)
	}
	serialized, err := Serialize([]*RawBlob{blob})
	if err != nil {
		t.Errorf("Serialize failed: %v", err)
	}
	wantLen := 64
	if len(serialized) != wantLen {
		t.Errorf("Length of serialized data incorrect. Should be %d but is %d", wantLen, len(serialized))
	}
	// Terminal chunk carries 60 - 31 = 29 = 0x1D payload bytes.
	wantIndicator := byte(0x1D)
	if serialized[32] != wantIndicator {
		t.Errorf("Indicating byte for second chunk should be %x but is %x", wantIndicator, serialized[32])
	}
	// First chunk payload: positions 1..31 hold values 0..30.
	for i := 1; i < 32; i++ {
		if serialized[i] != byte(i-1) {
			t.Errorf("Data byte incorrect. Should be %x but is %x", byte(i-1), serialized[i])
		}
	}
	// Second chunk payload: positions 33..61 hold values 31..59.
	for i := 33; i < 62; i++ {
		if serialized[i] != byte(i-2) {
			t.Errorf("Data byte incorrect. Should be %x but is %x", byte(i-2), serialized[i])
		}
	}
}