diff --git a/contracts/native/cross_chain_manager/common/param.go b/contracts/native/cross_chain_manager/common/param.go index 36df469e..ac47102e 100644 --- a/contracts/native/cross_chain_manager/common/param.go +++ b/contracts/native/cross_chain_manager/common/param.go @@ -23,6 +23,7 @@ import ( "math/big" "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/contracts/native" "github.com/ethereum/go-ethereum/rlp" @@ -108,6 +109,48 @@ func (m *MakeTxParam) DecodeRLP(s *rlp.Stream) error { return nil } +type MakeTxParamWithSender struct { + Sender common.Address + MakeTxParam +} + +//used for param from evm contract +type MakeTxParamWithSenderShim struct { + Sender common.Address + MakeTxParam []byte +} + +func (this *MakeTxParamWithSender) Deserialization(data []byte) (err error) { + + BytesTy, _ := abi.NewType("bytes", "", nil) + AddrTy, _ := abi.NewType("address", "", nil) + // StringTy, _ := abi.NewType("string", "", nil) + + TxParam := abi.Arguments{ + {Type: AddrTy, Name: "sender"}, + {Type: BytesTy, Name: "makeTxParam"}, + } + + args, err := TxParam.Unpack(data) + if err != nil { + return + } + + shim := new(MakeTxParamWithSenderShim) + err = TxParam.Copy(shim, args) + if err != nil { + return + } + + this.Sender = shim.Sender + makeTxParam, err := DecodeTxParam(shim.MakeTxParam) + if err != nil { + return + } + this.MakeTxParam = *makeTxParam + return +} + //used for param from evm contract type MakeTxParamShim struct { TxHash []byte diff --git a/contracts/native/cross_chain_manager/entrance.go b/contracts/native/cross_chain_manager/entrance.go index 1bf6cdc5..e96c7c1f 100644 --- a/contracts/native/cross_chain_manager/entrance.go +++ b/contracts/native/cross_chain_manager/entrance.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/heco" 
"github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/msc" "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/okex" + "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/ont" "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/polygon" "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/quorum" "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/zilliqa" @@ -88,6 +89,8 @@ func GetChainHandler(router uint64) (scom.ChainHandler, error) { return polygon.NewHandler(), nil case utils.COSMOS_ROUTER: return cosmos.NewCosmosHandler(), nil + case utils.ONT_ROUTER: + return ont.NewONTHandler(), nil case utils.ZILLIQA_ROUTER: return zilliqa.NewHandler(), nil default: diff --git a/contracts/native/cross_chain_manager/ont/merkle/common/address.go b/contracts/native/cross_chain_manager/ont/merkle/common/address.go new file mode 100644 index 00000000..faa2f24a --- /dev/null +++ b/contracts/native/cross_chain_manager/ont/merkle/common/address.go @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. + * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . 
+ */ + +package common + +import ( + "crypto/sha256" + "errors" + "fmt" + "io" + "math/big" + + "github.com/itchyny/base58-go" + "golang.org/x/crypto/ripemd160" +) + +const ADDR_LEN = 20 + +type Address [ADDR_LEN]byte + +var ADDRESS_EMPTY = Address{} + +// ToHexString returns hex string representation of Address +func (self *Address) ToHexString() string { + return fmt.Sprintf("%x", ToArrayReverse(self[:])) +} + +// Serialize serialize Address into io.Writer +func (self *Address) Serialization(sink *ZeroCopySink) { + sink.WriteAddress(*self) +} + +// Deserialize deserialize Address from io.Reader +func (self *Address) Deserialization(source *ZeroCopySource) error { + var eof bool + *self, eof = source.NextAddress() + if eof { + return io.ErrUnexpectedEOF + } + return nil +} + +// Serialize serialize Address into io.Writer +func (self *Address) Serialize(w io.Writer) error { + _, err := w.Write(self[:]) + return err +} + +// Deserialize deserialize Address from io.Reader +func (self *Address) Deserialize(r io.Reader) error { + _, err := io.ReadFull(r, self[:]) + if err != nil { + return errors.New("deserialize Address error") + } + return nil +} + +// ToBase58 returns base58 encoded address string +func (f *Address) ToBase58() string { + data := append([]byte{23}, f[:]...) + temp := sha256.Sum256(data) + temps := sha256.Sum256(temp[:]) + data = append(data, temps[0:4]...) 
+ + bi := new(big.Int).SetBytes(data).String() + encoded, _ := base58.BitcoinEncoding.Encode([]byte(bi)) + return string(encoded) +} + +// AddressParseFromBytes returns parsed Address +func AddressParseFromBytes(f []byte) (Address, error) { + if len(f) != ADDR_LEN { + return ADDRESS_EMPTY, errors.New("[Common]: AddressParseFromBytes err, len != 20") + } + + var addr Address + copy(addr[:], f) + return addr, nil +} + +// AddressParseFromHexString returns parsed Address +func AddressFromHexString(s string) (Address, error) { + hx, err := HexToBytes(s) + if err != nil { + return ADDRESS_EMPTY, err + } + return AddressParseFromBytes(ToArrayReverse(hx)) +} + +const maxSize = 2048 + +// AddressFromBase58 returns Address from encoded base58 string +func AddressFromBase58(encoded string) (Address, error) { + if encoded == "" { + return ADDRESS_EMPTY, errors.New("invalid address") + } + if len(encoded) > maxSize { + return ADDRESS_EMPTY, errors.New("invalid address") + } + decoded, err := base58.BitcoinEncoding.Decode([]byte(encoded)) + if err != nil { + return ADDRESS_EMPTY, err + } + + x, ok := new(big.Int).SetString(string(decoded), 10) + if !ok { + return ADDRESS_EMPTY, errors.New("invalid address") + } + + buf := x.Bytes() + if len(buf) != 1+ADDR_LEN+4 || buf[0] != byte(23) { + return ADDRESS_EMPTY, errors.New("wrong encoded address") + } + + ph, err := AddressParseFromBytes(buf[1:21]) + if err != nil { + return ADDRESS_EMPTY, err + } + + addr := ph.ToBase58() + + if addr != encoded { + return ADDRESS_EMPTY, errors.New("[AddressFromBase58]: decode encoded verify failed.") + } + + return ph, nil +} + +func AddressFromVmCode(code []byte) Address { + var addr Address + temp := sha256.Sum256(code) + md := ripemd160.New() + md.Write(temp[:]) + md.Sum(addr[:0]) + + return addr +} diff --git a/contracts/native/cross_chain_manager/ont/merkle/common/common.go b/contracts/native/cross_chain_manager/ont/merkle/common/common.go new file mode 100644 index 00000000..e843eaf5 --- 
// GetNonce returns a pseudo-random 64-bit nonce.
// FIXME: replace with a cryptographically secure random source.
func GetNonce() uint64 {
	nonce := uint64(rand.Uint32())<<32 + uint64(rand.Uint32())
	return nonce
}

// ToHexString converts a byte slice to its hex string representation.
func ToHexString(data []byte) string {
	return hex.EncodeToString(data)
}

// HexToBytes decodes a hex string into a byte slice.
func HexToBytes(value string) ([]byte, error) {
	return hex.DecodeString(value)
}

// ToArrayReverse returns a new slice with the bytes of arr in reverse
// order. The result is preallocated to the exact length instead of
// being grown element-by-element with append.
func ToArrayReverse(arr []byte) []byte {
	l := len(arr)
	x := make([]byte, l)
	for i, b := range arr {
		x[l-1-i] = b
	}
	return x
}

// FileExisted reports whether filename exists in the filesystem.
func FileExisted(filename string) bool {
	_, err := os.Stat(filename)
	return err == nil || os.IsExist(err)
}
Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. + * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . + */ + +package common + +import "math" + +const ( + MAX_UINT64 = math.MaxUint64 + MAX_INT64 = math.MaxInt64 +) + +func SafeSub(x, y uint64) (uint64, bool) { + return x - y, x < y +} + +func SafeAdd(x, y uint64) (uint64, bool) { + return x + y, y > MAX_UINT64-x +} + +func SafeMul(x, y uint64) (uint64, bool) { + if x == 0 || y == 0 { + return 0, false + } + return x * y, y > MAX_UINT64/x +} diff --git a/contracts/native/cross_chain_manager/ont/merkle/common/uint256.go b/contracts/native/cross_chain_manager/ont/merkle/common/uint256.go new file mode 100644 index 00000000..58785397 --- /dev/null +++ b/contracts/native/cross_chain_manager/ont/merkle/common/uint256.go @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. + * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . + */ + +package common + +import ( + "errors" + "fmt" + "io" +) + +const ( + UINT16_SIZE = 2 + UINT32_SIZE = 4 + UINT64_SIZE = 8 + UINT256_SIZE = 32 +) + +type Uint256 [UINT256_SIZE]byte + +var UINT256_EMPTY = Uint256{} + +func (u *Uint256) ToArray() []byte { + x := make([]byte, UINT256_SIZE) + for i := 0; i < 32; i++ { + x[i] = byte(u[i]) + } + + return x +} + +func (u *Uint256) ToHexString() string { + return fmt.Sprintf("%x", ToArrayReverse(u[:])) +} + +func (u *Uint256) Serialize(w io.Writer) error { + _, err := w.Write(u[:]) + return err +} + +func (u *Uint256) Deserialize(r io.Reader) error { + _, err := io.ReadFull(r, u[:]) + if err != nil { + return errors.New("deserialize Uint256 error") + } + return nil +} + +func Uint256ParseFromBytes(f []byte) (Uint256, error) { + if len(f) != UINT256_SIZE { + return Uint256{}, errors.New("[Common]: Uint256ParseFromBytes err, len != 32") + } + + var hash Uint256 + copy(hash[:], f) + return hash, nil +} + +func Uint256FromHexString(s string) (Uint256, error) { + hx, err := HexToBytes(s) + if err != nil { + return UINT256_EMPTY, err + } + return Uint256ParseFromBytes(ToArrayReverse(hx)) +} diff --git a/contracts/native/cross_chain_manager/ont/merkle/common/zero_copy_sink.go b/contracts/native/cross_chain_manager/ont/merkle/common/zero_copy_sink.go new file mode 100644 index 00000000..64ac3e39 --- /dev/null +++ b/contracts/native/cross_chain_manager/ont/merkle/common/zero_copy_sink.go @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. 
+ * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . + */ +package common + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type ZeroCopySink struct { + buf []byte +} + +// tryGrowByReslice is a inlineable version of grow for the fast-case where the +// internal buffer only needs to be resliced. +// It returns the index where bytes should be written and whether it succeeded. +func (self *ZeroCopySink) tryGrowByReslice(n int) (int, bool) { + if l := len(self.buf); n <= cap(self.buf)-l { + self.buf = self.buf[:l+n] + return l, true + } + return 0, false +} + +const maxInt = int(^uint(0) >> 1) + +// grow grows the buffer to guarantee space for n more bytes. +// It returns the index where bytes should be written. +// If the buffer can't grow it will panic with ErrTooLarge. +func (self *ZeroCopySink) grow(n int) int { + // Try to grow by means of a reslice. + if i, ok := self.tryGrowByReslice(n); ok { + return i + } + + l := len(self.buf) + c := cap(self.buf) + if c > maxInt-c-n { + panic(ErrTooLarge) + } + // Not enough space anywhere, we need to allocate. 
+ buf := makeSlice(2*c + n) + copy(buf, self.buf) + self.buf = buf[:l+n] + return l +} + +func (self *ZeroCopySink) WriteBytes(p []byte) { + data := self.NextBytes(uint64(len(p))) + copy(data, p) +} + +func (self *ZeroCopySink) Size() uint64 { return uint64(len(self.buf)) } + +func (self *ZeroCopySink) NextBytes(n uint64) (data []byte) { + m, ok := self.tryGrowByReslice(int(n)) + if !ok { + m = self.grow(int(n)) + } + data = self.buf[m:] + return +} + +// Backs up a number of bytes, so that the next call to NextXXX() returns data again +// that was already returned by the last call to NextXXX(). +func (self *ZeroCopySink) BackUp(n uint64) { + l := len(self.buf) - int(n) + self.buf = self.buf[:l] +} + +func (self *ZeroCopySink) WriteUint8(data uint8) { + buf := self.NextBytes(1) + buf[0] = data +} + +func (self *ZeroCopySink) WriteByte(c byte) { + self.WriteUint8(c) +} + +func (self *ZeroCopySink) WriteBool(data bool) { + if data { + self.WriteByte(1) + } else { + self.WriteByte(0) + } +} + +func (self *ZeroCopySink) WriteUint16(data uint16) { + buf := self.NextBytes(2) + binary.LittleEndian.PutUint16(buf, data) +} + +func (self *ZeroCopySink) WriteUint32(data uint32) { + buf := self.NextBytes(4) + binary.LittleEndian.PutUint32(buf, data) +} + +func (self *ZeroCopySink) WriteUint64(data uint64) { + buf := self.NextBytes(8) + binary.LittleEndian.PutUint64(buf, data) +} + +func (self *ZeroCopySink) WriteInt64(data int64) { + self.WriteUint64(uint64(data)) +} + +func (self *ZeroCopySink) WriteInt32(data int32) { + self.WriteUint32(uint32(data)) +} + +func (self *ZeroCopySink) WriteInt16(data int16) { + self.WriteUint16(uint16(data)) +} + +func (self *ZeroCopySink) WriteVarBytes(data []byte) (size uint64) { + l := uint64(len(data)) + size = self.WriteVarUint(l) + l + + self.WriteBytes(data) + return +} + +func (self *ZeroCopySink) WriteString(data string) (size uint64) { + return self.WriteVarBytes([]byte(data)) +} + +func (self *ZeroCopySink) WriteAddress(addr Address) 
{ + self.WriteBytes(addr[:]) +} + +func (self *ZeroCopySink) WriteHash(hash Uint256) { + self.WriteBytes(hash[:]) +} + +func (self *ZeroCopySink) WriteVarUint(data uint64) (size uint64) { + buf := self.NextBytes(9) + if data < 0xFD { + buf[0] = uint8(data) + size = 1 + } else if data <= 0xFFFF { + buf[0] = 0xFD + binary.LittleEndian.PutUint16(buf[1:], uint16(data)) + size = 3 + } else if data <= 0xFFFFFFFF { + buf[0] = 0xFE + binary.LittleEndian.PutUint32(buf[1:], uint32(data)) + size = 5 + } else { + buf[0] = 0xFF + binary.LittleEndian.PutUint64(buf[1:], uint64(data)) + size = 9 + } + + self.BackUp(9 - size) + return +} + +// NewReader returns a new ZeroCopySink reading from b. +func NewZeroCopySink(b []byte) *ZeroCopySink { + if b == nil { + b = make([]byte, 0, 512) + } + return &ZeroCopySink{b} +} + +func (self *ZeroCopySink) Bytes() []byte { return self.buf } + +func (self *ZeroCopySink) Reset() { self.buf = self.buf[:0] } + +var ErrTooLarge = errors.New("bytes.Buffer: too large") + +// makeSlice allocates a slice of size n. If the allocation fails, it panics +// with ErrTooLarge. +func makeSlice(n int) []byte { + // If the make fails, give a known error. + defer func() { + if recover() != nil { + panic(bytes.ErrTooLarge) + } + }() + return make([]byte, n) +} diff --git a/contracts/native/cross_chain_manager/ont/merkle/common/zero_copy_source.go b/contracts/native/cross_chain_manager/ont/merkle/common/zero_copy_source.go new file mode 100644 index 00000000..6b05c787 --- /dev/null +++ b/contracts/native/cross_chain_manager/ont/merkle/common/zero_copy_source.go @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. + * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. 
+ * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . + */ + +package common + +import ( + "encoding/binary" +) + +type ZeroCopySource struct { + s []byte + off uint64 // current reading index +} + +// Len returns the number of bytes of the unread portion of the +// slice. +func (self *ZeroCopySource) Len() uint64 { + length := uint64(len(self.s)) + if self.off >= length { + return 0 + } + return length - self.off +} + +func (self *ZeroCopySource) Bytes() []byte { + return self.s +} + +func (self *ZeroCopySource) OffBytes() []byte { + return self.s[self.off:] +} + +func (self *ZeroCopySource) Pos() uint64 { + return self.off +} + +// Size returns the original length of the underlying byte slice. +// Size is the number of bytes available for reading via ReadAt. +// The returned value is always the same and is not affected by calls +// to any other method. +func (self *ZeroCopySource) Size() uint64 { return uint64(len(self.s)) } + +// Read implements the io.ZeroCopySource interface. +func (self *ZeroCopySource) NextBytes(n uint64) (data []byte, eof bool) { + m := uint64(len(self.s)) + end, overflow := SafeAdd(self.off, n) + if overflow || end > m { + end = m + eof = true + } + data = self.s[self.off:end] + self.off = end + + return +} + +func (self *ZeroCopySource) Skip(n uint64) (eof bool) { + m := uint64(len(self.s)) + end, overflow := SafeAdd(self.off, n) + if overflow || end > m { + end = m + eof = true + } + self.off = end + + return +} + +// ReadByte implements the io.ByteReader interface. 
+func (self *ZeroCopySource) NextByte() (data byte, eof bool) { + if self.off >= uint64(len(self.s)) { + return 0, true + } + + b := self.s[self.off] + self.off++ + return b, false +} + +func (self *ZeroCopySource) NextUint8() (data uint8, eof bool) { + var val byte + val, eof = self.NextByte() + return uint8(val), eof +} + +func (self *ZeroCopySource) NextBool() (data bool, eof bool) { + val, eof := self.NextByte() + if val == 0 { + data = false + } else if val == 1 { + data = true + } else { + eof = true + } + return +} + +// Backs up a number of bytes, so that the next call to NextXXX() returns data again +// that was already returned by the last call to NextXXX(). +func (self *ZeroCopySource) BackUp(n uint64) { + self.off -= n +} + +func (self *ZeroCopySource) NextUint16() (data uint16, eof bool) { + var buf []byte + buf, eof = self.NextBytes(UINT16_SIZE) + if eof { + return + } + + return binary.LittleEndian.Uint16(buf), eof +} + +func (self *ZeroCopySource) NextUint32() (data uint32, eof bool) { + var buf []byte + buf, eof = self.NextBytes(UINT32_SIZE) + if eof { + return + } + + return binary.LittleEndian.Uint32(buf), eof +} + +func (self *ZeroCopySource) NextUint64() (data uint64, eof bool) { + var buf []byte + buf, eof = self.NextBytes(UINT64_SIZE) + if eof { + return + } + + return binary.LittleEndian.Uint64(buf), eof +} + +func (self *ZeroCopySource) NextInt32() (data int32, eof bool) { + var val uint32 + val, eof = self.NextUint32() + return int32(val), eof +} + +func (self *ZeroCopySource) NextInt64() (data int64, eof bool) { + var val uint64 + val, eof = self.NextUint64() + return int64(val), eof +} + +func (self *ZeroCopySource) NextInt16() (data int16, eof bool) { + var val uint16 + val, eof = self.NextUint16() + return int16(val), eof +} + +func (self *ZeroCopySource) NextVarBytes() (data []byte, eof bool) { + count, eof := self.NextVarUint() + if eof { + return + } + data, eof = self.NextBytes(count) + return +} + +func (self *ZeroCopySource) 
NextAddress() (data Address, eof bool) { + var buf []byte + buf, eof = self.NextBytes(ADDR_LEN) + if eof { + return + } + copy(data[:], buf) + + return +} + +func (self *ZeroCopySource) NextHash() (data Uint256, eof bool) { + var buf []byte + buf, eof = self.NextBytes(UINT256_SIZE) + if eof { + return + } + copy(data[:], buf) + + return +} + +func (self *ZeroCopySource) NextString() (data string, eof bool) { + var val []byte + val, eof = self.NextVarBytes() + data = string(val) + return +} + +func (self *ZeroCopySource) NextVarUint() (data uint64, eof bool) { + var fb byte + fb, eof = self.NextByte() + if eof { + return + } + + switch fb { + case 0xFD: + val, e := self.NextUint16() + if e { + eof = e + return + } + data = uint64(val) + case 0xFE: + val, e := self.NextUint32() + if e { + eof = e + return + } + data = uint64(val) + case 0xFF: + val, e := self.NextUint64() + if e { + eof = e + return + } + data = uint64(val) + default: + data = uint64(fb) + } + return +} + +// NewReader returns a new ZeroCopySource reading from b. +func NewZeroCopySource(b []byte) *ZeroCopySource { return &ZeroCopySource{b, 0} } diff --git a/contracts/native/cross_chain_manager/ont/merkle/file_hash_store.go b/contracts/native/cross_chain_manager/ont/merkle/file_hash_store.go new file mode 100644 index 00000000..b6ba678c --- /dev/null +++ b/contracts/native/cross_chain_manager/ont/merkle/file_hash_store.go @@ -0,0 +1,152 @@ +/* + * Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. + * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . + */ + +package merkle + +import ( + "errors" + "io" + "os" + + "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/ont/merkle/common" +) + +// HashStore is an interface for persist hash +type HashStore interface { + Append(hash []common.Uint256) error + Flush() error + Close() + GetHash(pos uint32) (common.Uint256, error) +} + +type fileHashStore struct { + file_name string + file *os.File +} + +// NewFileHashStore returns a HashStore implement in file +func NewFileHashStore(name string, tree_size uint32) (HashStore, error) { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, 0755) + if err != nil { + return nil, err + } + store := &fileHashStore{ + file_name: name, + file: f, + } + + err = store.checkConsistence(tree_size) + if err != nil { + return nil, err + } + + num_hashes := getStoredHashNum(tree_size) + size := int64(num_hashes) * int64(common.UINT256_SIZE) + + _, err = store.file.Seek(size, io.SeekStart) + if err != nil { + return nil, err + } + return store, nil +} + +func getStoredHashNum(tree_size uint32) int64 { + subtreesize := getSubTreeSize(tree_size) + sum := int64(0) + for _, v := range subtreesize { + sum += int64(v) + } + + return sum +} + +func (self *fileHashStore) checkConsistence(tree_size uint32) error { + num_hashes := getStoredHashNum(tree_size) + + stat, err := self.file.Stat() + if err != nil { + return err + } else if stat.Size() < int64(num_hashes)*int64(common.UINT256_SIZE) { + return errors.New("stored hashes are less than expected") + } + + return nil +} + +func (self *fileHashStore) Append(hash []common.Uint256) error { + if self == nil { + return nil + } + buf := make([]byte, 0, len(hash)*common.UINT256_SIZE) + for _, h := range hash { + buf = append(buf, h[:]...) 
+ } + _, err := self.file.Write(buf) + return err +} + +func (self *fileHashStore) Flush() error { + if self == nil { + return nil + } + return self.file.Sync() +} + +func (self *fileHashStore) Close() { + if self == nil { + return + } + self.file.Close() +} + +func (self *fileHashStore) GetHash(pos uint32) (common.Uint256, error) { + if self == nil { + return EMPTY_HASH, errors.New("FileHashstore is nil") + } + hash := EMPTY_HASH + _, err := self.file.ReadAt(hash[:], int64(pos)*int64(common.UINT256_SIZE)) + if err != nil { + return EMPTY_HASH, err + } + + return hash, nil +} + +type memHashStore struct { + hashes []common.Uint256 +} + +// NewMemHashStore returns a HashStore implement in memory +func NewMemHashStore() HashStore { + return &memHashStore{} +} + +func (self *memHashStore) Append(hash []common.Uint256) error { + self.hashes = append(self.hashes, hash...) + return nil +} + +func (self *memHashStore) GetHash(pos uint32) (common.Uint256, error) { + return self.hashes[pos], nil +} + +func (self *memHashStore) Flush() error { + return nil +} + +func (self *memHashStore) Close() {} diff --git a/contracts/native/cross_chain_manager/ont/merkle/merkle_hasher.go b/contracts/native/cross_chain_manager/ont/merkle/merkle_hasher.go new file mode 100644 index 00000000..fe255953 --- /dev/null +++ b/contracts/native/cross_chain_manager/ont/merkle/merkle_hasher.go @@ -0,0 +1,239 @@ +/* + * Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. + * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . + */ + +package merkle + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "math" + + "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/ont/merkle/common" +) + +const ( + LEFT byte = iota + RIGHT +) + +const ( + MAX_SIZE = 1024 * 1024 +) + +var debugCheck = false + +type TreeHasher struct { +} + +func (self TreeHasher) hash_empty() common.Uint256 { + return sha256.Sum256(nil) +} + +func (self TreeHasher) hash_leaf(data []byte) common.Uint256 { + tmp := append([]byte{0}, data...) + return sha256.Sum256(tmp) +} + +func (self TreeHasher) hash_children(left, right common.Uint256) common.Uint256 { + data := append([]byte{1}, left[:]...) + data = append(data, right[:]...) + return sha256.Sum256(data) +} + +func (self TreeHasher) HashFullTreeWithLeafHash(leaves []common.Uint256) common.Uint256 { + length := uint32(len(leaves)) + root_hash, hashes := self._hash_full(leaves, 0, length) + + if uint(len(hashes)) != countBit(length) { + panic("hashes length mismatch") + } + + if debugCheck { + root2 := self.hash_empty() + if len(hashes) != 0 { + root2 = self._hash_fold(hashes) + } + + if root_hash != root2 { + panic("root hash mismatch") + } + } + + // assert len(hashes) == countBit(len(leaves)) + // assert self._hash_fold(hashes) == root_hash if hashes else root_hash == self.hash_empty() + + return root_hash +} + +func (self TreeHasher) HashFullTree(leaves [][]byte) common.Uint256 { + length := uint32(len(leaves)) + leafhashes := make([]common.Uint256, length, length) + for i := range leaves { + leafhashes[i] = self.hash_leaf(leaves[i]) + } + + return self.HashFullTreeWithLeafHash(leafhashes) +} + +func (self TreeHasher) _hash_full(leaves []common.Uint256, l_idx, r_idx uint32) (root_hash common.Uint256, hashes []common.Uint256) { + width := r_idx - l_idx + if width 
== 0 { + return self.hash_empty(), nil + } else if width == 1 { + leaf_hash := leaves[l_idx] + return leaf_hash, []common.Uint256{leaf_hash} + } else { + var split_width uint32 = 1 << (highBit(width-1) - 1) + l_root, l_hashes := self._hash_full(leaves, l_idx, l_idx+split_width) + if len(l_hashes) != 1 { + panic("left tree always full") + } + r_root, r_hashes := self._hash_full(leaves, l_idx+split_width, r_idx) + root_hash = self.hash_children(l_root, r_root) + var hashes []common.Uint256 + if split_width*2 == width { + hashes = []common.Uint256{root_hash} + } else { + hashes = append(l_hashes, r_hashes[:]...) + } + return root_hash, hashes + } +} + +func (self TreeHasher) _hash_fold(hashes []common.Uint256) common.Uint256 { + l := len(hashes) + accum := hashes[l-1] + for i := l - 2; i >= 0; i-- { + accum = self.hash_children(hashes[i], accum) + } + + return accum +} + +func HashLeaf(data []byte) common.Uint256 { + tmp := append([]byte{0}, data...) + return sha256.Sum256(tmp) +} + +func HashChildren(left, right common.Uint256) common.Uint256 { + data := append([]byte{1}, left[:]...) + data = append(data, right[:]...) 
+ return sha256.Sum256(data) +} + +func MerkleLeafPath(data []byte, hashes []common.Uint256) ([]byte, error) { + size := len(hashes)*(common.UINT256_SIZE+1) + len(data) + 8 + if size > MAX_SIZE { + return nil, fmt.Errorf("data length over max value:%d", MAX_SIZE) + } + index := getIndex(HashLeaf(data), hashes) + if index < 0 { + return nil, fmt.Errorf("%s", "values doesn't exist!") + } + sink := common.NewZeroCopySink(make([]byte, 0, size)) + sink.WriteVarBytes(data) + d := depth(len(hashes)) + merkleTree := MerkleHashes(hashes, d) + for i := d; i > 0; i-- { + subTree := merkleTree[i] + subLen := len(subTree) + nIndex := index / 2 + if index == subLen-1 && subLen%2 != 0 { + index = nIndex + continue + } + if index%2 != 0 { + sink.WriteByte(LEFT) + sink.WriteHash(subTree[index-1]) + } else { + sink.WriteByte(RIGHT) + sink.WriteHash(subTree[index+1]) + } + index = nIndex + } + return sink.Bytes(), nil +} + +func MerkleHashes(preLeaves []common.Uint256, depth int) [][]common.Uint256 { + levels := make([][]common.Uint256, depth+1, depth+1) + levels[depth] = preLeaves + for i := depth; i > 0; i -= 1 { + level := levels[i] + levelLen := len(level) + remainder := levelLen % 2 + nextLevel := make([]common.Uint256, levelLen/2+remainder) + k := 0 + for j := 0; j < len(level)-1; j += 2 { + left := level[j] + right := level[j+1] + + nextLevel[k] = HashChildren(left, right) + k += 1 + } + if remainder != 0 { + nextLevel[k] = level[len(level)-1] + } + levels[i-1] = nextLevel + } + return levels +} + +func MerkleProve(path []byte, root []byte) ([]byte, error) { + source := common.NewZeroCopySource(path) + value, eof := source.NextVarBytes() + if eof { + return nil, errors.New("read bytes error") + } + hash := HashLeaf(value) + size := int((source.Size() - source.Pos()) / (common.UINT256_SIZE + 1)) + for i := 0; i < size; i++ { + f, eof := source.NextByte() + if eof { + return nil, errors.New("read byte error") + } + v, eof := source.NextHash() + if eof { + return nil, 
errors.New("read hash error") + } + if f == LEFT { + hash = HashChildren(v, hash) + } else { + hash = HashChildren(hash, v) + } + } + + if !bytes.Equal(hash[:], root) { + return nil, fmt.Errorf("expect root is not equal actual root, expect:%x, actual:%x", hash, root) + } + return value, nil +} + +func depth(n int) int { + return int(math.Ceil(math.Log2(float64(n)))) +} + +func getIndex(leaf common.Uint256, hashes []common.Uint256) int { + for i, v := range hashes { + if bytes.Equal(v[:], leaf[:]) { + return i + } + } + return -1 +} diff --git a/contracts/native/cross_chain_manager/ont/merkle/merkle_tree.go b/contracts/native/cross_chain_manager/ont/merkle/merkle_tree.go new file mode 100644 index 00000000..45eb782f --- /dev/null +++ b/contracts/native/cross_chain_manager/ont/merkle/merkle_tree.go @@ -0,0 +1,607 @@ +/* + * Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. + * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . 
+ */ + +package merkle + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + + "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/ont/merkle/common" +) + +// const UINT256_SIZE int = 32 + +// type common.Uint256 [UINT256_SIZE]byte + +var EMPTY_HASH = common.Uint256{} + +// CompactMerkleTree calculate merkle tree with compact hash store in HashStore +type CompactMerkleTree struct { + mintree_h uint + hashes []common.Uint256 + hasher TreeHasher + hashStore HashStore + rootHash common.Uint256 + treeSize uint32 +} + +// NewTree returns a CompactMerkleTree instance +func NewTree(tree_size uint32, hashes []common.Uint256, store HashStore) *CompactMerkleTree { + + tree := &CompactMerkleTree{ + mintree_h: 0, + hashes: nil, + hasher: TreeHasher{}, + hashStore: store, + rootHash: EMPTY_HASH, + } + + tree._update(tree_size, hashes) + return tree +} + +func (self *CompactMerkleTree) Hashes() []common.Uint256 { + return self.hashes +} + +func (self *CompactMerkleTree) TreeSize() uint32 { + return self.treeSize +} + +func (self *CompactMerkleTree) Marshal() ([]byte, error) { + length := 4 + len(self.hashes)*common.UINT256_SIZE + buf := make([]byte, 4, length) + binary.BigEndian.PutUint32(buf[0:], self.treeSize) + for _, h := range self.hashes { + buf = append(buf, h[:]...) 
+ } + + return buf, nil +} + +func (self *CompactMerkleTree) UnMarshal(buf []byte) error { + tree_size := binary.BigEndian.Uint32(buf[0:4]) + nhashes := countBit(tree_size) + if len(buf) < 4+int(nhashes)*common.UINT256_SIZE { + return errors.New("Too short input buf length") + } + hashes := make([]common.Uint256, nhashes, nhashes) + for i := 0; i < int(nhashes); i++ { + copy(hashes[i][:], buf[4+i*common.UINT256_SIZE:]) + } + + self._update(tree_size, hashes) + + return nil +} + +func (self *CompactMerkleTree) _update(tree_size uint32, hashes []common.Uint256) { + numBit := countBit(tree_size) + if len(hashes) != int(numBit) { + panic("number of hashes != num bit in tree_size") + } + self.treeSize = tree_size + self.hashes = hashes + self.mintree_h = lowBit(tree_size) + self.rootHash = EMPTY_HASH + +} + +// Root returns root hash of merkle tree +func (self *CompactMerkleTree) Root() common.Uint256 { + if self.rootHash == EMPTY_HASH { + if len(self.hashes) != 0 { + self.rootHash = self.hasher._hash_fold(self.hashes) + } else { + self.rootHash = self.hasher.hash_empty() + } + } + return self.rootHash +} + +// GetRootWithNewLeaf returns the new root hash if newLeaf is appended to the merkle tree +func (self *CompactMerkleTree) GetRootWithNewLeaf(newLeaf common.Uint256) common.Uint256 { + hashes := append(self.hashes, self.hasher.hash_leaf(newLeaf.ToArray())) + root := self.hasher._hash_fold(hashes) + + return root +} + +// clone except internal hash storage +func (self *CompactMerkleTree) cloneMem() CompactMerkleTree { + temp := CompactMerkleTree{mintree_h: self.mintree_h, hasher: self.hasher, hashStore: nil, + rootHash: self.rootHash, treeSize: self.treeSize, + } + temp.hashes = make([]common.Uint256, len(self.hashes)) + for i, h := range self.hashes { + temp.hashes[i] = h + } + + return temp +} + +func (self *CompactMerkleTree) GetRootWithNewLeaves(newLeaf []common.Uint256) common.Uint256 { + tree := self.cloneMem() + for _, h := range newLeaf { + 
tree.Append(h.ToArray()) + } + + return tree.Root() +} + +// Append appends a leaf to the merkle tree and returns the audit path +func (self *CompactMerkleTree) Append(leafv []byte) []common.Uint256 { + leaf := self.hasher.hash_leaf(leafv) + + return self.appendHash(leaf) +} + +// AppendHash appends a leaf hash to the merkle tree and returns the audit path +func (self *CompactMerkleTree) appendHash(leaf common.Uint256) []common.Uint256 { + size := len(self.hashes) + auditPath := make([]common.Uint256, size, size) + storehashes := make([]common.Uint256, 0) + // reverse + for i, v := range self.hashes { + auditPath[size-i-1] = v + } + + storehashes = append(storehashes, leaf) + self.mintree_h = 1 + for s := self.treeSize; s%2 == 1; s = s >> 1 { + self.mintree_h += 1 + leaf = self.hasher.hash_children(self.hashes[size-1], leaf) + storehashes = append(storehashes, leaf) + size -= 1 + } + if self.hashStore != nil { + self.hashStore.Append(storehashes) + self.hashStore.Flush() + } + self.treeSize += 1 + self.hashes = self.hashes[0:size] + self.hashes = append(self.hashes, leaf) + self.rootHash = EMPTY_HASH + + return auditPath +} + +// 1 based n +func getSubTreeSize(n uint32) []uint32 { + nhashes := countBit(n) + subtreesize := make([]uint32, nhashes, nhashes) + for i, id := nhashes-1, uint32(1); n != 0; n = n >> 1 { + id = id * 2 + if n%2 == 1 { + subtreesize[i] = id - 1 + i -= 1 + } + } + + return subtreesize +} + +// 1-based n and return value +func getSubTreePos(n uint32) []uint32 { + nhashes := countBit(n) + hashespos := make([]uint32, nhashes, nhashes) + for i, id := nhashes-1, uint32(1); n != 0; n = n >> 1 { + id = id * 2 + if n%2 == 1 { + hashespos[i] = id - 1 + i -= 1 + } + } + + for i := uint(1); i < nhashes; i++ { + hashespos[i] += hashespos[i-1] + } + + return hashespos +} + +// return merkle root of D[0:n] not include n +func (self *CompactMerkleTree) merkleRoot(n uint32) common.Uint256 { + hashespos := getSubTreePos(n) + nhashes := uint(len(hashespos)) + + 
hashes := make([]common.Uint256, nhashes, nhashes) + for i := uint(0); i < nhashes; i++ { + hashes[i], _ = self.hashStore.GetHash(hashespos[i] - 1) + } + return self.hasher._hash_fold(hashes) +} + +// ConsistencyProof returns consistency proof +func (self *CompactMerkleTree) ConsistencyProof(m, n uint32) []common.Uint256 { + if m > n || self.treeSize < n || self.hashStore == nil { + return nil + } + + return self.subproof(m, n, true) +} + +// m, n 1-based +func (self *CompactMerkleTree) subproof(m, n uint32, b bool) []common.Uint256 { + offset := uint32(0) + var hashes []common.Uint256 + for m < n { + k := uint32(1 << (highBit(n-1) - 1)) + if m <= k { + pos := getSubTreePos(n - k) + subhashes := make([]common.Uint256, len(pos), len(pos)) + for p := range pos { + pos[p] += offset + k*2 - 1 + subhashes[p], _ = self.hashStore.GetHash(pos[p] - 1) + } + rootk2n := self.hasher._hash_fold(subhashes) + hashes = append(hashes, rootk2n) + n = k + } else { + offset += k*2 - 1 + root02k, _ := self.hashStore.GetHash(offset - 1) + hashes = append(hashes, root02k) + m -= k + n -= k + b = false + } + } + + //assert m == n + if b == false { + pos := getSubTreePos(n) + //assert len(pos) == 1 + if len(pos) != 1 { + panic("assert error") + } + root02n, _ := self.hashStore.GetHash(pos[0] + offset - 1) + hashes = append(hashes, root02n) + } + + length := len(hashes) + reverse := make([]common.Uint256, length, length) + for k, _ := range reverse { + reverse[k] = hashes[length-k-1] + } + + return reverse +} + +// InclusionProof returns the proof d[m] in D[0:n] +// m zero based index, n size 1-based +// return sink.Bytes() of WriteVarBytes(hash_index_by_m) + loop of { WriteByte(PosInfo) + WriteByte(ProofPathNodeHash) } +func (self *CompactMerkleTree) MerkleInclusionLeafPath(data []byte, m, n uint32) ([]byte, error) { + if m >= n { + return nil, errors.New("wrong parameters") + } else if self.treeSize < n { + return nil, errors.New("not available yet") + } else if self.hashStore == nil { + 
return nil, errors.New("hash store not available") + } + + offset := uint32(0) + depth := int(math.Ceil(math.Log2(float64(n)))) + hashes := make([]common.Uint256, 0, depth) + poses := make([]byte, 0, depth) + for n != 1 { + k := uint32(1 << (highBit(n-1) - 1)) + if m < k { + pos := getSubTreePos(n - k) + subhashes := make([]common.Uint256, len(pos), len(pos)) + for p := range pos { + pos[p] += offset + k*2 - 1 + subhashes[p], _ = self.hashStore.GetHash(pos[p] - 1) + } + rootk2n := self.hasher._hash_fold(subhashes) + hashes = append(hashes, rootk2n) + poses = append(poses, byte(1)) + n = k + } else { + offset += k*2 - 1 + root02k, _ := self.hashStore.GetHash(offset - 1) + hashes = append(hashes, root02k) + poses = append(poses, byte(0)) + m -= k + n -= k + } + } + length := len(hashes) + sink := common.NewZeroCopySink(nil) + sink.WriteVarBytes(data) + for k, _ := range hashes { + index := length - k - 1 + sink.WriteByte(poses[index]) + sink.WriteHash(hashes[index]) + } + return sink.Bytes(), nil +} + +// InclusionProof returns the proof d[m] in D[0:n] +// m zero based index, n size 1-based +func (self *CompactMerkleTree) InclusionProof(m, n uint32) ([]common.Uint256, error) { + if m >= n { + return nil, errors.New("wrong parameters") + } else if self.treeSize < n { + return nil, errors.New("not available yet") + } else if self.hashStore == nil { + return nil, errors.New("hash store not available") + } + + offset := uint32(0) + var hashes []common.Uint256 + for n != 1 { + k := uint32(1 << (highBit(n-1) - 1)) + if m < k { + pos := getSubTreePos(n - k) + subhashes := make([]common.Uint256, len(pos), len(pos)) + for p := range pos { + pos[p] += offset + k*2 - 1 + subhashes[p], _ = self.hashStore.GetHash(pos[p] - 1) + } + rootk2n := self.hasher._hash_fold(subhashes) + hashes = append(hashes, rootk2n) + n = k + } else { + offset += k*2 - 1 + root02k, _ := self.hashStore.GetHash(offset - 1) + hashes = append(hashes, root02k) + m -= k + n -= k + } + } + + length := 
len(hashes) + reverse := make([]common.Uint256, length, length) + for k := range reverse { + reverse[k] = hashes[length-k-1] + } + + return reverse, nil +} + +// MerkleVerifier verify inclusion and consist proof +type MerkleVerifier struct { + hasher TreeHasher +} + +func NewMerkleVerifier() *MerkleVerifier { + return &MerkleVerifier{ + hasher: TreeHasher{}, + } +} + +/* + Verify a Merkle Audit PATH. + + leaf_hash: The hash of the leaf for which the proof was provided. + leaf_index: Index of the leaf in the tree. + proof: A list of SHA-256 hashes representing the Merkle audit path. + tree_size: The size of the tree + root_hash: The root hash of the tree + + Returns: + nil when the proof is valid +*/ +func (self *MerkleVerifier) VerifyLeafHashInclusion(leaf_hash common.Uint256, + leaf_index uint32, proof []common.Uint256, root_hash common.Uint256, tree_size uint32) error { + + if tree_size <= leaf_index { + return errors.New("Wrong params: the tree size is smaller than the leaf index") + } + + calculated_root_hash, err := self.calculate_root_hash_from_audit_path(leaf_hash, + leaf_index, proof, tree_size) + if err != nil { + return err + } + if calculated_root_hash != root_hash { + return errors.New(fmt.Sprintf("Constructed root hash differs from provided root hash. Constructed: %x, Expected: %x", + calculated_root_hash, root_hash)) + } + + return nil +} + +/* + Verify a Merkle Audit PATH. + + leaf: The leaf for which the proof is provided + leaf_index: Index of the leaf in the tree. + proof: A list of SHA-256 hashes representing the Merkle audit path. 
+ tree_size: The size of the tree + root_hash: The root hash of the tree + + Returns: + nil when the proof is valid +*/ +func (self *MerkleVerifier) VerifyLeafInclusion(leaf []byte, + leaf_index uint32, proof []common.Uint256, root_hash common.Uint256, tree_size uint32) error { + leaf_hash := self.hasher.hash_leaf(leaf) + return self.VerifyLeafHashInclusion(leaf_hash, leaf_index, proof, root_hash, tree_size) +} + +func (self *MerkleVerifier) calculate_root_hash_from_audit_path(leaf_hash common.Uint256, + node_index uint32, audit_path []common.Uint256, tree_size uint32) (common.Uint256, error) { + calculated_hash := leaf_hash + last_node := tree_size - 1 + pos := 0 + path_len := len(audit_path) + for last_node > 0 { + if pos >= path_len { + return EMPTY_HASH, errors.New(fmt.Sprintf("Proof too short. expected %d, got %d", + audit_path_length(node_index, tree_size), path_len)) + } + + if node_index%2 == 1 { + calculated_hash = self.hasher.hash_children(audit_path[pos], calculated_hash) + pos += 1 + } else if node_index < last_node { + calculated_hash = self.hasher.hash_children(calculated_hash, audit_path[pos]) + pos += 1 + } + node_index /= 2 + last_node /= 2 + } + + if pos < path_len { + return EMPTY_HASH, errors.New("Proof too long") + } + return calculated_hash, nil +} + +func audit_path_length(index, tree_size uint32) int { + length := 0 + last_node := tree_size - 1 + for last_node > 0 { + if index%2 == 1 || index < last_node { + length += 1 + } + index /= 2 + last_node /= 2 + } + return length +} + +/* +Verify the consistency between two root hashes. + + old_tree_size must be <= new_tree_size. + + Args: + old_tree_size: size of the older tree. + new_tree_size: size of the newer_tree. + old_root: the root hash of the older tree. + new_root: the root hash of the newer tree. + proof: the consistency proof. + + Returns: + True. The return value is enforced by a decorator and need not be + checked by the caller. 
+ + Raises: + ConsistencyError: the proof indicates an inconsistency + (this is usually really serious!). + ProofError: the proof is invalid. + ValueError: supplied tree sizes are invalid. +*/ +func (self *MerkleVerifier) VerifyConsistency(old_tree_size, + new_tree_size uint32, old_root, new_root common.Uint256, proof []common.Uint256) error { + old_size := old_tree_size + new_size := new_tree_size + + if old_size > new_size { + return errors.New(fmt.Sprintf("Older tree has bigger size %d vs %d", old_size, new_size)) + } + if old_root == new_root { + return nil + } + if old_size == 0 { + return nil + } + //assert o < old_size < new_size + /* + A consistency proof is essentially an audit proof for the node with + index old_size - 1 in the newer tree. The sole difference is that + the path is already hashed together into a single hash up until the + first audit node that occurs in the newer tree only. + */ + node := old_size - 1 + last_node := new_size - 1 + + // while we are the right child, everything is in both trees, so move one level up + for node%2 == 1 { + node /= 2 + last_node /= 2 + } + + lenp := len(proof) + pos := 0 + var new_hash, old_hash common.Uint256 + + if pos >= lenp { + return errors.New("Wrong proof length") + } + if node != 0 { + // compute the two root hashes in parallel. 
+ new_hash = proof[pos] + old_hash = proof[pos] + pos += 1 + } else { + // The old tree was balanced (2^k nodes), so we already have the first root hash + new_hash = old_root + old_hash = old_root + } + + for node != 0 { + if node%2 == 1 { + if pos >= lenp { + return errors.New("Wrong proof length") + } + // node is a right child: left sibling exists in both trees + next_node := proof[pos] + pos += 1 + old_hash = self.hasher.hash_children(next_node, old_hash) + new_hash = self.hasher.hash_children(next_node, new_hash) + } else if node < last_node { + if pos >= lenp { + return errors.New("Wrong proof length") + } + // node is a left child: right sibling only exists inthe newer tree + next_node := proof[pos] + pos += 1 + new_hash = self.hasher.hash_children(new_hash, next_node) + } + // else node == last_node: node is a left child with no sibling in either tree + + node /= 2 + last_node /= 2 + } + + // Now old_hash is the hash of the first subtree. If the two trees have different + // height, continue the path until the new root. + for last_node != 0 { + if pos >= lenp { + return errors.New("Wrong proof length") + } + next_node := proof[pos] + pos += 1 + new_hash = self.hasher.hash_children(new_hash, next_node) + last_node /= 2 + } + + /* If the second hash does not match, the proof is invalid for the given pair + If, on the other hand, the newer hash matches but the older one does not, then + the proof (together with the signatures on the hashes) is proof of inconsistency. + */ + if new_hash != new_root { + return errors.New(fmt.Sprintf(`Bad Merkle proof: second root hash does not match. + Expected hash:%x, computed hash: %x`, new_root, new_hash)) + } else if old_hash != old_root { + return errors.New(fmt.Sprintf(`Inconsistency: first root hash does not match." 
+ "Expected hash: %x, computed hash:%x`, old_root, old_hash)) + } + + if pos != lenp { + return errors.New("Proof too long") + } + + return nil +} diff --git a/contracts/native/cross_chain_manager/ont/merkle/merkle_tree_test.go b/contracts/native/cross_chain_manager/ont/merkle/merkle_tree_test.go new file mode 100644 index 00000000..e74a4390 --- /dev/null +++ b/contracts/native/cross_chain_manager/ont/merkle/merkle_tree_test.go @@ -0,0 +1,326 @@ +/* + * Copyright (C) 2021 The Zion Authors + * This file is part of The Zion library. + * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . 
+ */ + +package merkle + +import ( + "crypto/sha256" + "fmt" + "os" + "testing" + + "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/ont/merkle/common" + "github.com/stretchr/testify/assert" +) + +func TestMerkleLeaf3(t *testing.T) { + hasher := TreeHasher{} + leafs := []common.Uint256{hasher.hash_leaf([]byte{1}), + hasher.hash_leaf([]byte{2}), + hasher.hash_leaf([]byte{3})} + store, _ := NewFileHashStore("merkletree.db", 0) + tree := NewTree(0, nil, store) + if tree.Root() != sha256.Sum256(nil) { + t.Fatal("root error") + } + for i := range leafs { + tree.Append([]byte{byte(i + 1)}) + } + + hashes := make([]common.Uint256, 5, 5) + for i := 0; i < 4; i++ { + hashes[i], _ = tree.hashStore.GetHash(uint32(i)) + } + hashes[4] = tree.Root() + + cmp := []common.Uint256{ + leafs[0], + leafs[1], + hasher.hash_children(leafs[0], leafs[1]), + leafs[2], + hasher.hash_children(hasher.hash_children(leafs[0], leafs[1]), + leafs[2]), + } + + for i := 0; i < 5; i++ { + if hashes[i] != cmp[i] { + t.Fatal(fmt.Sprintf("error: %d, expected %x, found %x", i, cmp[i], hashes[i])) + } + } + +} + +func TestCompactMerkleTree_GetRootWithNewLeaves(t *testing.T) { + N := 1000 + tree1 := NewTree(0, nil, nil) + tree2 := NewTree(0, nil, nil) + leaves := make([]common.Uint256, N) + for i := 0; i < N; i++ { + leaves[i][:][0] = byte(i) + hash := leaves[i] + assert.Equal(t, tree1.GetRootWithNewLeaf(hash), tree2.GetRootWithNewLeaves([]common.Uint256{hash})) + tree1.Append(hash.ToArray()) + tree2.Append(hash.ToArray()) + } +} + +func TestMerkle(t *testing.T) { + hasher := TreeHasher{} + leafs := []common.Uint256{hasher.hash_leaf([]byte{1}), + hasher.hash_leaf([]byte{2}), + hasher.hash_leaf([]byte{3}), + hasher.hash_leaf([]byte{4})} + + store, _ := NewFileHashStore("merkletree.db", 0) + tree := NewTree(0, nil, store) + if tree.Root() != sha256.Sum256(nil) { + t.Fatal("root error") + } + for i, _ := range leafs { + tree.Append([]byte{byte(i + 1)}) + } + + hashes := 
make([]common.Uint256, 6, 6) + for i := 0; i < 6; i++ { + hashes[i], _ = tree.hashStore.GetHash(uint32(i)) + } + cmp := []common.Uint256{ + leafs[0], + leafs[1], + hasher.hash_children(leafs[0], leafs[1]), + leafs[2], + leafs[3], + hasher.hash_children(leafs[2], leafs[3]), + hasher.hash_children(hasher.hash_children(leafs[0], leafs[1]), + hasher.hash_children(leafs[2], leafs[3])), + } + + for i := 0; i < 6; i++ { + if hashes[i] != cmp[i] { + fmt.Println(hashes) + fmt.Println(cmp) + t.Fatal(fmt.Sprintf("error: %d, expected %x, found %x", i, cmp[i], hashes[i])) + } + } + +} + +func TestMerkleHashes(t *testing.T) { + store, _ := NewFileHashStore("merkletree.db", 0) + tree := NewTree(0, nil, store) + for i := 0; i < 100; i++ { + tree.Append([]byte{byte(i + 1)}) + } + + // 100 == 110 0100 + if len(tree.hashes) != 3 { + t.Fatal(fmt.Sprintf("error tree hashes size")) + } + +} + +// zero based return merkle root of D[0:n] +func TestMerkleRoot(t *testing.T) { + n := 100 + roots := make([]common.Uint256, n, n) + store, _ := NewFileHashStore("merkletree.db", 0) + tree := NewTree(0, nil, store) + for i := 0; i < n; i++ { + tree.Append([]byte{byte(i + 1)}) + roots[i] = tree.Root() + } + + cmp := make([]common.Uint256, n, n) + for i := 0; i < n; i++ { + cmp[i] = tree.merkleRoot(uint32(i) + 1) + if cmp[i] != roots[i] { + t.Error(fmt.Sprintf("error merkle root is not equal at %d", i)) + } + } + +} + +func TestGetSubTreeSize(t *testing.T) { + sizes := getSubTreeSize(7) + fmt.Println("sub tree size", sizes) +} + +// zero based return merkle root of D[0:n] +func TestMerkleIncludeProof(t *testing.T) { + n := uint32(9) + store, _ := NewFileHashStore("merkletree.db", 0) + defer func() { os.Remove("merkletree.db") }() + tree := NewTree(0, nil, store) + for i := uint32(0); i < n; i++ { + tree.Append([]byte{byte(i + 1)}) + } + verify := NewMerkleVerifier() + root := tree.Root() + for i := uint32(2); i < n; i++ { + proof, _ := tree.InclusionProof(i, n) + leaf_hash := 
tree.hasher.hash_leaf([]byte{byte(i + 1)}) + res := verify.VerifyLeafHashInclusion(leaf_hash, i, proof, root, n) + if res != nil { + t.Fatal(res, i, proof) + } + } +} + +func TestMerkleInclusionLeafPath(t *testing.T) { + n := uint32(10) + store, _ := NewFileHashStore("merkletree.db", 0) + defer func() { os.Remove("merkletree.db") }() + tree := NewTree(0, nil, store) + for i := uint32(0); i < n; i++ { + tree.Append([]byte{byte(i + 1)}) + } + root := tree.Root() + for i := uint32(0); i < n; i++ { + data := []byte{byte(i + 1)} + path, err := tree.MerkleInclusionLeafPath(data, i, n) + assert.Nil(t, err) + val, err := MerkleProve(path, root.ToArray()) + assert.Nil(t, err) + assert.Equal(t, data, val) + } +} + +func TestMerkleConsistencyProofLen(t *testing.T) { + n := uint32(7) + store, _ := NewFileHashStore("merkletree.db", 0) + tree := NewTree(0, nil, store) + for i := uint32(0); i < n; i++ { + tree.Append([]byte{byte(i + 1)}) + } + + cmp := []int{3, 2, 4, 1, 4, 3, 0} + for i := uint32(0); i < n; i++ { + proof := tree.ConsistencyProof(i+1, n) + if len(proof) != cmp[i] { + t.Fatal("error: wrong proof length") + } + } + +} + +func TestMerkleConsistencyProof(t *testing.T) { + n := uint32(140) + roots := make([]common.Uint256, n, n) + store, _ := NewFileHashStore("merkletree.db", 0) + tree := NewTree(0, nil, store) + for i := uint32(0); i < n; i++ { + tree.Append([]byte{byte(i + 1)}) + roots[i] = tree.Root() + } + + verify := NewMerkleVerifier() + + for i := uint32(0); i < n; i++ { + proof := tree.ConsistencyProof(i+1, n) + err := verify.VerifyConsistency(i+1, n, roots[i], roots[n-1], proof) + if err != nil { + t.Fatal("verify consistency error:", i, err) + } + + } +} + +//~70w +func BenchmarkMerkleInsert(b *testing.B) { + store, _ := NewFileHashStore("merkletree.db", 0) + tree := NewTree(0, nil, store) + for i := 0; i < b.N; i++ { + //use b.N for looping + tree.Append([]byte(fmt.Sprintf("bench %d", i))) + } +} + +var treeTest *CompactMerkleTree +var storeTest HashStore 
+var N = 100 //00 + +func init() { + storeTest, _ = NewFileHashStore("merkletree.db", 0) + treeTest = NewTree(0, nil, storeTest) + for i := 0; i < N; i++ { + treeTest.Append([]byte(fmt.Sprintf("setup %d", i))) + } + +} + +/* +// ~20w +func BenchmarkMerkleInclusionProof(b *testing.B) { + for i := 0; i < b.N; i++ { + treeTest.InclusionProof(uint32(i), uint32(N)) + } +} + +// ~20w +func BenchmarkMerkleConsistencyProof(b *testing.B) { + for i := 0; i < b.N; i++ { + treeTest.ConsistencyProof(uint32(i+1), uint32(N)) + } +} +*/ + +//~70w +func BenchmarkMerkleInsert2(b *testing.B) { + for i := 0; i < b.N; i++ { + treeTest.Append([]byte(fmt.Sprintf("bench %d", i))) + } +} + +func TestTreeHasher_HashFullTree(t *testing.T) { + debugCheck = true + leaves := make([][]byte, 0) + for i := byte(0); i < 200; i++ { + leaves = append(leaves, []byte{i}) + TreeHasher{}.HashFullTree(leaves) + } +} + +func TestTreeHasher(t *testing.T) { + tree := NewTree(0, nil, nil) + leaves := make([][]byte, 0) + for i := uint32(0); i < 1000; i++ { + leaf := []byte{byte(i + 1)} + leaves = append(leaves, leaf) + tree.Append(leaf) + root := TreeHasher{}.HashFullTree(leaves) + assert.Equal(t, root, tree.Root()) + } +} + +func TestAudit(t *testing.T) { + var hashes []common.Uint256 + n := 100 + tree := TreeHasher{} + for i := 0; i < n; i++ { + hashes = append(hashes, HashLeaf([]byte(fmt.Sprintf("%d", i)))) + } + root := tree.HashFullTreeWithLeafHash(hashes) + treeHashes := MerkleHashes(hashes, depth(len(hashes))) + assert.Equal(t, root, treeHashes[0][0]) + for i := 0; i < n; i++ { + auditPath, _ := MerkleLeafPath([]byte(fmt.Sprintf("%d", i)), hashes) + value, err := MerkleProve(auditPath, root[:]) + assert.NoError(t, err) + assert.Equal(t, []byte(fmt.Sprintf("%d", i)), value) + } +} diff --git a/contracts/native/cross_chain_manager/ont/merkle/util.go b/contracts/native/cross_chain_manager/ont/merkle/util.go new file mode 100644 index 00000000..216f330e --- /dev/null +++ 
b/contracts/native/cross_chain_manager/ont/merkle/util.go
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2021 The Zion Authors
+ * This file is part of The Zion library.
+ *
+ * The Zion is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * The Zion is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with The Zion. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+package merkle
+
+// countBit returns the number of set bits in num (population count).
+func countBit(num uint32) uint {
+	var count uint
+	for num != 0 {
+		num &= (num - 1)
+		count += 1
+	}
+	return count
+}
+
+func isPower2(num uint32) bool {
+	return countBit(num) == 1
+}
+
+// highBit returns the position of the highest set bit in num,
+// 1-based index (returns 0 when num == 0).
+func highBit(num uint32) uint {
+	var hiBit uint
+	for num != 0 {
+		num >>= 1
+		hiBit += 1
+	}
+	return hiBit
+}
+
+// lowBit returns the position of the lowest set bit in num,
+// 1-based index (returns 0 when num == 0).
+func lowBit(num uint32) uint {
+	return highBit(num & -num)
+}
diff --git a/contracts/native/cross_chain_manager/ont/ont_handler.go b/contracts/native/cross_chain_manager/ont/ont_handler.go
new file mode 100644
index 00000000..7c9bcdc1
--- /dev/null
+++ b/contracts/native/cross_chain_manager/ont/ont_handler.go
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2021 The Zion Authors
+ * This file is part of The Zion library.
+ *
+ * The Zion is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . + */ + +package ont + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/contracts/native" + scom "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/common" + "github.com/ethereum/go-ethereum/contracts/native/governance/side_chain_manager" + "github.com/ethereum/go-ethereum/contracts/native/header_sync/ont" + "github.com/ethereum/go-ethereum/contracts/native/utils" + "github.com/ontio/ontology-crypto/keypair" + ocommon "github.com/ontio/ontology/common" + otypes "github.com/ontio/ontology/core/types" +) + +type ONTHandler struct { +} + +func NewONTHandler() *ONTHandler { + return &ONTHandler{} +} + +func (this *ONTHandler) MakeDepositProposal(service *native.NativeContract) (*scom.MakeTxParam, error) { + ctx := service.ContractRef().CurrentContext() + params := &scom.EntranceParam{} + if err := utils.UnpackMethod(scom.ABI, scom.MethodImportOuterTransfer, params, ctx.Payload); err != nil { + return nil, err + } + + crossChainMsg, err := ont.GetCrossChainMsg(service, params.SourceChainID, params.Height) + if crossChainMsg == nil { + source := ocommon.NewZeroCopySource(params.HeaderOrCrossChainMsg) + crossChainMsg = new(otypes.CrossChainMsg) + err := crossChainMsg.Deserialization(source) + if err != nil { + return nil, fmt.Errorf("ont MakeDepositProposal, deserialize crossChainMsg error: %v", err) + } + n, _, irr, eof := source.NextVarUint() + if irr || eof { + return nil, fmt.Errorf("ont MakeDepositProposal, deserialization bookkeeper length error") + } + var bookkeepers []keypair.PublicKey + for i := 0; uint64(i) < n; i++ { + v, _, irr, eof := source.NextVarBytes() + if irr 
|| eof {
+				return nil, fmt.Errorf("ont MakeDepositProposal, deserialization bookkeeper error")
+			}
+			bookkeeper, err := keypair.DeserializePublicKey(v)
+			if err != nil {
+				return nil, fmt.Errorf("ont MakeDepositProposal, keypair.DeserializePublicKey error: %v", err)
+			}
+			bookkeepers = append(bookkeepers, bookkeeper)
+		}
+		err = ont.VerifyCrossChainMsg(service, params.SourceChainID, crossChainMsg, bookkeepers)
+		if err != nil {
+			return nil, fmt.Errorf("ont MakeDepositProposal, VerifyCrossChainMsg error: %v", err)
+		}
+		err = ont.PutCrossChainMsg(service, params.SourceChainID, crossChainMsg)
+		if err != nil {
+			return nil, fmt.Errorf("ont MakeDepositProposal, PutCrossChainMsg error: %v", err)
+		}
+	}
+
+	//get registered side chain information from poly chain
+	sideChain, err := side_chain_manager.GetSideChain(service, params.SourceChainID)
+	if err != nil {
+		return nil, fmt.Errorf("ont MakeDepositProposal, side_chain_manager.GetSideChain error: %v", err)
+	}
+
+	value, err := VerifyFromOntTx(params.Proof, crossChainMsg, sideChain)
+	if err != nil {
+		return nil, fmt.Errorf("ont MakeDepositProposal, VerifyFromOntTx error: %v", err)
+	}
+	if err := scom.CheckDoneTx(service, value.CrossChainID, params.SourceChainID); err != nil {
+		return nil, fmt.Errorf("ont MakeDepositProposal, check done transaction error: %v", err)
+	}
+	if err = scom.PutDoneTx(service, value.CrossChainID, params.SourceChainID); err != nil {
+		return nil, fmt.Errorf("ont MakeDepositProposal, PutDoneTx error: %v", err)
+	}
+	return value, nil
+}
diff --git a/contracts/native/cross_chain_manager/ont/utils.go b/contracts/native/cross_chain_manager/ont/utils.go
new file mode 100644
index 00000000..ff6d35fa
--- /dev/null
+++ b/contracts/native/cross_chain_manager/ont/utils.go
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2021 The Zion Authors
+ * This file is part of The Zion library.
+ * + * The Zion is free software: you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * The Zion is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with The Zion. If not, see . + */ + +package ont + +import ( + "bytes" + "fmt" + + scom "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/common" + "github.com/ethereum/go-ethereum/contracts/native/cross_chain_manager/ont/merkle" + "github.com/ethereum/go-ethereum/contracts/native/governance/side_chain_manager" + otypes "github.com/ontio/ontology/core/types" +) + +func VerifyFromOntTx(proof []byte, crossChainMsg *otypes.CrossChainMsg, sideChain *side_chain_manager.SideChain) (*scom.MakeTxParam, error) { + v, err := merkle.MerkleProve(proof, crossChainMsg.StatesRoot.ToArray()) + if err != nil { + return nil, fmt.Errorf("VerifyFromOntTx, merkle.MerkleProve verify merkle proof error") + } + + if len(sideChain.CCMCAddress) == 0 { + // old sideChain for ontology + txParam, err := scom.DecodeTxParam(v) + if err != nil { + return nil, fmt.Errorf("VerifyFromOntTx, deserialize MakeTxParam error:%s", err) + } + return txParam, nil + } + + // new sideChain for ontology + txParam := new(scom.MakeTxParamWithSender) + if err := txParam.Deserialization(v); err != nil { + return nil, fmt.Errorf("VerifyFromOntTx, deserialize MakeTxParamWithSender error:%s", err) + } + + if !bytes.Equal(txParam.Sender[:], sideChain.CCMCAddress) { + return nil, fmt.Errorf("VerifyFromOntTx, invalid sender:%s", err) + } + + return &txParam.MakeTxParam, nil + +} diff --git 
a/contracts/native/header_sync/entrance.go b/contracts/native/header_sync/entrance.go index 01f2b2ec..4c16a38c 100644 --- a/contracts/native/header_sync/entrance.go +++ b/contracts/native/header_sync/entrance.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/contracts/native/header_sync/heco" "github.com/ethereum/go-ethereum/contracts/native/header_sync/msc" "github.com/ethereum/go-ethereum/contracts/native/header_sync/okex" + "github.com/ethereum/go-ethereum/contracts/native/header_sync/ont" "github.com/ethereum/go-ethereum/contracts/native/header_sync/polygon" "github.com/ethereum/go-ethereum/contracts/native/header_sync/quorum" "github.com/ethereum/go-ethereum/contracts/native/header_sync/zilliqa" @@ -171,6 +172,8 @@ func GetChainHandler(router uint64) (hscommon.HeaderSyncHandler, error) { return polygon.NewBorHandler(), nil case utils.COSMOS_ROUTER: return cosmos.NewCosmosHandler(), nil + case utils.ONT_ROUTER: + return ont.NewONTHandler(), nil case utils.ZILLIQA_ROUTER: return zilliqa.NewHandler(), nil default: diff --git a/go.mod b/go.mod index 5d7de307..792dae86 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/ethereum/go-ethereum go 1.16 require ( - github.com/ontio/ontology v1.11.0 github.com/Azure/azure-storage-blob-go v0.7.0 github.com/VictoriaMetrics/fastcache v1.6.0 github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20210329093354-1b8e0a7a2e25 @@ -37,6 +36,7 @@ require ( github.com/holiman/uint256 v1.2.0 github.com/huin/goupnp v1.0.2 github.com/influxdata/influxdb v1.8.3 + github.com/itchyny/base58-go v0.1.0 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e github.com/julienschmidt/httprouter v1.2.0 @@ -45,6 +45,8 @@ require ( github.com/mattn/go-isatty v0.0.12 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 + github.com/ontio/ontology v1.11.0 + github.com/ontio/ontology-crypto v1.0.9 
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 github.com/pkg/errors v0.9.1 github.com/prometheus/tsdb v0.7.1 diff --git a/go.sum b/go.sum index 43396d55..41fc1351 100644 --- a/go.sum +++ b/go.sum @@ -44,6 +44,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/JohnCGriffin/overflow v0.0.0-20170615021017-4d914c927216 h1:2ZboyJ8vl75fGesnG9NpMTD2DyQI3FzMXy4x752rGF0= github.com/JohnCGriffin/overflow v0.0.0-20170615021017-4d914c927216/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -391,6 +392,7 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/itchyny/base58-go v0.1.0 h1:zF5spLDo956exUAD17o+7GamZTRkXOZlqJjRciZwd1I= github.com/itchyny/base58-go v0.1.0/go.mod h1:SrMWPE3DFuJJp1M/RUhu4fccp/y9AlB8AL3o3duPToU= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= @@ -516,7 
+518,9 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/ontio/ontology v1.11.0 h1:0T/hxFDHQqRcs1+yEdgaym5YIvGx5yebOsHYdKVWgHI= github.com/ontio/ontology v1.11.0/go.mod h1:Qw74bfTBlIQka+jQX4nXuWvyOYGGt368/V7XFxaf4tY= +github.com/ontio/ontology-crypto v1.0.9 h1:6fxBsz3W4CcdJk4/9QO7j0Qq7NdlP2ixPrViu8XpzzM= github.com/ontio/ontology-crypto v1.0.9/go.mod h1:h/jeqqb9Ma/Leszxqh6zY3eTF2yks44hyRKikMni+YQ= github.com/ontio/ontology-eventbus v0.9.1/go.mod h1:hCQIlbdPckcfykMeVUdWrqHZ8d30TBdmLfXCVWGkYhM= github.com/ontio/wagon v0.4.1/go.mod h1:oTPdgWT7WfPlEyzVaHSn1vQPMSbOpQPv+WphxibWlhg=