diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index 277205413..d88022b09 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -23,6 +23,10 @@ env: mceMode: 'od' requireTriggers: '1' useZ3API: '0' + viperBackend: 'SILICON' + disableNL: '0' + unsafeWildcardOptimization: '1' + overflow: '0' jobs: verify-deps: @@ -60,7 +64,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/addr' uses: viperproject/gobra-action@main with: @@ -76,12 +84,16 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/experimental/epic' uses: viperproject/gobra-action@main with: packages: 'pkg/experimental/epic' - timeout: 5m + timeout: 7m headerOnly: ${{ env.headerOnly }} module: ${{ env.module }} includePaths: ${{ env.includePaths }} @@ -91,7 +103,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/log' uses: viperproject/gobra-action@main with: @@ -106,7 +122,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/private/serrors' uses: viperproject/gobra-action@main with: @@ -121,7 +141,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/scrypto' uses: viperproject/gobra-action@main with: @@ -136,7 +160,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers' uses: viperproject/gobra-action@main with: @@ -151,7 +179,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path' uses: viperproject/gobra-action@main with: @@ -166,7 +198,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ 
env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/empty' uses: viperproject/gobra-action@main with: @@ -181,7 +217,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/epic' uses: viperproject/gobra-action@main with: @@ -197,7 +237,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/onehop' uses: viperproject/gobra-action@main with: @@ -212,7 +256,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'pkg/slayers/path/scion' uses: viperproject/gobra-action@main with: @@ -227,7 +275,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/topology' uses: viperproject/gobra-action@main with: @@ -242,7 +294,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/topology/underlay' uses: viperproject/gobra-action@main with: @@ -257,7 +313,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/underlay/conn' uses: viperproject/gobra-action@main with: @@ -272,7 +332,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'private/underlay/sockctrl' uses: viperproject/gobra-action@main with: @@ -287,7 +351,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} 
useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'router/bfd' uses: viperproject/gobra-action@main with: @@ -302,7 +370,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Verify package 'router/control' uses: viperproject/gobra-action@main with: @@ -317,7 +389,11 @@ jobs: imageVersion: ${{ env.imageVersion }} mceMode: ${{ env.mceMode }} requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} + disableNL: ${{ env.disableNL }} + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: ${{ env.unsafeWildcardOptimization }} - name: Upload the verification report uses: actions/upload-artifact@v2 with: @@ -339,12 +415,20 @@ jobs: includePaths: ${{ env.includePaths }} assumeInjectivityOnInhale: ${{ env.assumeInjectivityOnInhale }} checkConsistency: ${{ env.checkConsistency }} + # Due to a bug introduced in https://github.com/viperproject/gobra/pull/776, + # we must currently disable the chopper; otherwise, the well-founded orders + # needed for termination checking are not available in the chopped Viper parts. + # We should re-enable it whenever possible, as it reduces verification time by + # ~8 min (20%). + # chop: 10 parallelizeBranches: '1' - # The following flag has a significant influence on the number of branches verified. - # Without it, verification would take a lot longer. conditionalizePermissions: '1' + moreJoins: 'impure' imageVersion: ${{ env.imageVersion }} mceMode: 'on' requireTriggers: ${{ env.requireTriggers }} + overflow: ${{ env.overflow }} useZ3API: ${{ env.useZ3API }} - + disableNL: '0' + viperBackend: ${{ env.viperBackend }} + unsafeWildcardOptimization: '0' \ No newline at end of file diff --git a/README.md b/README.md index a1c68552b..07834b485 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # VerifiedSCION -This package contains the **verified** implementation of the +This package contains the **verified** implementation of the router from the [SCION](http://www.scion-architecture.net) protocol, a future Internet architecture. SCION is the first clean-slate Internet architecture designed to provide route control, failure @@ -10,7 +10,7 @@ isolation, and explicit trust information for end-to-end communication. To find out more about the project, please visit the [official project page](https://www.pm.inf.ethz.ch/research/verifiedscion.html). -> We are currently in the process of migrating the specifications and other annotations from the [original VerifiedSCION repository](https://github.com/jcp19/VerifiedSCION) to this one. This repository contains an up-to-date version of SCION (which we plan to keep updated), as well as improvements resulting from our experience from our first efforts on verifying SCION. +> This repository contains a recent version of SCION (which we plan to keep updated), as well as fixes for the bugs that we report to the mainline SCION repository as a result of verifying the SCION router. ## Methodology We focus on verifying the main implementation of SCION, written in the *Go* programming language.
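The contracts that appear throughout the diffs below are Gobra annotations embedded in `//@` comments, so the annotated files remain ordinary Go sources for the standard toolchain. A rough, hypothetical illustration of the style (not code from this repository), showing a function that zeroes a prefix of a slice:

    package sketch

    // ZeroPrefix zeroes the first n bytes of b. Contracts and loop
    // invariants use Gobra's comment syntax, so Go ignores them.
    //@ requires 0 <= n && n <= len(b)
    //@ preserves forall i int :: { &b[i] } 0 <= i && i < len(b) ==> acc(&b[i])
    //@ ensures forall i int :: { &b[i] } 0 <= i && i < n ==> b[i] == 0
    //@ decreases
    func ZeroPrefix(b []byte, n int) {
        //@ invariant 0 <= j && j <= n
        //@ invariant forall i int :: { &b[i] } 0 <= i && i < len(b) ==> acc(&b[i])
        //@ invariant forall i int :: { &b[i] } 0 <= i && i < j ==> b[i] == 0
        //@ decreases n - j
        for j := 0; j < n; j++ {
            b[j] = 0
        }
    }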
diff --git a/pkg/addr/host.go b/pkg/addr/host.go index 067b348a5..8efeb76aa 100644 --- a/pkg/addr/host.go +++ b/pkg/addr/host.go @@ -28,7 +28,7 @@ import ( "github.com/scionproto/scion/pkg/private/serrors" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) type HostAddrType uint8 @@ -196,7 +196,7 @@ func (h HostIPv4) Pack() (res []byte) { func (h HostIPv4) IP() (res net.IP) { // XXX(kormat): ensure the reply is the 4-byte representation. //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) return net.IP(h).To4( /*@ false @*/ ) } @@ -205,10 +205,10 @@ func (h HostIPv4) IP() (res net.IP) { // @ decreases func (h HostIPv4) Copy() (res HostAddr) { //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) var tmp HostIPv4 = HostIPv4(append( /*@ R13, @*/ net.IP(nil), h...)) - //@ fold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold acc(sl.Bytes(h, 0, len(h)), R13) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold acc(h.Mem(), R13) //@ fold tmp.Mem() return tmp @@ -231,7 +231,7 @@ func (h HostIPv4) Equal(o HostAddr) bool { func (h HostIPv4) String() string { //@ assert unfolding acc(h.Mem(), R13) in len(h) == HostLenIPv4 //@ ghost defer fold acc(h.Mem(), R13) - //@ ghost defer fold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ ghost defer fold acc(sl.Bytes(h, 0, len(h)), R13) return h.IP().String() } @@ -254,7 +254,7 @@ func (h HostIPv6) Type() HostAddrType { // @ decreases func (h HostIPv6) Pack() (res []byte) { //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) return []byte(h)[:HostLenIPv6] } @@ -264,7 +264,7 @@ func (h HostIPv6) Pack() (res []byte) { // @ decreases func (h HostIPv6) IP() (res net.IP) { //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) return net.IP(h) } @@ -273,10 +273,10 @@ func (h HostIPv6) IP() (res net.IP) { // @ decreases func (h HostIPv6) Copy() (res HostAddr) { //@ unfold acc(h.Mem(), R13) - //@ unfold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ unfold acc(sl.Bytes(h, 0, len(h)), R13) var tmp HostIPv6 = HostIPv6(append( /*@ R13, @*/ net.IP(nil), h...)) - //@ fold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold acc(sl.Bytes(h, 0, len(h)), R13) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold acc(h.Mem(), R13) //@ fold tmp.Mem() return tmp @@ -299,7 +299,7 @@ func (h HostIPv6) Equal(o HostAddr) bool { func (h HostIPv6) String() string { //@ assert unfolding acc(h.Mem(), R13) in len(h) == HostLenIPv6 //@ ghost defer fold acc(h.Mem(), R13) - //@ ghost defer fold acc(slices.AbsSlice_Bytes(h, 0, len(h)), R13) + //@ ghost defer fold acc(sl.Bytes(h, 0, len(h)), R13) return h.IP().String() } @@ -442,7 +442,7 @@ func HostFromRaw(b []byte, htype HostAddrType) (res HostAddr, err error) { } //@ assert forall i int :: { &b[:HostLenIPv4][i] } 0 <= i && i < len(b[:HostLenIPv4]) ==> &b[:HostLenIPv4][i] == &b[i] tmp := HostIPv4(b[:HostLenIPv4]) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold tmp.Mem() return tmp, nil case HostTypeIPv6: @@ -451,7 
+451,7 @@ func HostFromRaw(b []byte, htype HostAddrType) (res HostAddr, err error) { } //@ assert forall i int :: { &b[:HostLenIPv4][i] } 0 <= i && i < len(b[:HostLenIPv4]) ==> &b[:HostLenIPv4][i] == &b[i] tmp := HostIPv6(b[:HostLenIPv6]) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold tmp.Mem() return tmp, nil case HostTypeSVC: @@ -473,12 +473,12 @@ func HostFromRaw(b []byte, htype HostAddrType) (res HostAddr, err error) { func HostFromIP(ip net.IP) (res HostAddr) { if ip4 := ip.To4( /*@ false @*/ ); ip4 != nil { tmp := HostIPv4(ip4) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold tmp.Mem() return tmp } tmp := HostIPv6(ip) - //@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp)) + //@ fold sl.Bytes(tmp, 0, len(tmp)) //@ fold tmp.Mem() return tmp } diff --git a/pkg/addr/host_spec.gobra b/pkg/addr/host_spec.gobra index 16364a6c5..ed6e9032f 100644 --- a/pkg/addr/host_spec.gobra +++ b/pkg/addr/host_spec.gobra @@ -28,14 +28,14 @@ HostNone implements HostAddr pred (h HostIPv4) Mem() { len(h) == HostLenIPv4 && - slices.AbsSlice_Bytes(h, 0, len(h)) + slices.Bytes(h, 0, len(h)) } HostIPv4 implements HostAddr pred (h HostIPv6) Mem() { len(h) == HostLenIPv6 && - slices.AbsSlice_Bytes(h, 0, len(h)) + slices.Bytes(h, 0, len(h)) } HostIPv6 implements HostAddr diff --git a/pkg/experimental/epic/epic.go b/pkg/experimental/epic/epic.go index 2a3c083f6..a183f361d 100644 --- a/pkg/experimental/epic/epic.go +++ b/pkg/experimental/epic/epic.go @@ -48,13 +48,11 @@ const ( var zeroInitVector /*@@@*/ [16]byte -/*@ // ghost init -func init() { - fold acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) - fold acc(postInitInvariant(), _) -} -@*/ +// @ func init() { +// @ fold acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) +// @ fold acc(postInitInvariant(), _) +// @ } // CreateTimestamp returns the epic timestamp, which encodes the current time (now) relative to the // input timestamp. The input timestamp must not be in the future (compared to the current time), @@ -107,26 +105,24 @@ func VerifyTimestamp(timestamp time.Time, epicTS uint32, now time.Time) (err err // If the same buffer is provided in subsequent calls to this function, the previously returned // EPIC MAC may get overwritten. Only the most recently returned EPIC MAC is guaranteed to be // valid. -// (VerifiedSCION) the following function is marked as trusted, even though it is verified, -// due to an incompletness of Gobra that keeps it from being able to prove that we have -// the magic wand at the end of a successful run. 
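The `--*` connective in the contract below is a separation-logic magic wand: a caller holding `sl.Bytes(res, 0, len(res)) --* sl.Bytes(buffer, 0, len(buffer))` can later trade the permissions to the returned sub-slice back for the whole buffer with an `apply` statement, exactly as VerifyHVF does further down. A minimal sketch of the `package`/`apply` protocol, assuming the `SplitRange_Bytes`/`CombineRange_Bytes` lemmas and `writePerm` from the verification utilities used throughout this diff:

    package sketch

    // firstWord hands out the first four bytes of buf plus a wand to
    // reassemble the full buffer. Hypothetical example, not repository code.
    //@ requires 4 <= len(buf) && sl.Bytes(buf, 0, len(buf))
    //@ ensures  sl.Bytes(res, 0, len(res))
    //@ ensures  sl.Bytes(res, 0, len(res)) --* sl.Bytes(buf, 0, len(buf))
    func firstWord(buf []byte) (res []byte) {
        //@ sl.SplitRange_Bytes(buf, 0, 4, writePerm) // Bytes(buf[0:4]) + remainder
        res = buf[:4]
        //@ package (sl.Bytes(res, 0, len(res)) --* sl.Bytes(buf, 0, len(buf))) {
        //@     // consuming the wand's left-hand side re-supplies Bytes(buf[0:4]),
        //@     // which recombines with the captured remainder:
        //@     sl.CombineRange_Bytes(buf, 0, 4, writePerm)
        //@ }
        return res
    }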
-// @ trusted // @ requires len(auth) == 16 -// @ requires sl.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ requires sl.Bytes(buffer, 0, len(buffer)) // @ preserves acc(s.Mem(ub), R20) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R20) -// @ preserves acc(sl.AbsSlice_Bytes(auth, 0, len(auth)), R30) -// @ ensures reserr == nil ==> sl.AbsSlice_Bytes(res, 0, len(res)) -// @ ensures reserr == nil ==> (sl.AbsSlice_Bytes(res, 0, len(res)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer))) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ preserves acc(sl.Bytes(auth, 0, len(auth)), R30) +// @ ensures reserr == nil ==> sl.Bytes(res, 0, len(res)) +// @ ensures reserr == nil ==> (sl.Bytes(res, 0, len(res)) --* sl.Bytes(buffer, 0, len(buffer))) // @ ensures reserr != nil ==> reserr.ErrorMem() -// @ ensures reserr != nil ==> sl.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ ensures reserr != nil ==> sl.Bytes(buffer, 0, len(buffer)) // @ decreases func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, timestamp uint32, buffer []byte /*@ , ghost ub []byte @*/) (res []byte, reserr error) { + // @ ghost oldBuffer := buffer + // @ ghost allocatesNewBuffer := len(buffer) < MACBufferSize if len(buffer) < MACBufferSize { buffer = make([]byte, MACBufferSize) - // @ fold sl.AbsSlice_Bytes(buffer, 0, len(buffer)) + // @ fold sl.Bytes(buffer, 0, len(buffer)) } // Initialize cryptographic MAC function @@ -149,11 +145,14 @@ func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, // @ ghost end := start + 4 result := input[len(input)-f.BlockSize() : len(input)-f.BlockSize()+4] // @ sl.SplitRange_Bytes(input, start, end, writePerm) - // @ package (sl.AbsSlice_Bytes(result, 0, len(result)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer))) { - // @ sl.CombineRange_Bytes(input, start, end, writePerm) - // @ sl.CombineRange_Bytes(buffer, 0, inputLength, writePerm) + // @ package (sl.Bytes(result, 0, len(result)) --* sl.Bytes(oldBuffer, 0, len(oldBuffer))) { + // @ ghost if !allocatesNewBuffer { + // @ assert oldBuffer === buffer + // @ sl.CombineRange_Bytes(input, start, end, writePerm) + // @ sl.CombineRange_Bytes(oldBuffer, 0, inputLength, writePerm) + // @ } // @ } - // @ assert (sl.AbsSlice_Bytes(result, 0, len(result)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer))) + // @ assert (sl.Bytes(result, 0, len(result)) --* sl.Bytes(oldBuffer, 0, len(oldBuffer))) return result, nil } @@ -162,11 +161,11 @@ func CalcMac(auth []byte, pktID epic.PktID, s *slayers.SCION, // bytes of the SCION path type MAC, has invalid length, or if the MAC calculation gives an error, // also VerifyHVF returns an error. The verification was successful if and only if VerifyHVF // returns nil. 
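The comparison that decides success below is `subtle.ConstantTimeCompare`, whose running time does not depend on the position of the first mismatching byte (for equal-length inputs), so no MAC prefix leaks through timing. A self-contained illustration using only the standard library:

    package main

    import (
        "crypto/subtle"
        "fmt"
    )

    // tagsEqual compares two MAC tags in constant time: ConstantTimeCompare
    // returns 1 iff the slices have equal length and equal contents.
    func tagsEqual(hvf, mac []byte) bool {
        return subtle.ConstantTimeCompare(hvf, mac) == 1
    }

    func main() {
        fmt.Println(tagsEqual([]byte{1, 2, 3, 4}, []byte{1, 2, 3, 4})) // true
        fmt.Println(tagsEqual([]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5})) // false
    }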
-// @ preserves sl.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ preserves sl.Bytes(buffer, 0, len(buffer)) // @ preserves acc(s.Mem(ub), R20) -// @ preserves acc(sl.AbsSlice_Bytes(hvf, 0, len(hvf)), R50) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R20) -// @ preserves acc(sl.AbsSlice_Bytes(auth, 0, len(auth)), R30) +// @ preserves acc(sl.Bytes(hvf, 0, len(hvf)), R50) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ preserves acc(sl.Bytes(auth, 0, len(auth)), R30) // @ ensures reserr != nil ==> reserr.ErrorMem() // @ decreases func VerifyHVF(auth []byte, pktID epic.PktID, s *slayers.SCION, @@ -182,11 +181,11 @@ func VerifyHVF(auth []byte, pktID epic.PktID, s *slayers.SCION, } if subtle.ConstantTimeCompare(hvf, mac) == 0 { - // @ apply sl.AbsSlice_Bytes(mac, 0, len(mac)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer)) + // @ apply sl.Bytes(mac, 0, len(mac)) --* sl.Bytes(buffer, 0, len(buffer)) return serrors.New("epic hop validation field verification failed", "hvf in packet", hvf, "calculated mac", mac, "auth", auth) } - // @ apply sl.AbsSlice_Bytes(mac, 0, len(mac)) --* sl.AbsSlice_Bytes(buffer, 0, len(buffer)) + // @ apply sl.Bytes(mac, 0, len(mac)) --* sl.Bytes(buffer, 0, len(buffer)) return nil } @@ -205,7 +204,7 @@ func CoreFromPktCounter(counter uint32) (uint8, uint32) { } // @ requires len(key) == 16 -// @ preserves acc(sl.AbsSlice_Bytes(key, 0, len(key)), R50) +// @ preserves acc(sl.Bytes(key, 0, len(key)), R50) // @ ensures reserr == nil ==> res != nil && res.Mem() && res.BlockSize() == 16 // @ ensures reserr != nil ==> reserr.ErrorMem() // @ decreases @@ -224,8 +223,8 @@ func initEpicMac(key []byte) (res cipher.BlockMode, reserr error) { // @ requires MACBufferSize <= len(inputBuffer) // @ preserves acc(s.Mem(ub), R20) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R20) -// @ preserves sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ preserves sl.Bytes(inputBuffer, 0, len(inputBuffer)) // @ ensures reserr == nil ==> 16 <= res && res <= len(inputBuffer) // @ ensures reserr != nil ==> reserr.ErrorMem() // @ decreases @@ -264,7 +263,7 @@ func prepareMacInput(pktID epic.PktID, s *slayers.SCION, timestamp uint32, inputLength := 16 * nrBlocks // Fill input - // @ unfold sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) + // @ unfold sl.Bytes(inputBuffer, 0, len(inputBuffer)) offset := 0 inputBuffer[0] = uint8(s.SrcAddrType & 0x3) // extract length bits offset += 1 @@ -272,12 +271,12 @@ func prepareMacInput(pktID epic.PktID, s *slayers.SCION, timestamp uint32, // @ &inputBuffer[offset:][i] == &inputBuffer[offset+i] binary.BigEndian.PutUint32(inputBuffer[offset:], timestamp) offset += 4 - // @ fold sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) + // @ fold sl.Bytes(inputBuffer, 0, len(inputBuffer)) // @ sl.SplitRange_Bytes(inputBuffer, offset, len(inputBuffer), writePerm) pktID.SerializeTo(inputBuffer[offset:]) // @ sl.CombineRange_Bytes(inputBuffer, offset, len(inputBuffer), writePerm) offset += epic.PktIDLen - // @ unfold sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) + // @ unfold sl.Bytes(inputBuffer, 0, len(inputBuffer)) // @ assert forall i int :: { &inputBuffer[offset:][i] } 0 <= i && i < len(inputBuffer[offset:]) ==> // @ &inputBuffer[offset:][i] == &inputBuffer[offset+i] binary.BigEndian.PutUint64(inputBuffer[offset:], uint64(s.SrcIA)) @@ -285,9 +284,9 @@ func prepareMacInput(pktID epic.PktID, s *slayers.SCION, timestamp uint32, // @ assert forall i int :: { &inputBuffer[offset:][i] } 
0 <= i && i < len(inputBuffer[offset:]) ==> // @ &inputBuffer[offset:][i] == &inputBuffer[offset+i] // @ sl.SplitRange_Bytes(ub, start, end, R20) - // @ unfold acc(sl.AbsSlice_Bytes(srcAddr, 0, len(srcAddr)), R20) + // @ unfold acc(sl.Bytes(srcAddr, 0, len(srcAddr)), R20) copy(inputBuffer[offset:], srcAddr /*@ , R20 @*/) - // @ fold acc(sl.AbsSlice_Bytes(srcAddr, 0, len(srcAddr)), R20) + // @ fold acc(sl.Bytes(srcAddr, 0, len(srcAddr)), R20) // @ sl.CombineRange_Bytes(ub, start, end, R20) offset += l // @ assert forall i int :: { &inputBuffer[offset:][i] } 0 <= i && i < len(inputBuffer[offset:]) ==> @@ -303,15 +302,15 @@ func prepareMacInput(pktID epic.PktID, s *slayers.SCION, timestamp uint32, // @ acc(&inputBuffer[offset:inputLength][i]) // @ establishPostInitInvariant() // @ unfold acc(postInitInvariant(), _) - // @ assert acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, 16), _) + // @ assert acc(sl.Bytes(zeroInitVector[:], 0, 16), _) // (VerifiedSCION) From the package invariant, we learn that we have a wildcard access to zeroInitVector. // Unfortunately, it is not possible to call `copy` with a wildcard amount, even though // that would be perfectly fine. The spec of `copy` would need to be adapted to allow for that case. - // @ inhale acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), R55) - // @ unfold acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), R55) + // @ inhale acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), R55) + // @ unfold acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), R55) // @ assert forall i int :: { &zeroInitVector[:][i] } 0 <= i && i < len(zeroInitVector[:]) ==> // @ &zeroInitVector[:][i] == &zeroInitVector[i] copy(inputBuffer[offset:inputLength], zeroInitVector[:] /*@ , R55 @*/) - // @ fold sl.AbsSlice_Bytes(inputBuffer, 0, len(inputBuffer)) + // @ fold sl.Bytes(inputBuffer, 0, len(inputBuffer)) return inputLength, nil } diff --git a/pkg/experimental/epic/epic_spec.gobra b/pkg/experimental/epic/epic_spec.gobra index b0eae8f15..7a8fdc0ed 100644 --- a/pkg/experimental/epic/epic_spec.gobra +++ b/pkg/experimental/epic/epic_spec.gobra @@ -21,7 +21,7 @@ import sl "github.com/scionproto/scion/verification/utils/slices" pred postInitInvariant() { acc(&zeroInitVector, _) && len(zeroInitVector[:]) == 16 && - acc(sl.AbsSlice_Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) + acc(sl.Bytes(zeroInitVector[:], 0, len(zeroInitVector[:])), _) } // learn the invariant established by init diff --git a/pkg/scrypto/scrypto_spec.gobra b/pkg/scrypto/scrypto_spec.gobra index 803e0d146..9244f498b 100644 --- a/pkg/scrypto/scrypto_spec.gobra +++ b/pkg/scrypto/scrypto_spec.gobra @@ -20,18 +20,18 @@ package scrypto import "hash" -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // The error returned by initMac is produced deterministically depending on the key. // If an initial call to initmac succeeds with a key, then any subsequent // calls to it will also succeed. This behaviour is abstracted using this // ghost function. 
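This determinism assumption matches how the standard library's MAC building blocks behave: for instance, `aes.NewCipher` fails exactly when the key length is invalid, so two calls with the same key always agree. A small standalone illustration (not repository code):

    package main

    import (
        "crypto/aes"
        "fmt"
    )

    func main() {
        good := make([]byte, 16) // valid AES-128 key length
        bad := make([]byte, 15)

        _, err1 := aes.NewCipher(good)
        _, err2 := aes.NewCipher(good)
        fmt.Println(err1 == nil, err2 == nil) // true true: same key, same outcome

        _, err := aes.NewCipher(bad)
        fmt.Println(err) // crypto/aes: invalid key size 15
    }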
ghost -requires acc(slices.AbsSlice_Bytes(key, 0, len(key)), _) +requires acc(sl.Bytes(key, 0, len(key)), _) decreases _ pure func ValidKeyForHash(key []byte) bool -preserves acc(slices.AbsSlice_Bytes(key, 0, len(key)), _) +preserves acc(sl.Bytes(key, 0, len(key)), _) ensures old(ValidKeyForHash(key)) ==> e == nil ensures e == nil ==> (h != nil && h.Mem() && ValidKeyForHash(key)) ensures e != nil ==> e.ErrorMem() diff --git a/pkg/slayers/extn.go b/pkg/slayers/extn.go index 499ada75c..951771c4f 100644 --- a/pkg/slayers/extn.go +++ b/pkg/slayers/extn.go @@ -70,16 +70,16 @@ func (o *tlvOption) length(fixLengths bool) (res int) { // @ requires 2 <= len(data) // @ preserves acc(o) -// @ preserves acc(sl.AbsSlice_Bytes(o.OptData, 0, len(o.OptData)), R20) -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(sl.Bytes(o.OptData, 0, len(o.OptData)), R20) +// @ preserves sl.Bytes(data, 0, len(data)) // @ decreases func (o *tlvOption) serializeTo(data []byte, fixLengths bool) { dryrun := data == nil if o.OptType == OptTypePad1 { if !dryrun { - // @ unfold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ unfold sl.Bytes(data, 0, len(data)) data[0] = 0x0 - // @ fold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ fold sl.Bytes(data, 0, len(data)) } return } @@ -87,19 +87,19 @@ func (o *tlvOption) serializeTo(data []byte, fixLengths bool) { o.OptDataLen = uint8(len(o.OptData)) } if !dryrun { - // @ unfold sl.AbsSlice_Bytes(data, 0, len(data)) - // @ unfold acc(sl.AbsSlice_Bytes(o.OptData, 0, len(o.OptData)), R20) + // @ unfold sl.Bytes(data, 0, len(data)) + // @ unfold acc(sl.Bytes(o.OptData, 0, len(o.OptData)), R20) data[0] = uint8(o.OptType) data[1] = o.OptDataLen // @ assert forall i int :: { &data[2:][i] } 0 <= i && i < len(data[2:]) ==> &data[2:][i] == &data[2+i] copy(data[2:], o.OptData /*@ , R20 @*/) - // @ fold acc(sl.AbsSlice_Bytes(o.OptData, 0, len(o.OptData)), R20) - // @ fold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ fold acc(sl.Bytes(o.OptData, 0, len(o.OptData)), R20) + // @ fold sl.Bytes(data, 0, len(data)) } } // @ requires 1 <= len(data) -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R41) // @ ensures err == nil ==> acc(res) // @ ensures (err == nil && res.OptType != OptTypePad1) ==> ( // @ 2 <= res.ActualLength && res.ActualLength <= len(data) && res.OptData === data[2:res.ActualLength]) @@ -107,8 +107,8 @@ func (o *tlvOption) serializeTo(data []byte, fixLengths bool) { // @ ensures err != nil ==> err.ErrorMem() // @ decreases func decodeTLVOption(data []byte) (res *tlvOption, err error) { - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) - // @ defer fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ unfold acc(sl.Bytes(data, 0, len(data)), R41) + // @ defer fold acc(sl.Bytes(data, 0, len(data)), R41) o := &tlvOption{OptType: OptionType(data[0])} if OptionType(data[0]) == OptTypePad1 { o.ActualLength = 1 @@ -133,16 +133,16 @@ func decodeTLVOption(data []byte) (res *tlvOption, err error) { // serializeTLVOptionPadding adds an appropriate PadN extension. 
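Concretely, `serializeTLVOptionPadding` (below) emits a single zero byte for one byte of padding (Pad1) and otherwise delegates to `tlvOption.serializeTo`, which writes a type byte, a data-length byte, and `padLength-2` bytes of zeroed option data. A standalone sketch of the resulting encoding; the PadN type value `0x01` is an assumption here, since the constant's value is not shown in this diff:

    package main

    import "fmt"

    // padBytes sketches the encoding produced by serializeTLVOptionPadding,
    // assuming OptTypePad1 == 0x00 and OptTypePadN == 0x01 (hypothetical values).
    func padBytes(padLength int) []byte {
        if padLength <= 0 {
            return nil
        }
        if padLength == 1 {
            return []byte{0x00} // Pad1: a lone zero byte, no length/data
        }
        out := make([]byte, padLength)
        out[0] = 0x01                // PadN option type
        out[1] = byte(padLength - 2) // OptDataLen
        return out                   // option data stays zeroed
    }

    func main() {
        fmt.Printf("%x\n", padBytes(1)) // 00
        fmt.Printf("% x\n", padBytes(4)) // 01 02 00 00
    }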
// @ requires padLength == 1 ==> 1 <= len(data) // @ requires 1 < padLength ==> 2 <= len(data) -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves sl.Bytes(data, 0, len(data)) // @ decreases func serializeTLVOptionPadding(data []byte, padLength int) { if padLength <= 0 { return } if padLength == 1 { - // @ unfold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ unfold sl.Bytes(data, 0, len(data)) data[0] = 0x0 - // @ fold sl.AbsSlice_Bytes(data, 0, len(data)) + // @ fold sl.Bytes(data, 0, len(data)) return } dataLen := uint8(padLength) - 2 @@ -151,7 +151,7 @@ func serializeTLVOptionPadding(data []byte, padLength int) { OptDataLen: dataLen, OptData: make([]byte, int(dataLen)), } - // @ fold sl.AbsSlice_Bytes(padN.OptData, 0, len(padN.OptData)) + // @ fold sl.Bytes(padN.OptData, 0, len(padN.OptData)) padN.serializeTo(data, false) } @@ -241,7 +241,7 @@ func (e *extnBase) serializeToWithTLVOptions(b gopacket.SerializeBuffer, // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ ensures resErr != nil ==> resErr.ErrorMem() // The following postcondition is a lot more complicated than it would be if the return type // were *extnBase instead of extnBase @@ -259,10 +259,10 @@ func decodeExtnBase(data []byte, df gopacket.DecodeFeedback) (res extnBase, resE len(data))) } - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ unfold acc(sl.Bytes(data, 0, len(data)), R40) e.NextHdr = L4ProtocolType(data[0]) e.ExtLen = data[1] - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ fold acc(sl.Bytes(data, 0, len(data)), R40) e.ActualLen = (int(e.ExtLen) + 1) * LineLen if len(data) < e.ActualLen { return extnBase{}, serrors.New(fmt.Sprintf("invalid extension header.
"+ @@ -346,7 +346,7 @@ func (h *HopByHopExtn) SerializeTo(b gopacket.SerializeBuffer, // @ requires h.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> h.Mem(data) // @ ensures res != nil ==> (h.NonInitMem() && res.ErrorMem()) // @ decreases @@ -373,14 +373,14 @@ func (h *HopByHopExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) // @ invariant len(h.Options) == lenOptions // @ invariant forall i int :: { &h.Options[i] } 0 <= i && i < lenOptions ==> // @ (acc(&h.Options[i]) && h.Options[i].Mem(i)) - // @ invariant acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ invariant acc(sl.Bytes(data, 0, len(data)), R40) // @ invariant h.BaseLayer.Contents === data[:h.ActualLen] // @ invariant h.BaseLayer.Payload === data[h.ActualLen:] // @ decreases h.ActualLen - offset for offset < h.ActualLen { - // @ sl.SplitRange_Bytes(data, offset, h.ActualLen, R20) + // @ sl.SplitRange_Bytes(data, offset, h.ActualLen, R40) opt, err := decodeTLVOption(data[offset:h.ActualLen]) - // @ sl.CombineRange_Bytes(data, offset, h.ActualLen, R20) + // @ sl.CombineRange_Bytes(data, offset, h.ActualLen, R40) if err != nil { // @ fold h.NonInitMem() return err @@ -399,7 +399,7 @@ func (h *HopByHopExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) } // @ requires p != nil -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves p.Mem() // @ ensures res != nil ==> res.ErrorMem() // @ decreases @@ -478,7 +478,7 @@ func (e *EndToEndExtn) LayerPayload( /*@ ghost ub []byte @*/ ) (res []byte /*@ , // @ requires e.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> e.Mem(data) // @ ensures res != nil ==> (e.NonInitMem() && res.ErrorMem()) // @ decreases @@ -505,14 +505,14 @@ func (e *EndToEndExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) // @ invariant len(e.Options) == lenOptions // @ invariant forall i int :: { &e.Options[i] } 0 <= i && i < lenOptions ==> // @ (acc(&e.Options[i]) && e.Options[i].Mem(i)) - // @ invariant acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) + // @ invariant acc(sl.Bytes(data, 0, len(data)), R40) // @ invariant e.BaseLayer.Contents === data[:e.ActualLen] // @ invariant e.BaseLayer.Payload === data[e.ActualLen:] // @ decreases e.ActualLen - offset for offset < e.ActualLen { - // @ sl.SplitRange_Bytes(data, offset, e.ActualLen, R20) + // @ sl.SplitRange_Bytes(data, offset, e.ActualLen, R40) opt, err := decodeTLVOption(data[offset:e.ActualLen]) - // @ sl.CombineRange_Bytes(data, offset, e.ActualLen, R20) + // @ sl.CombineRange_Bytes(data, offset, e.ActualLen, R40) if err != nil { // @ fold e.NonInitMem() return err @@ -531,7 +531,7 @@ func (e *EndToEndExtn) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) } // @ requires p != nil -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves p.Mem() // @ ensures res != nil ==> res.ErrorMem() // @ decreases @@ -600,7 +600,7 @@ type HopByHopExtnSkipper struct { // DecodeFromBytes implementation according to gopacket.DecodingLayer // @ requires s.NonInitMem() // @ requires df != nil -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.Bytes(data, 0, len(data)), 
R40) // @ preserves df.Mem() // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) @@ -656,7 +656,7 @@ type EndToEndExtnSkipper struct { // @ requires s.NonInitMem() // @ requires df != nil // @ preserves df.Mem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R20) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ ensures res == nil ==> s.Mem(data) // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem()) // @ decreases diff --git a/pkg/slayers/extn_spec.gobra b/pkg/slayers/extn_spec.gobra index aa9ff299f..32c6ab920 100644 --- a/pkg/slayers/extn_spec.gobra +++ b/pkg/slayers/extn_spec.gobra @@ -20,7 +20,7 @@ import ( "github.com/google/gopacket" . "github.com/scionproto/scion/verification/utils/definitions" - "github.com/scionproto/scion/verification/utils/slices" + // sl "github.com/scionproto/scion/verification/utils/slices" ) /** start of extnBase **/ @@ -226,7 +226,7 @@ func (s *EndToEndExtnSkipper) DowngradePerm(ghost ub []byte) { pred (o *HopByHopOption) Mem(_ int) { // permissions to the elements of OptData will be stored // together with the underlying, not in the option itself - acc(o) // && slices.AbsSlice_Bytes(o.OptData, 0, len(o.OptData)) + acc(o) // && sl.Bytes(o.OptData, 0, len(o.OptData)) } // TODO: maybe add the underlying slice as a parameter to be able to @@ -234,7 +234,7 @@ pred (o *HopByHopOption) Mem(_ int) { pred (e *EndToEndOption) Mem(_ int) { // permissions to the elements of OptData will be stored // together with the underlying, not in the option itself - acc(e) // && slices.AbsSlice_Bytes(e.OptData, 0, len(e.OptData)) + acc(e) // && sl.Bytes(e.OptData, 0, len(e.OptData)) } diff --git a/pkg/slayers/path/empty/empty.go b/pkg/slayers/path/empty/empty.go index 7151b024e..73e92dc15 100644 --- a/pkg/slayers/path/empty/empty.go +++ b/pkg/slayers/path/empty/empty.go @@ -71,29 +71,28 @@ func (o Path) DecodeFromBytes(r []byte) (e error) { // @ ensures e == nil // @ decreases -func (o Path) SerializeTo(b []byte /*@, underlyingBuf []byte @*/) (e error) { +func (o Path) SerializeTo(b []byte /*@, ub []byte @*/) (e error) { return nil } -// @ requires o.Mem(underlyingBuf) +// @ requires o.Mem(ub) // @ ensures p == o -// @ ensures p.Mem(underlyingBuf) +// @ ensures p.Mem(ub) // @ ensures e == nil // @ decreases -func (o Path) Reverse( /*@ underlyingBuf []byte @*/ ) (p path.Path, e error) { +func (o Path) Reverse( /*@ ub []byte @*/ ) (p path.Path, e error) { return o, nil } -// @ pure -// @ ensures 0 <= r +// @ ensures r == o.LenSpec(ub) // @ decreases -func (o Path) Len( /*@ underlyingBuf []byte @*/ ) (r int) { +func (o Path) Len( /*@ ub []byte @*/ ) (r int) { return PathLen } // @ pure // @ ensures r == PathType // @ decreases -func (o Path) Type( /*@ underlyingBuf []byte @*/ ) (r path.Type) { +func (o Path) Type( /*@ ub []byte @*/ ) (r path.Type) { return PathType } diff --git a/pkg/slayers/path/empty/empty_spec.gobra b/pkg/slayers/path/empty/empty_spec.gobra index 99e53a26e..044a671fa 100644 --- a/pkg/slayers/path/empty/empty_spec.gobra +++ b/pkg/slayers/path/empty/empty_spec.gobra @@ -34,6 +34,20 @@ func (e Path) DowngradePerm(buf []byte) { fold e.NonInitMem() } +ghost +pure +decreases +func (p Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + +ghost +pure +decreases +func (p Path) LenSpec(ghost ub []byte) (l int) { + return PathLen +} + Path implements path.Path // Definitions to allow *Path to be treated as a path.Path diff --git a/pkg/slayers/path/epic/epic.go 
b/pkg/slayers/path/epic/epic.go index f8b1a3c08..b9400a1ca 100644 --- a/pkg/slayers/path/epic/epic.go +++ b/pkg/slayers/path/epic/epic.go @@ -24,7 +24,7 @@ import ( "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) const ( @@ -81,11 +81,11 @@ type Path struct { // SerializeTo serializes the Path into buffer b. On failure, an error is returned, otherwise // SerializeTo will return nil. // @ preserves acc(p.Mem(ubuf), R1) -// @ preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) -// @ preserves slices.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures r != nil ==> r.ErrorMem() // @ ensures !old(p.hasScionPath(ubuf)) ==> r != nil -// @ ensures len(b) < old(p.Len(ubuf)) ==> r != nil +// @ ensures len(b) < old(p.LenSpec(ubuf)) ==> r != nil // @ ensures old(p.getPHVFLen(ubuf)) != HVFLen ==> r != nil // @ ensures old(p.getLHVFLen(ubuf)) != HVFLen ==> r != nil // @ decreases @@ -105,40 +105,40 @@ func (p *Path) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { if p.ScionPath == nil { return serrors.New("SCION path is nil") } - //@ slices.SplitByIndex_Bytes(b, 0, len(b), PktIDLen, writePerm) - //@ slices.Reslice_Bytes(b, 0, PktIDLen, writePerm) + //@ sl.SplitByIndex_Bytes(b, 0, len(b), PktIDLen, writePerm) + //@ sl.Reslice_Bytes(b, 0, PktIDLen, writePerm) p.PktID.SerializeTo(b[:PktIDLen]) - //@ slices.Unslice_Bytes(b, 0, PktIDLen, writePerm) - //@ slices.SplitByIndex_Bytes(b, PktIDLen, len(b), PktIDLen+HVFLen, writePerm) - //@ slices.Reslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) - //@ unfold slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen) - //@ unfold acc(slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)), R2) + //@ sl.Unslice_Bytes(b, 0, PktIDLen, writePerm) + //@ sl.SplitByIndex_Bytes(b, PktIDLen, len(b), PktIDLen+HVFLen, writePerm) + //@ sl.Reslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) + //@ unfold sl.Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen) + //@ unfold acc(sl.Bytes(p.PHVF, 0, len(p.PHVF)), R2) copy(b[PktIDLen:(PktIDLen+HVFLen)], p.PHVF /*@, R3 @*/) - //@ fold slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen) - //@ fold acc(slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)), R2) - //@ slices.Unslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, PktIDLen+HVFLen, PktIDLen, writePerm) - //@ slices.SplitByIndex_Bytes(b, PktIDLen+HVFLen, len(b), MetadataLen, writePerm) - //@ slices.Reslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) - //@ unfold acc(slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)), R3) - //@ unfold slices.AbsSlice_Bytes(b[(PktIDLen+HVFLen):MetadataLen], 0, HVFLen) + //@ fold sl.Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen) + //@ fold acc(sl.Bytes(p.PHVF, 0, len(p.PHVF)), R2) + //@ sl.Unslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) + //@ sl.CombineAtIndex_Bytes(b, 0, PktIDLen+HVFLen, PktIDLen, writePerm) + //@ sl.SplitByIndex_Bytes(b, PktIDLen+HVFLen, len(b), MetadataLen, writePerm) + //@ sl.Reslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) + //@ unfold acc(sl.Bytes(p.LHVF, 0, len(p.LHVF)), R3) + //@ unfold sl.Bytes(b[(PktIDLen+HVFLen):MetadataLen], 0, HVFLen) copy(b[(PktIDLen+HVFLen):MetadataLen], p.LHVF /*@, R3 @*/) - //@ fold 
slices.AbsSlice_Bytes(b[(PktIDLen+HVFLen):MetadataLen], 0, HVFLen) - //@ fold acc(slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)), R3) - //@ slices.Unslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, MetadataLen, PktIDLen+HVFLen, writePerm) - //@ slices.Reslice_Bytes(b, MetadataLen, len(b), writePerm) - //@ ghost defer slices.CombineAtIndex_Bytes(b, 0, len(b), MetadataLen, writePerm) - //@ ghost defer slices.Unslice_Bytes(b, MetadataLen, len(b), writePerm) - //@ slices.SplitRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) - //@ ghost defer slices.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + //@ fold sl.Bytes(b[(PktIDLen+HVFLen):MetadataLen], 0, HVFLen) + //@ fold acc(sl.Bytes(p.LHVF, 0, len(p.LHVF)), R3) + //@ sl.Unslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) + //@ sl.CombineAtIndex_Bytes(b, 0, MetadataLen, PktIDLen+HVFLen, writePerm) + //@ sl.Reslice_Bytes(b, MetadataLen, len(b), writePerm) + //@ ghost defer sl.CombineAtIndex_Bytes(b, 0, len(b), MetadataLen, writePerm) + //@ ghost defer sl.Unslice_Bytes(b, MetadataLen, len(b), writePerm) + //@ sl.SplitRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + //@ ghost defer sl.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) return p.ScionPath.SerializeTo(b[MetadataLen:] /*@, ubuf[MetadataLen:] @*/) } // DecodeFromBytes deserializes the buffer b into the Path. On failure, an error is returned, // otherwise SerializeTo will return nil. // @ requires p.NonInitMem() -// @ preserves slices.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves acc(sl.Bytes(b, 0, len(b)), R42) // @ ensures len(b) < MetadataLen ==> r != nil // @ ensures r == nil ==> p.Mem(b) // @ ensures r != nil ==> p.NonInitMem() && r.ErrorMem() @@ -148,74 +148,41 @@ func (p *Path) DecodeFromBytes(b []byte) (r error) { return serrors.New("EPIC Path raw too short", "expected", int(MetadataLen), "actual", int(len(b))) } //@ unfold p.NonInitMem() - //@ slices.SplitByIndex_Bytes(b, 0, len(b), PktIDLen, writePerm) - //@ preserves slices.AbsSlice_Bytes(b, 0, PktIDLen) - //@ preserves acc(&p.PktID) - //@ preserves acc(&p.PHVF) - //@ preserves acc(&p.LHVF) - //@ ensures p.PHVF != nil && len(p.PHVF) == HVFLen - //@ ensures p.LHVF != nil && len(p.LHVF) == HVFLen - //@ ensures slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ ensures slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ decreases - //@ outline( - //@ ghost slices.Reslice_Bytes(b, 0, PktIDLen, writePerm) + //@ sl.SplitRange_Bytes(b, 0, PktIDLen, R42) p.PktID.DecodeFromBytes(b[:PktIDLen]) + //@ sl.CombineRange_Bytes(b, 0, PktIDLen, R42) + //@ unfold acc(sl.Bytes(b, 0, len(b)), R42) p.PHVF = make([]byte, HVFLen) p.LHVF = make([]byte, HVFLen) - //@ fold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ fold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ slices.Unslice_Bytes(b, 0, PktIDLen, writePerm) - //@ ) - //@ slices.SplitByIndex_Bytes(b, PktIDLen, len(b), PktIDLen+HVFLen, writePerm) - //@ preserves acc(&p.PHVF) - //@ preserves slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ preserves slices.AbsSlice_Bytes(b, PktIDLen, PktIDLen + HVFLen) - //@ decreases - //@ outline( - //@ slices.Reslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) - //@ unfold slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ unfold acc(slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen), R1) - copy(p.PHVF, b[PktIDLen:(PktIDLen+HVFLen)] /*@, R1 @*/) - //@ fold acc(slices.AbsSlice_Bytes(b[PktIDLen:(PktIDLen+HVFLen)], 0, HVFLen), R1) - //@ fold 
slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) - //@ slices.Unslice_Bytes(b, PktIDLen, PktIDLen+HVFLen, writePerm) - //@ ) - //@ slices.CombineAtIndex_Bytes(b, 0, PktIDLen+HVFLen, PktIDLen, writePerm) - //@ slices.SplitByIndex_Bytes(b, PktIDLen+HVFLen, len(b), MetadataLen, writePerm) - //@ preserves acc(&p.LHVF) - //@ preserves slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ preserves slices.AbsSlice_Bytes(b, PktIDLen+HVFLen, MetadataLen) - //@ decreases - //@ outline( - //@ slices.Reslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) - //@ unfold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ unfold acc(slices.AbsSlice_Bytes(b[PktIDLen+HVFLen:MetadataLen], 0, HVFLen), R1) - copy(p.LHVF, b[(PktIDLen+HVFLen):MetadataLen] /*@, R1 @*/) - //@ fold acc(slices.AbsSlice_Bytes(b[PktIDLen+HVFLen:MetadataLen], 0, HVFLen), R1) - //@ fold slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) - //@ slices.Unslice_Bytes(b, PktIDLen+HVFLen, MetadataLen, writePerm) - //@ ) - //@ slices.CombineAtIndex_Bytes(b, 0, MetadataLen, PktIDLen+HVFLen, writePerm) + //@ assert forall i int :: { &b[PktIDLen:(PktIDLen+HVFLen)][i] } 0 <= i && + //@ i < len(b[PktIDLen:(PktIDLen+HVFLen)]) ==> + //@ &b[PktIDLen:(PktIDLen+HVFLen)][i] == &b[PktIDLen+i] + copy(p.PHVF, b[PktIDLen:(PktIDLen+HVFLen)] /*@, R42 @*/) + //@ fold sl.Bytes(p.PHVF, 0, len(p.PHVF)) + //@ assert forall i int :: { &b[(PktIDLen+HVFLen):MetadataLen][i] } 0 <= i && + //@ i < len(b[(PktIDLen+HVFLen):MetadataLen]) ==> + //@ &b[(PktIDLen+HVFLen):MetadataLen][i] == &b[(PktIDLen+HVFLen)+i] + copy(p.LHVF, b[(PktIDLen+HVFLen):MetadataLen] /*@, R42 @*/) + //@ fold sl.Bytes(p.LHVF, 0, len(p.LHVF)) p.ScionPath = &scion.Raw{} //@ fold p.ScionPath.Base.NonInitMem() //@ fold p.ScionPath.NonInitMem() - //@ slices.Reslice_Bytes(b, MetadataLen, len(b), writePerm) + //@ fold acc(sl.Bytes(b, 0, len(b)), R42) + //@ sl.SplitRange_Bytes(b, MetadataLen, len(b), R42) ret := p.ScionPath.DecodeFromBytes(b[MetadataLen:]) //@ ghost if ret == nil { //@ fold p.Mem(b) //@ } else { //@ fold p.NonInitMem() //@ } - //@ slices.Unslice_Bytes(b, MetadataLen, len(b), writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), MetadataLen, writePerm) + //@ sl.CombineRange_Bytes(b, MetadataLen, len(b), R42) return ret } // Reverse reverses the EPIC path. In particular, this means that the SCION path type subheader // is reversed. 
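For orientation, SerializeTo and DecodeFromBytes above lay out the EPIC path as an 8-byte packet ID followed by two 4-byte hop validation fields and then the embedded SCION path; this matches the offsets PktIDLen, PktIDLen+HVFLen, and MetadataLen used in the proofs, assuming the package constants PktIDLen == 8, HVFLen == 4, and MetadataLen == 16. A plain-Go sketch of that layout:

    package main

    import "fmt"

    // epicLayout carves a raw EPIC path into its regions (hypothetical
    // helper, not repository code).
    func epicLayout(b []byte) (pktID, phvf, lhvf, scion []byte) {
        return b[0:8], // timestamp (4 bytes) + counter (4 bytes), big endian
            b[8:12],  // PHVF: penultimate-hop validation field
            b[12:16], // LHVF: last-hop validation field
            b[16:]    // embedded SCION path, handled by p.ScionPath
    }

    func main() {
        pktID, phvf, lhvf, scion := epicLayout(make([]byte, 24))
        fmt.Println(len(pktID), len(phvf), len(lhvf), len(scion)) // 8 4 4 8
    }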
// @ requires p.Mem(ubuf) -// @ preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures r == nil ==> ret != nil // @ ensures r == nil ==> ret.Mem(ubuf) // @ ensures r == nil ==> ret != nil @@ -227,13 +194,13 @@ func (p *Path) Reverse( /*@ ghost ubuf []byte @*/ ) (ret path.Path, r error) { //@ fold p.Mem(ubuf) return nil, serrors.New("scion subpath must not be nil") } - //@ slices.SplitRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + //@ sl.SplitRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) revScion, err := p.ScionPath.Reverse( /*@ ubuf[MetadataLen:] @*/ ) if err != nil { - // @ slices.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + // @ sl.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) return nil, err } - //@ slices.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) + //@ sl.CombineRange_Bytes(ubuf, MetadataLen, len(ubuf), writePerm) ScionPath, ok := revScion.(*scion.Raw) if !ok { return nil, serrors.New("reversed path of type scion.Raw must not change type") @@ -244,20 +211,16 @@ func (p *Path) Reverse( /*@ ghost ubuf []byte @*/ ) (ret path.Path, r error) { } // Len returns the length of the EPIC path in bytes. -// (VerifiedSCION) This is currently not checked here because Gobra -// does not support statements in pure functions. The proof obligations -// for this method are discharged in function `len_test` in the file `epic_spec_test.gobra`. -// @ trusted -// @ pure -// @ requires acc(p.Mem(ubuf), _) -// @ ensures !p.hasScionPath(ubuf) ==> l == MetadataLen -// @ ensures p.hasScionPath(ubuf) ==> l == MetadataLen + unfolding acc(p.Mem(ubuf), _) in p.ScionPath.Len(ubuf[MetadataLen:]) +// @ preserves acc(p.Mem(ubuf), R50) +// @ ensures l == p.LenSpec(ubuf) // @ decreases func (p *Path) Len( /*@ ghost ubuf []byte @*/ ) (l int) { + // @ unfold acc(p.Mem(ubuf), R50) + // @ defer fold acc(p.Mem(ubuf), R50) if p.ScionPath == nil { return MetadataLen } - return MetadataLen + p.ScionPath.Len( /*@ ubuf @*/ ) + return MetadataLen + p.ScionPath.Len( /*@ ubuf[MetadataLen:] @*/ ) } // Type returns the EPIC path type identifier. @@ -278,29 +241,29 @@ type PktID struct { // DecodeFromBytes deserializes the buffer (raw) into the PktID. // @ requires len(raw) >= PktIDLen // @ preserves acc(i) -// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R1) +// @ preserves acc(sl.Bytes(raw, 0, len(raw)), R42) // @ ensures 0 <= i.Timestamp // @ ensures 0 <= i.Counter // @ decreases func (i *PktID) DecodeFromBytes(raw []byte) { - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R1) + //@ unfold acc(sl.Bytes(raw, 0, len(raw)), R42) //@ assert forall i int :: { &raw[:4][i] } 0 <= i && i < 4 ==> &raw[:4][i] == &raw[i] i.Timestamp = binary.BigEndian.Uint32(raw[:4]) //@ assert forall i int :: { &raw[4:8][i] } 0 <= i && i < 4 ==> &raw[4:8][i] == &raw[4 + i] i.Counter = binary.BigEndian.Uint32(raw[4:8]) - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, len(raw)), R1) + //@ fold acc(sl.Bytes(raw, 0, len(raw)), R42) } // SerializeTo serializes the PktID into the buffer (b). 
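As the two `Uint32` reads in DecodeFromBytes above show, a PktID occupies eight bytes: a big-endian 32-bit timestamp followed by a 32-bit counter. A runnable round trip using only `encoding/binary`:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        b := make([]byte, 8)
        binary.BigEndian.PutUint32(b[:4], 0x11223344) // Timestamp
        binary.BigEndian.PutUint32(b[4:8], 7)         // Counter
        fmt.Printf("% x\n", b) // 11 22 33 44 00 00 00 07

        // Decoding mirrors (*PktID).DecodeFromBytes:
        fmt.Println(binary.BigEndian.Uint32(b[:4]), binary.BigEndian.Uint32(b[4:8])) // 287454020 7
    }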
// @ requires len(b) >= PktIDLen // @ preserves acc(i, R1) -// @ preserves slices.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves sl.Bytes(b, 0, len(b)) // @ decreases func (i *PktID) SerializeTo(b []byte) { - //@ unfold slices.AbsSlice_Bytes(b, 0, len(b)) + //@ unfold sl.Bytes(b, 0, len(b)) //@ assert forall j int :: { &b[:4][j] } 0 <= 4 ==> &b[:4][j] == &b[j] binary.BigEndian.PutUint32(b[:4], i.Timestamp) //@ assert forall j int :: { &b[4:8][j] } 0 <= 4 ==> &b[4:8][j] == &b[4 + j] binary.BigEndian.PutUint32(b[4:8], i.Counter) - //@ fold slices.AbsSlice_Bytes(b, 0, len(b)) + //@ fold sl.Bytes(b, 0, len(b)) } diff --git a/pkg/slayers/path/epic/epic_spec.gobra b/pkg/slayers/path/epic/epic_spec.gobra index 46666a552..93380d08c 100644 --- a/pkg/slayers/path/epic/epic_spec.gobra +++ b/pkg/slayers/path/epic/epic_spec.gobra @@ -18,7 +18,9 @@ package epic import ( "github.com/scionproto/scion/pkg/slayers/path" - "github.com/scionproto/scion/verification/utils/slices" + "github.com/scionproto/scion/pkg/slayers/path/scion" + . "github.com/scionproto/scion/verification/utils/definitions" + sl "github.com/scionproto/scion/verification/utils/slices" ) pred (p *Path) NonInitMem() { @@ -27,8 +29,8 @@ pred (p *Path) NonInitMem() { pred (p *Path) Mem(ubuf []byte) { acc(&p.PktID) && - acc(&p.PHVF) && slices.AbsSlice_Bytes(p.PHVF, 0, len(p.PHVF)) && - acc(&p.LHVF) && slices.AbsSlice_Bytes(p.LHVF, 0, len(p.LHVF)) && + acc(&p.PHVF) && sl.Bytes(p.PHVF, 0, len(p.PHVF)) && + acc(&p.LHVF) && sl.Bytes(p.LHVF, 0, len(p.LHVF)) && acc(&p.ScionPath) && p.ScionPath != nil && MetadataLen <= len(ubuf) && @@ -36,39 +38,31 @@ pred (p *Path) Mem(ubuf []byte) { } ghost -requires p.Mem(buf) -ensures p.NonInitMem() -decreases -func (p *Path) DowngradePerm(buf []byte) { - unfold p.Mem(buf) - fold p.NonInitMem() -} - -ghost +pure requires acc(p.Mem(ub), _) decreases -pure func (p *Path) ValidCurrINF(ghost ub []byte) bool { +func (p *Path) LenSpec(ghost ub []byte) (l int) { return unfolding acc(p.Mem(ub), _) in - let ubPath := ub[MetadataLen:] in - p.ScionPath.ValidCurrINF(ubPath) + (p.ScionPath == nil ? 
+ MetadataLen : + MetadataLen + p.ScionPath.LenSpec(ub[MetadataLen:])) } ghost -requires acc(p.Mem(ub), _) +requires p.Mem(buf) +ensures p.NonInitMem() decreases -pure func (p *Path) ValidCurrHF(ghost ub []byte) bool { - return unfolding acc(p.Mem(ub), _) in - let ubPath := ub[MetadataLen:] in - p.ScionPath.ValidCurrHF(ubPath) +func (p *Path) DowngradePerm(buf []byte) { + unfold p.Mem(buf) + fold p.NonInitMem() } ghost -requires acc(p.Mem(ub), _) +requires acc(r.Mem(ub), _) decreases -pure func (p *Path) ValidCurrIdxs(ghost ub []byte) bool { - return unfolding acc(p.Mem(ub), _) in - let ubPath := ub[MetadataLen:] in - p.ScionPath.ValidCurrIdxs(ubPath) +pure func (r *Path) GetBase(ub []byte) scion.Base { + return unfolding acc(r.Mem(ub), _) in + r.ScionPath.GetBase(ub[MetadataLen:]) } ghost @@ -105,4 +99,11 @@ pure func (p *Path) GetUnderlyingScionPathBuf(buf []byte) []byte { return unfolding acc(p.Mem(buf), _) in buf[MetadataLen:] } +ghost +pure +decreases +func (p *Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + (*Path) implements path.Path \ No newline at end of file diff --git a/pkg/slayers/path/epic/epic_spec_test.gobra b/pkg/slayers/path/epic/epic_spec_test.gobra index a53b22eb0..ac1d32a33 100644 --- a/pkg/slayers/path/epic/epic_spec_test.gobra +++ b/pkg/slayers/path/epic/epic_spec_test.gobra @@ -24,21 +24,4 @@ func testAllocateNonInitMem() { } // A test folding Mem(ubuf) is skipped here, as one can just call DesugarFromBytes to get the -// desired predicate - -// (*Path).Len() cannot be currently be verified because Gobra does not allow statements in -// pure functions, but Len must be pure. -// This method contains the same exact body and checks that the contract holds. -ghost -preserves acc(p.Mem(ubuf), _) -ensures !p.hasScionPath(ubuf) ==> l == MetadataLen -ensures p.hasScionPath(ubuf) ==> l == MetadataLen + unfolding acc(p.Mem(ubuf), _) in p.ScionPath.Len(ubuf[MetadataLen:]) -decreases -func len_test(p *Path, ubuf []byte) (l int) { - unfold acc(p.Mem(ubuf), _) // would need to be 'unfolding' in the pure version - if p.ScionPath == nil { - return MetadataLen - } - unfold acc(p.ScionPath.Mem(ubuf[MetadataLen:]), _) // would need to be 'unfolding' in the pure version - return MetadataLen + p.ScionPath.Len(ubuf[MetadataLen:]) -} +// desired predicate. diff --git a/pkg/slayers/path/hopfield.go b/pkg/slayers/path/hopfield.go index 89ceaab80..57452ac26 100644 --- a/pkg/slayers/path/hopfield.go +++ b/pkg/slayers/path/hopfield.go @@ -22,7 +22,7 @@ import ( "github.com/scionproto/scion/pkg/private/serrors" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) const ( @@ -76,21 +76,17 @@ type HopField struct { // path.HopLen. 
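DecodeFromBytes (below) reads the hop field's wire format: a flags byte in which bit 0 is the egress and bit 1 the ingress router alert, an expiry byte, two big-endian interface identifiers, and the MAC. The sketch assumes HopLen == 12 and MacLen == 6, as in the SCION hop field format:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // decodeHop mirrors the field extraction in (*HopField).DecodeFromBytes,
    // assuming a 12-byte hop field with a 6-byte MAC.
    func decodeHop(raw []byte) (egAlert, inAlert bool, expTime byte, in, eg uint16, mac [6]byte) {
        egAlert = raw[0]&0x1 == 0x1
        inAlert = raw[0]&0x2 == 0x2
        expTime = raw[1]
        in = binary.BigEndian.Uint16(raw[2:4])
        eg = binary.BigEndian.Uint16(raw[4:6])
        copy(mac[:], raw[6:12])
        return
    }

    func main() {
        raw := []byte{0x3, 63, 0, 1, 0, 2, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}
        fmt.Println(decodeHop(raw)) // both alerts set, expiry 63, ifaces 1 and 2
    }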
// @ requires acc(h) // @ requires len(raw) >= HopLen -// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R45) +// @ preserves acc(sl.Bytes(raw, 0, HopLen), R45) // @ ensures h.Mem() // @ ensures err == nil +// @ ensures BytesToIO_HF(raw, 0, 0, HopLen) == +// @ unfolding acc(h.Mem(), R10) in h.ToIO_HF() // @ decreases func (h *HopField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < HopLen { return serrors.New("HopField raw too short", "expected", HopLen, "actual", len(raw)) } - //@ preserves acc(h) - //@ preserves acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) - //@ ensures h.ConsIngress >= 0 - //@ ensures h.ConsEgress >= 0 - //@ decreases - //@ outline( - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) + //@ unfold acc(sl.Bytes(raw, 0, HopLen), R46) h.EgressRouterAlert = raw[0]&0x1 == 0x1 h.IngressRouterAlert = raw[0]&0x2 == 0x2 h.ExpTime = raw[1] @@ -98,20 +94,16 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { h.ConsIngress = binary.BigEndian.Uint16(raw[2:4]) //@ assert &raw[4:6][0] == &raw[4] && &raw[4:6][1] == &raw[5] h.ConsEgress = binary.BigEndian.Uint16(raw[4:6]) - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) - //@ ) - //@ preserves acc(&h.Mac) - //@ preserves acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) - //@ decreases - //@ outline( - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) //@ assert forall i int :: { &h.Mac[:][i] } 0 <= i && i < len(h.Mac[:]) ==> //@ &h.Mac[i] == &h.Mac[:][i] //@ assert forall i int :: { &raw[6:6+MacLen][i] } 0 <= i && i < len(raw[6:6+MacLen]) ==> //@ &raw[6:6+MacLen][i] == &raw[i+6] copy(h.Mac[:], raw[6:6+MacLen] /*@ , R47 @*/) - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, HopLen), R46) - //@ ) + //@ assert forall i int :: {&h.Mac[:][i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == raw[6:6+MacLen][i] + //@ assert forall i int :: {&h.Mac[i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == h.Mac[i] + //@ EqualBytesImplyEqualMac(raw[6:6+MacLen], h.Mac) + //@ assert BytesToIO_HF(raw, 0, 0, HopLen) == h.ToIO_HF() + //@ fold acc(sl.Bytes(raw, 0, HopLen), R46) //@ fold h.Mem() return nil } @@ -120,19 +112,16 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { // path.HopLen. 
// @ requires len(b) >= HopLen // @ preserves acc(h.Mem(), R10) -// @ preserves slices.AbsSlice_Bytes(b, 0, HopLen) +// @ preserves sl.Bytes(b, 0, HopLen) // @ ensures err == nil +// @ ensures BytesToIO_HF(b, 0, 0, HopLen) == +// @ unfolding acc(h.Mem(), R10) in h.ToIO_HF() // @ decreases func (h *HopField) SerializeTo(b []byte) (err error) { if len(b) < HopLen { return serrors.New("buffer for HopField too short", "expected", MacLen, "actual", len(b)) } - //@ requires len(b) >= HopLen - //@ preserves acc(h.Mem(), R11) - //@ preserves slices.AbsSlice_Bytes(b, 0, HopLen) - //@ decreases - //@ outline( - //@ unfold slices.AbsSlice_Bytes(b, 0, HopLen) + //@ unfold sl.Bytes(b, 0, HopLen) //@ unfold acc(h.Mem(), R11) b[0] = 0 if h.EgressRouterAlert { @@ -147,24 +136,17 @@ func (h *HopField) SerializeTo(b []byte) (err error) { //@ assert &b[4:6][0] == &b[4] && &b[4:6][1] == &b[5] binary.BigEndian.PutUint16(b[4:6], h.ConsEgress) //@ assert forall i int :: { &b[i] } 0 <= i && i < HopLen ==> acc(&b[i]) - //@ fold slices.AbsSlice_Bytes(b, 0, HopLen) - //@ fold acc(h.Mem(), R11) - //@ ) - //@ requires len(b) >= HopLen - //@ preserves acc(h.Mem(), R11) - //@ preserves slices.AbsSlice_Bytes(b, 0, HopLen) - //@ decreases - //@ outline( - //@ unfold slices.AbsSlice_Bytes(b, 0, HopLen) - //@ unfold acc(h.Mem(), R11) //@ assert forall i int :: { &h.Mac[:][i] } 0 <= i && i < len(h.Mac) ==> //@ &h.Mac[i] == &h.Mac[:][i] //@ assert forall i int :: { &b[6:6+MacLen][i] }{ &b[i+6] } 0 <= i && i < MacLen ==> //@ &b[6:6+MacLen][i] == &b[i+6] - copy(b[6:6+MacLen], h.Mac[:] /*@, R11 @*/) - //@ fold slices.AbsSlice_Bytes(b, 0, HopLen) + copy(b[6:6+MacLen], h.Mac[:] /*@, R47 @*/) + //@ assert forall i int :: {&h.Mac[:][i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == b[6:6+MacLen][i] + //@ assert forall i int :: {&h.Mac[i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == h.Mac[i] + //@ EqualBytesImplyEqualMac(b[6:6+MacLen], h.Mac) + //@ fold sl.Bytes(b, 0, HopLen) + //@ assert h.ToIO_HF() == BytesToIO_HF(b, 0, 0, HopLen) //@ fold acc(h.Mem(), R11) - //@ ) return nil } diff --git a/pkg/slayers/path/hopfield_spec.gobra b/pkg/slayers/path/hopfield_spec.gobra index 53a84309e..26e79f8a1 100644 --- a/pkg/slayers/path/hopfield_spec.gobra +++ b/pkg/slayers/path/hopfield_spec.gobra @@ -16,32 +16,57 @@ package path -ghost const MetaLen = 4 +import ( + "verification/io" + sl "verification/utils/slices" + "verification/dependencies/encoding/binary" + . "verification/utils/definitions" +) pred (h *HopField) Mem() { acc(h) && h.ConsIngress >= 0 && h.ConsEgress >= 0 } -ghost + +ghost +decreases +pure func ifsToIO_ifs(ifs uint16) option[io.IO_ifs]{ + return ifs == 0 ? none[io.IO_ifs] : some(io.IO_ifs(ifs)) +} + +ghost decreases -pure func InfoFieldOffset(currINF int) int { - return MetaLen + InfoLen * currINF +pure func IO_ifsToIfs(ifs option[io.IO_ifs]) uint16{ + return ifs == none[io.IO_ifs] ? 
0 : uint16(get(ifs)) } -ghost -requires 0 <= currINF -requires InfoFieldOffset(currINF) < len(raw) -requires acc(&raw[InfoFieldOffset(currINF)], _) +ghost +requires 0 <= start && start <= middle +requires middle + HopLen <= end && end <= len(raw) +requires acc(sl.Bytes(raw, start, end), _) decreases -pure func ConsDir(raw []byte, currINF int) bool { - return raw[InfoFieldOffset(currINF)] & 0x1 == 0x1 +pure func BytesToIO_HF(raw [] byte, start int, middle int, end int) (io.IO_HF) { + return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle + 2 + k]) in + let _ := Asserting(forall k int :: {&raw[middle+4:middle+6][k]} 0 <= k && k < 2 ==> &raw[middle+4:middle+6][k] == &raw[middle + 4 + k]) in + let _ := Asserting(forall k int :: {&raw[middle+6:middle+6+MacLen][k]} 0 <= k && k < MacLen ==> &raw[middle+6:middle+6+MacLen][k] == &raw[middle + 6 + k]) in + unfolding acc(sl.Bytes(raw, start, end), _) in + let inif2 := binary.BigEndian.Uint16(raw[middle+2:middle+4]) in + let egif2 := binary.BigEndian.Uint16(raw[middle+4:middle+6]) in + let op_inif2 := ifsToIO_ifs(inif2) in + let op_egif2 := ifsToIO_ifs(egif2) in + io.IO_HF_ { + InIF2: op_inif2, + EgIF2: op_egif2, + HVF: AbsMac(FromSliceToMacArray(raw[middle+6:middle+6+MacLen])), + } } -ghost -requires 0 <= currINF -requires InfoFieldOffset(currINF) < len(raw) -requires acc(&raw[InfoFieldOffset(currINF)], _) +ghost decreases -pure func Peer(raw []byte, currINF int) bool { - return raw[InfoFieldOffset(currINF)] & 0x2 == 0x2 +pure func (h HopField) ToIO_HF() (io.IO_HF) { + return io.IO_HF_ { + InIF2: ifsToIO_ifs(h.ConsIngress), + EgIF2: ifsToIO_ifs(h.ConsEgress), + HVF: AbsMac(h.Mac), + } } diff --git a/pkg/slayers/path/infofield.go b/pkg/slayers/path/infofield.go index 0e1a9442c..f3488e768 100644 --- a/pkg/slayers/path/infofield.go +++ b/pkg/slayers/path/infofield.go @@ -22,8 +22,10 @@ import ( "github.com/scionproto/scion/pkg/private/serrors" "github.com/scionproto/scion/pkg/private/util" + //@ bits "github.com/scionproto/scion/verification/utils/bitwise" //@ . "github.com/scionproto/scion/verification/utils/definitions" //@ "github.com/scionproto/scion/verification/utils/slices" + //@ "verification/io" ) // InfoLen is the size of an InfoField in bytes. @@ -60,14 +62,16 @@ type InfoField struct { // path.InfoLen.
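// The 8-byte layout decoded below is: a flag byte (carrying ConsDir and Peer), a reserved // byte, SegID (2 bytes, big endian), and Timestamp (4 bytes, big endian).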
// @ requires len(raw) >= InfoLen // @ preserves acc(inf) -// @ preserves acc(slices.AbsSlice_Bytes(raw, 0, InfoLen), R45) +// @ preserves acc(slices.Bytes(raw, 0, len(raw)), R45) // @ ensures err == nil +// @ ensures BytesToAbsInfoField(raw, 0) == +// @ inf.ToAbsInfoField() // @ decreases func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < InfoLen { return serrors.New("InfoField raw too short", "expected", InfoLen, "actual", len(raw)) } - //@ unfold acc(slices.AbsSlice_Bytes(raw, 0, InfoLen), R50) + //@ unfold acc(slices.Bytes(raw, 0, len(raw)), R50) inf.ConsDir = raw[0]&0x1 == 0x1 inf.Peer = raw[0]&0x2 == 0x2 //@ assert &raw[2:4][0] == &raw[2] && &raw[2:4][1] == &raw[3] @@ -75,7 +79,9 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { //@ assert &raw[4:8][0] == &raw[4] && &raw[4:8][1] == &raw[5] //@ assert &raw[4:8][2] == &raw[6] && &raw[4:8][3] == &raw[7] inf.Timestamp = binary.BigEndian.Uint32(raw[4:8]) - //@ fold acc(slices.AbsSlice_Bytes(raw, 0, InfoLen), R50) + //@ fold acc(slices.Bytes(raw, 0, len(raw)), R50) + //@ assert reveal BytesToAbsInfoField(raw, 0) == + //@ inf.ToAbsInfoField() return nil } @@ -83,40 +89,63 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) { // path.InfoLen. // @ requires len(b) >= InfoLen // @ preserves acc(inf, R10) -// @ preserves slices.AbsSlice_Bytes(b, 0, InfoLen) +// @ preserves slices.Bytes(b, 0, len(b)) // @ ensures err == nil +// @ ensures inf.ToAbsInfoField() == +// @ BytesToAbsInfoField(b, 0) // @ decreases func (inf *InfoField) SerializeTo(b []byte) (err error) { if len(b) < InfoLen { return serrors.New("buffer for InfoField too short", "expected", InfoLen, "actual", len(b)) } - //@ unfold slices.AbsSlice_Bytes(b, 0, InfoLen) + //@ ghost targetAbsInfo := inf.ToAbsInfoField() + //@ unfold slices.Bytes(b, 0, len(b)) b[0] = 0 if inf.ConsDir { b[0] |= 0x1 } + //@ ghost tmpInfo1 := BytesToAbsInfoFieldHelper(b, 0) + //@ bits.InfoFieldFirstByteSerializationLemmas() + //@ assert tmpInfo1.ConsDir == targetAbsInfo.ConsDir + //@ ghost firstByte := b[0] if inf.Peer { b[0] |= 0x2 } + //@ tmpInfo2 := BytesToAbsInfoFieldHelper(b, 0) + //@ assert tmpInfo2.Peer == (b[0] & 0x2 == 0x2) + //@ assert tmpInfo2.ConsDir == (b[0] & 0x1 == 0x1) + //@ assert tmpInfo2.Peer == targetAbsInfo.Peer + //@ assert tmpInfo2.ConsDir == tmpInfo1.ConsDir + //@ assert tmpInfo2.ConsDir == targetAbsInfo.ConsDir b[1] = 0 // reserved //@ assert &b[2:4][0] == &b[2] && &b[2:4][1] == &b[3] binary.BigEndian.PutUint16(b[2:4], inf.SegID) + //@ ghost tmpInfo3 := BytesToAbsInfoFieldHelper(b, 0) + //@ assert tmpInfo3.UInfo == targetAbsInfo.UInfo //@ assert &b[4:8][0] == &b[4] && &b[4:8][1] == &b[5] //@ assert &b[4:8][2] == &b[6] && &b[4:8][3] == &b[7] binary.BigEndian.PutUint32(b[4:8], inf.Timestamp) - //@ fold slices.AbsSlice_Bytes(b, 0, InfoLen) + //@ ghost tmpInfo4 := BytesToAbsInfoFieldHelper(b, 0) + //@ assert tmpInfo4.AInfo == targetAbsInfo.AInfo + //@ fold slices.Bytes(b, 0, len(b)) + //@ assert inf.ToAbsInfoField() == + //@ reveal BytesToAbsInfoField(b, 0) return nil } // UpdateSegID updates the SegID field by XORing the SegID field with the 2 // first bytes of the MAC. 
It is the beta calculation according to // https://docs.scion.org/en/latest/protocols/scion-header.html#hop-field-mac-computation +// @ requires hf.HVF == AbsMac(hfMac) // @ preserves acc(&inf.SegID) +// @ ensures AbsUInfoFromUint16(inf.SegID) == +// @ old(io.upd_uinfo(AbsUInfoFromUint16(inf.SegID), hf)) // @ decreases -func (inf *InfoField) UpdateSegID(hfMac [MacLen]byte) { +func (inf *InfoField) UpdateSegID(hfMac [MacLen]byte /*@, ghost hf io.IO_HF @*/) { //@ share hfMac inf.SegID = inf.SegID ^ binary.BigEndian.Uint16(hfMac[:2]) + // @ AssumeForIO(AbsUInfoFromUint16(inf.SegID) == old(io.upd_uinfo(AbsUInfoFromUint16(inf.SegID), hf))) } // @ decreases diff --git a/pkg/slayers/path/infofield_spec.gobra b/pkg/slayers/path/infofield_spec.gobra new file mode 100644 index 000000000..da554ab37 --- /dev/null +++ b/pkg/slayers/path/infofield_spec.gobra @@ -0,0 +1,117 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package path + +import ( + "verification/io" + sl "verification/utils/slices" + "verification/dependencies/encoding/binary" + . "verification/utils/definitions" +) + +ghost +decreases +pure func InfoFieldOffset(currINF, headerOffset int) int { + return headerOffset + InfoLen * currINF +} + +ghost +requires 0 <= currINF && 0 <= headerOffset +requires InfoFieldOffset(currINF, headerOffset) < len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func ConsDir(raw []byte, currINF int, headerOffset int) bool { + return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + raw[InfoFieldOffset(currINF, headerOffset)] & 0x1 == 0x1 +} + +ghost +requires 0 <= currINF && 0 <= headerOffset +requires InfoFieldOffset(currINF, headerOffset) < len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func Peer(raw []byte, currINF int, headerOffset int) bool { + return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + raw[InfoFieldOffset(currINF, headerOffset)] & 0x2 == 0x2 +} + +ghost +requires 0 <= currINF && 0 <= headerOffset +requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func Timestamp(raw []byte, currINF int, headerOffset int) io.IO_ainfo { + return let idx := InfoFieldOffset(currINF, headerOffset) + 4 in + unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + let _ := Asserting(forall i int :: { &raw[idx+i] } { &raw[idx:idx+4][i] } 0 <= i && i < 4 ==> + &raw[idx+i] == &raw[idx:idx+4][i]) in + io.IO_ainfo(binary.BigEndian.Uint32(raw[idx : idx + 4])) } + +ghost +requires 0 <= currINF && 0 <= headerOffset +requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func AbsUinfo(raw []byte, currINF int, headerOffset int) set[io.IO_msgterm] { + return let idx := InfoFieldOffset(currINF, headerOffset) + 2 in + unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + let _ := Asserting(forall k int :: {&raw[idx:idx+2][k]} 0 <= k && k < 2 ==> + &raw[idx:idx+2][k]
== &raw[idx + k]) in + AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[idx:idx+2])) +} + +ghost +opaque +requires 0 <= middle +requires middle+InfoLen <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), _) +decreases +pure func BytesToAbsInfoField(raw [] byte, middle int) (io.AbsInfoField) { + return unfolding acc(sl.Bytes(raw, 0, len(raw)), _) in + BytesToAbsInfoFieldHelper(raw, middle) +} + +ghost +requires 0 <= middle +requires middle+InfoLen <= len(raw) +requires forall i int :: { &raw[i] } middle <= i && i < len(raw) ==> + acc(&raw[i], _) +decreases +pure func BytesToAbsInfoFieldHelper(raw [] byte, middle int) (io.AbsInfoField) { + return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> + &raw[middle+2:middle+4][k] == &raw[middle+2 + k]) in + let _ := Asserting(forall k int :: {&raw[middle+4:middle+8][k]} 0 <= k && k < 4 ==> + &raw[middle+4:middle+8][k] == &raw[middle+4 + k]) in + io.AbsInfoField(io.AbsInfoField_{ + AInfo : io.IO_ainfo(binary.BigEndian.Uint32(raw[middle+4:middle+8])), + UInfo : AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[middle+2:middle+4])), + ConsDir : raw[middle] & 0x1 == 0x1, + Peer : raw[middle] & 0x2 == 0x2, + }) +} + +ghost +decreases +pure func (inf InfoField) ToAbsInfoField() (io.AbsInfoField) { + return io.AbsInfoField(io.AbsInfoField_{ + AInfo : io.IO_ainfo(inf.Timestamp), + UInfo : AbsUInfoFromUint16(inf.SegID), + ConsDir : inf.ConsDir, + Peer : inf.Peer, + }) +} \ No newline at end of file diff --git a/pkg/slayers/path/io_msgterm_spec.gobra b/pkg/slayers/path/io_msgterm_spec.gobra new file mode 100644 index 000000000..601db4004 --- /dev/null +++ b/pkg/slayers/path/io_msgterm_spec.gobra @@ -0,0 +1,76 @@ +// Copyright 2020 Anapaya Systems +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package path + +import ( + "verification/io" + . "verification/utils/definitions" +) + +// At the moment, we assume that all cryptographic operations performed at the code level +// imply the desired properties at the IO spec level because we cannot currently prove in +// Gobra the correctness of these operations. Given that we do not prove any properties +// about this function, we currently do not provide a definition for it. + +ghost +decreases +pure func AbsUInfoFromUint16(SegID uint16) set[io.IO_msgterm] + +ghost +decreases +pure func AbsMac(mac [MacLen]byte) (io.IO_msgterm) + +// The following function converts a slice with at least `MacLen` elements into +// an (exclusive) array containing the mac. Note that there are no permissions +// involved for accessing exclusive arrays. 
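+// A minimal usage sketch, mirroring HopField.DecodeFromBytes: after +// copy(h.Mac[:], raw[6:6+MacLen]), the slice and the array agree elementwise, so +// EqualBytesImplyEqualMac(raw[6:6+MacLen], h.Mac) (defined below) yields +// AbsMac(FromSliceToMacArray(raw[6:6+MacLen])) == AbsMac(h.Mac).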
+ghost +requires MacLen <= len(mac) +requires forall i int :: { &mac[i] } 0 <= i && i < MacLen ==> acc(&mac[i], _) +ensures len(res) == MacLen +ensures forall i int :: { res[i] } 0 <= i && i < MacLen ==> mac[i] == res[i] +decreases +pure func FromSliceToMacArray(mac []byte) (res [MacLen]byte) { + return [MacLen]byte{ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5] } +} + +ghost +requires len(mac1) == MacLen +requires forall i int :: { &mac1[i] } 0 <= i && i < MacLen ==> acc(&mac1[i], R50) +requires forall i int :: { &mac1[i] } 0 <= i && i < MacLen ==> mac1[i] == mac2[i] +ensures forall i int :: { &mac1[i] } 0 <= i && i < MacLen ==> acc(&mac1[i], R50) +ensures AbsMac(FromSliceToMacArray(mac1)) == AbsMac(mac2) +decreases +func EqualBytesImplyEqualMac(mac1 []byte, mac2 [MacLen]byte) { + mac1Arr := FromSliceToMacArray(mac1) + assert mac1Arr == mac2 + assert mac1Arr[0] == mac2[0] && + mac1Arr[1] == mac2[1] && + mac1Arr[2] == mac2[2] && + mac1Arr[3] == mac2[3] && + mac1Arr[4] == mac2[4] && + mac1Arr[5] == mac2[5] + assert len(mac1Arr) == len(mac2) + AbsMacArrayCongruence(mac1Arr, mac2) +} + +// The following obviously holds. However, for the time being, it cannot be proven due to an +// incompleteness in the array encoding (https://github.com/viperproject/gobra/issues/770). +ghost +requires mac1 == mac2 +ensures AbsMac(mac1) == AbsMac(mac2) +decreases +func AbsMacArrayCongruence(mac1 [MacLen]byte, mac2 [MacLen]byte) \ No newline at end of file diff --git a/pkg/slayers/path/mac.go b/pkg/slayers/path/mac.go index c3d91d918..df11254b7 100644 --- a/pkg/slayers/path/mac.go +++ b/pkg/slayers/path/mac.go @@ -20,7 +20,7 @@ import ( "encoding/binary" "hash" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) const MACBufferSize = 16 @@ -30,13 +30,13 @@ const MACBufferSize = 16 // this method does not modify info or hf. // Modifying the provided buffer after calling this function may change the returned HopField MAC. // @ requires h != nil && h.Mem() -// @ preserves len(buffer) >= MACBufferSize ==> slices.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ preserves len(buffer) >= MACBufferSize ==> sl.Bytes(buffer, 0, len(buffer)) // @ ensures h.Mem() // @ decreases func MAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) [MacLen]byte { mac := FullMAC(h, info, hf, buffer) var res /*@ @ @*/ [MacLen]byte - //@ unfold slices.AbsSlice_Bytes(mac, 0, MACBufferSize) + //@ unfold sl.Bytes(mac, 0, MACBufferSize) copy(res[:], mac[:MacLen] /*@, R1 @*/) return res } @@ -47,21 +47,21 @@ func MAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) [MacLen]byte { // Modifying the provided buffer after calling this function may change the returned HopField MAC. // In contrast to MAC(), FullMAC returns all the 16 bytes instead of only 6 bytes of the MAC. 
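// A minimal usage sketch (assuming a hash h whose Size() is at least 16, as asserted in // the body): full := FullMAC(h, info, hf, make([]byte, MACBufferSize)) returns all 16 MAC // bytes, while MAC(h, info, hf, buffer) keeps only the first MacLen of them.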
// @ requires h != nil && h.Mem() -// @ preserves len(buffer) >= MACBufferSize ==> slices.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ preserves len(buffer) >= MACBufferSize ==> sl.Bytes(buffer, 0, len(buffer)) // @ ensures h.Mem() -// @ ensures len(res) == MACBufferSize && slices.AbsSlice_Bytes(res, 0, MACBufferSize) +// @ ensures len(res) == MACBufferSize && sl.Bytes(res, 0, MACBufferSize) // @ decreases func FullMAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) (res []byte) { if len(buffer) < MACBufferSize { buffer = make([]byte, MACBufferSize) - //@ fold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) + //@ fold sl.Bytes(buffer, 0, len(buffer)) } h.Reset() MACInput(info.SegID, info.Timestamp, hf.ExpTime, hf.ConsIngress, hf.ConsEgress, buffer) - //@ unfold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) - //@ defer fold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) + //@ unfold sl.Bytes(buffer, 0, len(buffer)) + //@ defer fold sl.Bytes(buffer, 0, len(buffer)) // Write must not return an error: https://godoc.org/hash#Hash if _, err := h.Write(buffer); err != nil { // @ Unreachable() @@ -69,7 +69,7 @@ func FullMAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) (res []byt } //@ assert h.Size() >= 16 res = h.Sum(buffer[:0])[:16] - //@ fold slices.AbsSlice_Bytes(res, 0, MACBufferSize) + //@ fold sl.Bytes(res, 0, MACBufferSize) return res } @@ -88,11 +88,11 @@ func FullMAC(h hash.Hash, info InfoField, hf HopField, buffer []byte) (res []byt // +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ // // @ requires len(buffer) >= MACBufferSize -// @ preserves slices.AbsSlice_Bytes(buffer, 0, len(buffer)) +// @ preserves sl.Bytes(buffer, 0, len(buffer)) // @ decreases func MACInput(segID uint16, timestamp uint32, expTime uint8, consIngress, consEgress uint16, buffer []byte) { - //@ unfold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) + //@ unfold sl.Bytes(buffer, 0, len(buffer)) //@ assert &buffer[0:2][0] == &buffer[0] && &buffer[0:2][1] == &buffer[1] binary.BigEndian.PutUint16(buffer[0:2], 0) @@ -109,5 +109,5 @@ func MACInput(segID uint16, timestamp uint32, expTime uint8, binary.BigEndian.PutUint16(buffer[12:14], consEgress) //@ assert &buffer[14:16][0] == &buffer[14] && &buffer[14:16][1] == &buffer[15] binary.BigEndian.PutUint16(buffer[14:16], 0) - //@ fold slices.AbsSlice_Bytes(buffer, 0, len(buffer)) + //@ fold sl.Bytes(buffer, 0, len(buffer)) } diff --git a/pkg/slayers/path/onehop/onehop.go b/pkg/slayers/path/onehop/onehop.go index bd0f626f9..868147f76 100644 --- a/pkg/slayers/path/onehop/onehop.go +++ b/pkg/slayers/path/onehop/onehop.go @@ -21,7 +21,7 @@ import ( "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" //@ . "github.com/scionproto/scion/verification/utils/definitions" - //@ "github.com/scionproto/scion/verification/utils/slices" + //@ sl "github.com/scionproto/scion/verification/utils/slices" ) // PathLen is the length of a serialized one hop path in bytes. 
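// A one hop path consists of exactly one info field followed by two hop fields, so // PathLen = path.InfoLen + 2*path.HopLen = 8 + 2*12 = 32 bytes.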
@@ -66,7 +66,7 @@ type Path struct { } // @ requires o.NonInitMem() -// @ preserves slices.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R42) // @ ensures (len(data) >= PathLen) == (r == nil) // @ ensures r == nil ==> o.Mem(data) // @ ensures r != nil ==> o.NonInitMem() @@ -79,39 +79,33 @@ func (o *Path) DecodeFromBytes(data []byte) (r error) { } offset := 0 //@ unfold o.NonInitMem() - //@ slices.SplitByIndex_Bytes(data, 0, len(data), path.InfoLen, R1) - //@ slices.Reslice_Bytes(data, 0, path.InfoLen, R1) + //@ sl.SplitRange_Bytes(data, 0, path.InfoLen, R42) if err := o.Info.DecodeFromBytes(data[:path.InfoLen]); err != nil { // @ Unreachable() return err } - //@ slices.Unslice_Bytes(data, 0, path.InfoLen, R1) + //@ sl.CombineRange_Bytes(data,0, path.InfoLen, R42) offset += path.InfoLen - //@ slices.SplitByIndex_Bytes(data, offset, len(data), offset+path.HopLen, R1) - //@ slices.Reslice_Bytes(data, offset, offset+path.HopLen, R1) + //@ sl.SplitRange_Bytes(data, offset, offset+path.HopLen, R42) if err := o.FirstHop.DecodeFromBytes(data[offset : offset+path.HopLen]); err != nil { // @ Unreachable() return err } - //@ slices.Unslice_Bytes(data, offset, offset+path.HopLen, R1) - //@ slices.CombineAtIndex_Bytes(data, 0, offset+path.HopLen, offset, R1) + //@ sl.CombineRange_Bytes(data, offset, offset+path.HopLen, R42) offset += path.HopLen - //@ slices.SplitByIndex_Bytes(data, offset, len(data), offset+path.HopLen, R1) - //@ slices.Reslice_Bytes(data, offset, offset+path.HopLen, R1) + //@ sl.SplitRange_Bytes(data, offset, offset+path.HopLen, R42) r = o.SecondHop.DecodeFromBytes(data[offset : offset+path.HopLen]) - //@ slices.Unslice_Bytes(data, offset, offset+path.HopLen, R1) - //@ slices.CombineAtIndex_Bytes(data, offset, len(data), offset+path.HopLen, R1) - //@ slices.CombineAtIndex_Bytes(data, 0, len(data), offset, R1) + //@ sl.CombineRange_Bytes(data, offset, offset+path.HopLen, R42) //@ ghost if r == nil { fold o.Mem(data) } else { fold o.NonInitMem() } return r } // @ preserves acc(o.Mem(ubuf), R1) -// @ preserves acc(slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R1) -// @ preserves slices.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R1) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures (len(b) >= PathLen) == (err == nil) // @ ensures err != nil ==> err.ErrorMem() -// @ ensures err == nil ==> o.Len(ubuf) <= len(b) +// @ ensures err == nil ==> o.LenSpec(ubuf) <= len(b) // @ decreases func (o *Path) SerializeTo(b []byte /*@, ubuf []byte @*/) (err error) { if len(b) < PathLen { @@ -120,32 +114,23 @@ func (o *Path) SerializeTo(b []byte /*@, ubuf []byte @*/) (err error) { } offset := 0 //@ unfold acc(o.Mem(ubuf), R1) - //@ slices.SplitByIndex_Bytes(b, 0, len(b), path.InfoLen, writePerm) - //@ slices.Reslice_Bytes(b, 0, path.InfoLen, writePerm) + //@ sl.SplitRange_Bytes(b, 0, offset+path.InfoLen, writePerm) if err := o.Info.SerializeTo(b[:offset+path.InfoLen]); err != nil { - //@ slices.Unslice_Bytes(b, 0, path.InfoLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), path.InfoLen, writePerm) + //@ sl.CombineRange_Bytes(b, 0, offset+path.InfoLen, writePerm) return err } - //@ slices.Unslice_Bytes(b, 0, path.InfoLen, writePerm) + //@ sl.CombineRange_Bytes(b, 0, offset+path.InfoLen, writePerm) offset += path.InfoLen - //@ slices.SplitByIndex_Bytes(b, offset, len(b), offset+path.HopLen, writePerm) - //@ slices.Reslice_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ sl.SplitRange_Bytes(b, offset, 
offset+path.HopLen, writePerm) if err := o.FirstHop.SerializeTo(b[offset : offset+path.HopLen]); err != nil { - //@ slices.Unslice_Bytes(b, offset, offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, offset, len(b), offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), offset, writePerm) + //@ sl.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) return err } - //@ slices.Unslice_Bytes(b, offset, offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, offset+path.HopLen, offset, writePerm) + //@ sl.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) offset += path.HopLen - //@ slices.SplitByIndex_Bytes(b, offset, len(b), offset+path.HopLen, writePerm) - //@ slices.Reslice_Bytes(b, offset, offset+path.HopLen, writePerm) + //@ sl.SplitRange_Bytes(b, offset, offset+path.HopLen, writePerm) err = o.SecondHop.SerializeTo(b[offset : offset+path.HopLen]) - //@ slices.Unslice_Bytes(b, offset, offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, offset, len(b), offset+path.HopLen, writePerm) - //@ slices.CombineAtIndex_Bytes(b, 0, len(b), offset, writePerm) + //@ sl.CombineRange_Bytes(b, offset, offset+path.HopLen, writePerm) //@ fold acc(o.Mem(ubuf), R1) return err } @@ -153,7 +138,7 @@ func (o *Path) SerializeTo(b []byte /*@, ubuf []byte @*/) (err error) { // ToSCIONDecoded converts the one hop path in to a normal SCION path in the // decoded format. // @ preserves o.Mem(ubuf) -// @ preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures err == nil ==> (sd != nil && sd.Mem(ubuf)) // @ ensures err != nil ==> err.ErrorMem() // @ decreases @@ -216,7 +201,7 @@ func (o *Path) ToSCIONDecoded( /*@ ghost ubuf []byte @*/ ) (sd *scion.Decoded, e // Reverse a OneHop path that returns a reversed SCION path. // @ requires o.Mem(ubuf) -// @ preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures err == nil ==> p != nil // @ ensures err == nil ==> p.Mem(ubuf) // @ ensures err == nil ==> typeOf(p) == type[*scion.Decoded] @@ -234,8 +219,7 @@ func (o *Path) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, err error) { return sp.Reverse( /*@ ubuf @*/ ) } -// @ pure -// @ ensures l == PathLen +// @ ensures l == o.LenSpec(ubuf) // @ decreases func (o *Path) Len( /*@ ghost ubuf []byte @*/ ) (l int) { return PathLen diff --git a/pkg/slayers/path/onehop/onehop_spec.gobra b/pkg/slayers/path/onehop/onehop_spec.gobra index 9929d3e98..84d42dc75 100644 --- a/pkg/slayers/path/onehop/onehop_spec.gobra +++ b/pkg/slayers/path/onehop/onehop_spec.gobra @@ -48,7 +48,21 @@ requires acc(o.Mem(ub), _) ensures b decreases pure func (o *Path) InferSizeUb(ghost ub []byte) (b bool) { - return unfolding acc(o.Mem(ub), _) in o.Len(ub) <= len(ub) + return unfolding acc(o.Mem(ub), _) in o.LenSpec(ub) <= len(ub) +} + +ghost +pure +decreases +func (p *Path) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + +ghost +pure +decreases +func (p *Path) LenSpec(ghost ub []byte) (l int) { + return PathLen } (*Path) implements path.Path \ No newline at end of file diff --git a/pkg/slayers/path/path.go b/pkg/slayers/path/path.go index 7b03e90e8..951af3ab6 100644 --- a/pkg/slayers/path/path.go +++ b/pkg/slayers/path/path.go @@ -64,54 +64,67 @@ func (t Type) String() string { // Path is the path contained in the SCION header. type Path interface { // (VerifiedSCION) Must hold for every valid Path. 
- //@ pred Mem(underlyingBuf []byte) + //@ pred Mem(ub []byte) // (VerifiedSCION) Must imply the resources required to initialize // a new instance of a predicate. //@ pred NonInitMem() // SerializeTo serializes the path into the provided buffer. // (VerifiedSCION) There are implementations of this interface that modify the underlying // structure when serializing (e.g. scion.Raw) - //@ preserves sl.AbsSlice_Bytes(underlyingBuf, 0, len(underlyingBuf)) - //@ preserves acc(Mem(underlyingBuf), R1) - //@ preserves sl.AbsSlice_Bytes(b, 0, len(b)) + //@ preserves sl.Bytes(ub, 0, len(ub)) + //@ preserves acc(Mem(ub), R1) + //@ preserves sl.Bytes(b, 0, len(b)) //@ ensures e != nil ==> e.ErrorMem() //@ decreases - SerializeTo(b []byte /*@, ghost underlyingBuf []byte @*/) (e error) + SerializeTo(b []byte /*@, ghost ub []byte @*/) (e error) // DecodesFromBytes decodes the path from the provided buffer. // (VerifiedSCION) There are implementations of this interface (e.g., scion.Raw) that // store b and use it as internal data. //@ requires NonInitMem() - //@ preserves sl.AbsSlice_Bytes(b, 0, len(b)) + //@ preserves acc(sl.Bytes(b, 0, len(b)), R42) //@ ensures err == nil ==> Mem(b) //@ ensures err != nil ==> err.ErrorMem() //@ ensures err != nil ==> NonInitMem() + //@ ensures err == nil ==> IsValidResultOfDecoding(b, err) //@ decreases DecodeFromBytes(b []byte) (err error) + //@ ghost + //@ pure + //@ requires Mem(b) + //@ requires acc(sl.Bytes(b, 0, len(b)), R42) + //@ decreases + //@ IsValidResultOfDecoding(b []byte, err error) (res bool) // Reverse reverses a path such that it can be used in the reversed direction. // XXX(shitz): This method should possibly be moved to a higher-level path manipulation package. - //@ requires Mem(underlyingBuf) - //@ preserves sl.AbsSlice_Bytes(underlyingBuf, 0, len(underlyingBuf)) + //@ requires Mem(ub) + //@ preserves sl.Bytes(ub, 0, len(ub)) //@ ensures e == nil ==> p != nil - //@ ensures e == nil ==> p.Mem(underlyingBuf) + //@ ensures e == nil ==> p.Mem(ub) //@ ensures e != nil ==> e.ErrorMem() //@ decreases - Reverse( /*@ ghost underlyingBuf []byte @*/ ) (p Path, e error) - // Len returns the length of a path in bytes. + Reverse( /*@ ghost ub []byte @*/ ) (p Path, e error) + //@ ghost //@ pure - //@ requires acc(Mem(underlyingBuf), _) - //@ ensures l >= 0 + //@ requires acc(Mem(ub), _) + //@ ensures 0 <= l //@ decreases - Len( /*@ ghost underlyingBuf []byte @*/ ) (l int) + //@ LenSpec(ghost ub []byte) (l int) + + // Len returns the length of a path in bytes. + //@ preserves acc(Mem(ub), R50) + //@ ensures l == LenSpec(ub) + //@ decreases + Len( /*@ ghost ub []byte @*/ ) (l int) // Type returns the type of a path. 
//@ pure - //@ requires acc(Mem(underlyingBuf), _) + //@ requires acc(Mem(ub), _) //@ decreases - Type( /*@ ghost underlyingBuf []byte @*/ ) Type + Type( /*@ ghost ub []byte @*/ ) Type //@ ghost - //@ requires Mem(underlyingBuf) + //@ requires Mem(ub) //@ ensures NonInitMem() //@ decreases - //@ DowngradePerm(ghost underlyingBuf []byte) + //@ DowngradePerm(ghost ub []byte) } type metadata struct { @@ -202,24 +215,24 @@ type rawPath struct { pathType Type } -// @ preserves acc(p.Mem(underlyingBuf), R10) -// @ preserves acc(sl.AbsSlice_Bytes(underlyingBuf, 0, len(underlyingBuf)), R10) -// @ preserves sl.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves acc(p.Mem(ub), R10) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R10) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures e == nil // @ decreases -func (p *rawPath) SerializeTo(b []byte /*@, ghost underlyingBuf []byte @*/) (e error) { - //@ unfold sl.AbsSlice_Bytes(b, 0, len(b)) - //@ unfold acc(p.Mem(underlyingBuf), R10) - //@ unfold acc(sl.AbsSlice_Bytes(p.raw, 0, len(p.raw)), R11) +func (p *rawPath) SerializeTo(b []byte /*@, ghost ub []byte @*/) (e error) { + //@ unfold sl.Bytes(b, 0, len(b)) + //@ unfold acc(p.Mem(ub), R10) + //@ unfold acc(sl.Bytes(p.raw, 0, len(p.raw)), R11) copy(b, p.raw /*@, R11 @*/) - //@ fold acc(sl.AbsSlice_Bytes(p.raw, 0, len(p.raw)), R11) - //@ fold acc(p.Mem(underlyingBuf), R10) - //@ fold sl.AbsSlice_Bytes(b, 0, len(b)) + //@ fold acc(sl.Bytes(p.raw, 0, len(p.raw)), R11) + //@ fold acc(p.Mem(ub), R10) + //@ fold sl.Bytes(b, 0, len(b)) return nil } // @ requires p.NonInitMem() -// @ preserves sl.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves acc(sl.Bytes(b, 0, len(b)), R42) // @ ensures p.Mem(b) // @ ensures e == nil // @ decreases @@ -232,21 +245,20 @@ func (p *rawPath) DecodeFromBytes(b []byte) (e error) { // @ ensures e != nil && e.ErrorMem() // @ decreases -func (p *rawPath) Reverse( /*@ ghost underlyingBuf []byte @*/ ) (r Path, e error) { +func (p *rawPath) Reverse( /*@ ghost ub []byte @*/ ) (r Path, e error) { return nil, serrors.New("not supported") } -// @ pure -// @ requires acc(p.Mem(underlyingBuf), _) -// @ ensures l >= 0 +// @ preserves acc(p.Mem(ub), R50) +// @ ensures l == p.LenSpec(ub) // @ decreases -func (p *rawPath) Len( /*@ ghost underlyingBuf []byte @*/ ) (l int) { - return /*@ unfolding acc(p.Mem(underlyingBuf), _) in @*/ len(p.raw) +func (p *rawPath) Len( /*@ ghost ub []byte @*/ ) (l int) { + return /*@ unfolding acc(p.Mem(ub), R50) in @*/ len(p.raw) } // @ pure -// @ requires acc(p.Mem(underlyingBuf), _) +// @ requires acc(p.Mem(ub), _) // @ decreases -func (p *rawPath) Type( /*@ ghost underlyingBuf []byte @*/ ) Type { - return /*@ unfolding acc(p.Mem(underlyingBuf), _) in @*/ p.pathType +func (p *rawPath) Type( /*@ ghost ub []byte @*/ ) Type { + return /*@ unfolding acc(p.Mem(ub), _) in @*/ p.pathType } diff --git a/pkg/slayers/path/path_spec.gobra b/pkg/slayers/path/path_spec.gobra index f621c1f6d..0963b1cd0 100644 --- a/pkg/slayers/path/path_spec.gobra +++ b/pkg/slayers/path/path_spec.gobra @@ -37,6 +37,22 @@ func (p *rawPath) DowngradePerm(ghost buf []byte) { fold p.NonInitMem() } +ghost +pure +decreases +func (p *rawPath) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + +ghost +pure +requires acc(p.Mem(ub), _) +ensures 0 <= l +decreases +func (p *rawPath) LenSpec(ghost ub []byte) (l int) { + return unfolding acc(p.Mem(ub), _) in len(p.raw) +} + (*rawPath) implements Path /** End of rawPath spec **/ diff --git a/pkg/slayers/path/path_spec_test.gobra 
b/pkg/slayers/path/path_spec_test.gobra index 75f7f25f4..c882f0a0d 100644 --- a/pkg/slayers/path/path_spec_test.gobra +++ b/pkg/slayers/path/path_spec_test.gobra @@ -16,11 +16,11 @@ package path -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" func foldMem_test() { r := &rawPath{} - fold slices.AbsSlice_Bytes(r.raw, 0, 0) + fold sl.Bytes(r.raw, 0, 0) fold r.Mem(nil) } diff --git a/pkg/slayers/path/scion/BUILD.bazel b/pkg/slayers/path/scion/BUILD.bazel index a4a57739f..c05261537 100644 --- a/pkg/slayers/path/scion/BUILD.bazel +++ b/pkg/slayers/path/scion/BUILD.bazel @@ -24,6 +24,7 @@ go_test( ], deps = [ ":go_default_library", + "//pkg/private/serrors:go_default_library", "//pkg/slayers/path:go_default_library", "@com_github_stretchr_testify//assert:go_default_library", "@com_github_stretchr_testify//require:go_default_library", diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go index f8960fd72..cbd0e2078 100644 --- a/pkg/slayers/path/scion/base.go +++ b/pkg/slayers/path/scion/base.go @@ -27,10 +27,17 @@ import ( //@ sl "github.com/scionproto/scion/verification/utils/slices" ) -// MetaLen is the length of the PathMetaHeader. -const MetaLen = 4 +const ( + // MaxINFs is the maximum number of info fields in a SCION path. + MaxINFs = 3 + // MaxHops is the maximum number of hop fields in a SCION path. + MaxHops = 64 -const PathType path.Type = 1 + // MetaLen is the length of the PathMetaHeader. + MetaLen = 4 + + PathType path.Type = 1 +) // @ requires path.PathPackageMem() // @ requires !path.Registered(PathType) @@ -72,21 +79,16 @@ type Base struct { } // @ requires s.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R50) -// @ ensures r != nil ==> (s.NonInitMem() && r.ErrorMem()) -// @ ensures r == nil ==> ( +// @ preserves acc(sl.Bytes(data, 0, len(data)), R50) +// @ ensures r != nil ==> +// @ s.NonInitMem() && r.ErrorMem() +// @ ensures r == nil ==> // @ s.Mem() && -// @ let lenD := len(data) in -// @ MetaLen <= lenD && -// @ let b0 := sl.GetByte(data, 0, lenD, 0) in -// @ let b1 := sl.GetByte(data, 0, lenD, 1) in -// @ let b2 := sl.GetByte(data, 0, lenD, 2) in -// @ let b3 := sl.GetByte(data, 0, lenD, 3) in -// @ let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in -// @ let metaHdr := DecodedFrom(line) in -// @ metaHdr == s.GetMetaHdr() && -// @ s.InfsMatchHfs()) +// @ s.GetBase().WeaklyValid() && +// @ s.DecodeFromBytesSpec(data) // @ ensures len(data) < MetaLen ==> r != nil +// posts for IO: +// @ ensures r == nil ==> s.GetBase().EqAbsHeader(data) // @ decreases func (s *Base) DecodeFromBytes(data []byte) (r error) { // PathMeta takes care of bounds check. @@ -142,6 +144,15 @@ func (s *Base) DecodeFromBytes(data []byte) (r error) { //@ assume int(s.PathMeta.SegLen[i]) >= 0 s.NumHops += int(s.PathMeta.SegLen[i]) } + // We must check the validity of NumHops. It is possible to fit more than 64 hops in + // the length of a scion header. Yet a path of more than 64 hops cannot be followed to + // the end because CurrHF is only 6 bits long. 
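+	// For example, SegLen values of 40, 40, 0 yield NumHops == 80, which a maximal +	// header (HdrLen addresses up to 255 * 4 = 1020 bytes) can still carry, yet the +	// 6-bit CurrHF can only ever address hop indices 0 through 63.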
+ if s.NumHops > MaxHops { + //@ defer fold s.NonInitMem() + return serrors.New("NumHops too large", "NumHops", s.NumHops, "Maximum", MaxHops) + } + //@ assert s.PathMeta.EqAbsHeader(data) + //@ assert s.EqAbsHeader(data) //@ fold s.Mem() return nil } @@ -153,8 +164,8 @@ func (s *Base) DecodeFromBytes(data []byte) (r error) { // @ old(int(s.GetCurrHF()) >= s.GetNumHops()-1)) // @ ensures e == nil ==> ( // @ s.Mem() && -// @ let oldBase := old(unfolding s.Mem() in *s) in -// @ let newBase := (unfolding s.Mem() in *s) in +// @ let oldBase := old(s.GetBase()) in +// @ let newBase := s.GetBase() in // @ newBase == oldBase.IncPathSpec()) // @ ensures e != nil ==> (s.NonInitMem() && e.ErrorMem()) // @ decreases @@ -178,7 +189,7 @@ func (s *Base) IncPath() (e error) { // IsXover returns whether we are at a crossover point. // @ preserves acc(s.Mem(), R45) -// @ ensures r == s.IsXoverSpec() +// @ ensures r == s.GetBase().IsXoverSpec() // @ decreases func (s *Base) IsXover() (r bool) { //@ unfold acc(s.Mem(), R45) @@ -212,11 +223,12 @@ func (s *Base) infIndexForHF(hf uint8) (r uint8) { } } -// Len returns the length of the path in bytes. +// Len returns the length of the path in bytes. That is, the number of bytes required to +// store it, based on the metadata. The actual number of bytes available to contain it +// can be inferred from the common header field HdrLen. The two may or may not be consistent. // @ pure // @ requires acc(s.Mem(), _) // @ ensures r >= MetaLen -// @ ensures r == (unfolding acc(s.Mem(), _) in (MetaLen + int(s.NumINF)*path.InfoLen + int(s.NumHops)*path.HopLen)) // @ decreases func (s *Base) Len() (r int) { return /*@ unfolding acc(s.Mem(), _) in @*/ MetaLen + s.NumINF*path.InfoLen + s.NumHops*path.HopLen @@ -240,20 +252,10 @@ type MetaHdr struct { // DecodeFromBytes populates the fields from a raw buffer. The buffer must be of length >= // scion.MetaLen.
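// The meta header packs its fields into one 32-bit line: CurrINF occupies the top 2 bits, // CurrHF the next 6, 6 bits are reserved, and the three 6-bit SegLen values occupy // bits 17-12, 11-6, and 5-0.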
// @ preserves acc(m) -// @ preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) +// @ preserves acc(sl.Bytes(raw, 0, len(raw)), R50) // @ ensures (len(raw) >= MetaLen) == (e == nil) -// @ ensures e == nil ==> ( -// @ MetaLen <= len(raw) && -// @ 0 <= m.CurrINF && m.CurrINF <= 3 && -// @ 0 <= m.CurrHF && m.CurrHF < 64 && -// @ m.SegsInBounds() && -// @ let lenR := len(raw) in -// @ let b0 := sl.GetByte(raw, 0, lenR, 0) in -// @ let b1 := sl.GetByte(raw, 0, lenR, 1) in -// @ let b2 := sl.GetByte(raw, 0, lenR, 2) in -// @ let b3 := sl.GetByte(raw, 0, lenR, 3) in -// @ let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in -// @ DecodedFrom(line) == *m) +// @ ensures e == nil ==> m.InBounds() +// @ ensures e == nil ==> m.DecodeFromBytesSpec(raw) // @ ensures e != nil ==> e.ErrorMem() // @ decreases func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { @@ -261,7 +263,7 @@ func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { // (VerifiedSCION) added cast, otherwise Gobra cannot verify call return serrors.New("MetaHdr raw too short", "expected", int(MetaLen), "actual", int(len(raw))) } - //@ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) + //@ unfold acc(sl.Bytes(raw, 0, len(raw)), R50) line := binary.BigEndian.Uint32(raw) m.CurrINF = uint8(line >> 30) m.CurrHF = uint8(line>>24) & 0x3F @@ -273,7 +275,7 @@ func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { //@ bit.And3fAtMost64(uint8(line>>12)) //@ bit.And3fAtMost64(uint8(line>>6)) //@ bit.And3fAtMost64(uint8(line)) - //@ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) + //@ fold acc(sl.Bytes(raw, 0, len(raw)), R50) return nil } @@ -281,27 +283,22 @@ func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { // scion.MetaLen. // @ requires len(b) >= MetaLen // @ preserves acc(m, R50) -// @ preserves sl.AbsSlice_Bytes(b, 0, len(b)) +// @ preserves sl.Bytes(b, 0, len(b)) // @ ensures e == nil -// @ ensures let lenR := len(b) in -// @ let b0 := sl.GetByte(b, 0, lenR, 0) in -// @ let b1 := sl.GetByte(b, 0, lenR, 1) in -// @ let b2 := sl.GetByte(b, 0, lenR, 2) in -// @ let b3 := sl.GetByte(b, 0, lenR, 3) in -// @ let v := m.SerializedToLine() in -// @ binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, v) +// @ ensures m.SerializeToSpec(b) // @ decreases func (m *MetaHdr) SerializeTo(b []byte) (e error) { if len(b) < MetaLen { + // @ Unreachable() return serrors.New("buffer for MetaHdr too short", "expected", MetaLen, "actual", len(b)) } line := uint32(m.CurrINF)<<30 | uint32(m.CurrHF&0x3F)<<24 line |= uint32(m.SegLen[0]&0x3F) << 12 line |= uint32(m.SegLen[1]&0x3F) << 6 line |= uint32(m.SegLen[2] & 0x3F) - //@ unfold acc(sl.AbsSlice_Bytes(b, 0, len(b))) + //@ unfold acc(sl.Bytes(b, 0, len(b))) binary.BigEndian.PutUint32(b, line) - //@ fold acc(sl.AbsSlice_Bytes(b, 0, len(b))) + //@ fold acc(sl.Bytes(b, 0, len(b))) return nil } diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra index b12aec326..ea5db53d5 100644 --- a/pkg/slayers/path/scion/base_spec.gobra +++ b/pkg/slayers/path/scion/base_spec.gobra @@ -20,6 +20,8 @@ import ( "encoding/binary" "github.com/scionproto/scion/pkg/slayers/path" sl "github.com/scionproto/scion/verification/utils/slices" + + . "github.com/scionproto/scion/verification/utils/definitions" ) pred (b *Base) NonInitMem() { @@ -27,21 +29,14 @@ pred (b *Base) NonInitMem() { } // This predicate is established right after DecodeFromBytes. 
-// Because this method does not perform any bounds checks, it -// is not possible to have assertions in this invariant about -// how the fileds of Base compare to those of its MetaHdr field. pred (b *Base) Mem() { - acc(&b.NumINF) && - acc(&b.NumHops) && - acc(&b.PathMeta.CurrINF) && - acc(&b.PathMeta.CurrHF) && - acc(&b.PathMeta.SegLen[0]) && - acc(&b.PathMeta.SegLen[1]) && - acc(&b.PathMeta.SegLen[2]) && - 0 <= b.NumINF && b.NumINF <= MaxINFs && - // the program defines 64 as the maximum number of hops, - // but this does not seem to be enforced anywhere. - 0 <= b.NumHops && // b.NumHops <= MaxHops && + acc(b) && + // In the future, we might want to drop the properties + // below, as they are already present in WeaklyValid. + // This requires a bit of refactoring to pass around the + // knowledge that WeaklyValid holds between methods. + 0 <= b.NumINF && b.NumINF <= MaxINFs && + 0 <= b.NumHops && b.NumHops <= MaxHops && (0 < b.NumINF ==> 0 < b.NumHops) } @@ -59,64 +54,59 @@ pure func (b Base) ValidCurrHfSpec() bool { ghost decreases -pure func (b Base) ValidCurrIdxsSpec() bool { +pure func (b Base) ValidCurrFieldsSpec() bool { return 0 <= b.NumINF && b.NumINF <= MaxINFs && 0 <= b.NumHops && b.NumHops <= MaxHops && - b.ValidCurrHfSpec() && - b.ValidCurrInfSpec() && - 0 <= b.PathMeta.SegLen[0] && b.PathMeta.SegLen[0] < MaxHops && - 0 <= b.PathMeta.SegLen[1] && b.PathMeta.SegLen[1] < MaxHops && - 0 <= b.PathMeta.SegLen[2] && b.PathMeta.SegLen[2] < MaxHops && - (b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) && - (b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) && - (b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) && - (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> - b.PathMeta.SegLen[i] != 0) && - (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> - b.PathMeta.SegLen[i] == 0) - // Surprisingly, the following does not seem to be needed - // b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) + b.ValidCurrInfSpec() && b.ValidCurrHfSpec() } +// A `Base` is weakly valid when the fields `NumINF` and `NumHops` are, +// at most, `MaxINFs` and `MaxHops`, respectively, and the field `SegLen` +// is valid as well. `DecodeFromBytes` guarantees that all `Base`s +// obtained from raw bytes will be weakly valid. ghost decreases -pure func (b Base) InfsMatchHfsSpec() bool { - return 0 <= b.NumINF && b.NumINF <= 3 && - (b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) && - (b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) && - (b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) && - (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> - b.PathMeta.SegLen[i] != 0) && - (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> - b.PathMeta.SegLen[i] == 0) +pure func (b Base) WeaklyValid() bool { + return 0 <= b.NumINF && b.NumINF <= MaxINFs && + 0 <= b.NumHops && b.NumHops <= MaxHops && + (0 < b.NumINF ==> 0 < b.NumHops) && + b.PathMeta.InBounds() && b.NumsCompatibleWithSegLen() } +// A `Base` is valid (a.k.a. fully valid) iff it is weakly valid +// and its `CurrHF` and `CurrINF` are within bounds, its `CurrHF` +// is compatible with its `CurrINF`, and there are no singleton +// segments. In the past, there used to be another validity +// criterion, stronger than WeaklyValid and weaker than FullyValid.
+// This was known as StronglyValid and has been deprecated. ghost -requires acc(b.Mem(), _) decreases -pure func (b *Base) ValidCurrINF() bool { - return unfolding acc(b.Mem(), _) in (*b).ValidCurrInfSpec() +pure func (b Base) Valid() bool { + return b.WeaklyValid() && + b.ValidCurrFieldsSpec() && + b.CurrInfMatchesCurrHFSpec() && + b.PathMeta.SegLen[0] != 1 && + b.PathMeta.SegLen[1] != 1 && + b.PathMeta.SegLen[2] != 1 } ghost -requires acc(b.Mem(), _) decreases -pure func (b *Base) ValidCurrHF() bool { - return unfolding acc(b.Mem(), _) in (*b).ValidCurrHfSpec() +pure func (b Base) CurrInfMatchesCurrHFSpec() bool { + return b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) } ghost -requires acc(b.Mem(), _) decreases -pure func (b *Base) ValidCurrIdxs() bool { - return unfolding acc(b.Mem(), _) in (*b).ValidCurrIdxsSpec() -} - -ghost -requires acc(b.Mem(), _) -decreases -pure func (b *Base) InfsMatchHfs() bool { - return unfolding acc(b.Mem(), _) in (*b).InfsMatchHfsSpec() +pure func (b Base) NumsCompatibleWithSegLen() bool { + return 0 <= b.NumINF && b.NumINF <= 3 && + (b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) && + (b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) && + (b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) && + (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> + b.PathMeta.SegLen[i] != 0) && + (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> + b.PathMeta.SegLen[i] == 0) } ghost @@ -142,6 +132,13 @@ pure func (s *Base) GetMetaHdr() MetaHdr { return unfolding acc(s.Mem(), _) in s.PathMeta } +ghost +requires acc(s.Mem(), _) +decreases +pure func (s *Base) GetBase() Base { + return unfolding acc(s.Mem(), _) in *s +} + ghost requires acc(s.Mem(), _) decreases @@ -159,27 +156,68 @@ pure func (s Base) InfForHfSpec(hf uint8) (r uint8) { } ghost -requires acc(s.Mem(), _) decreases -pure func (s *Base) IsXoverSpec() bool { - return unfolding acc(s.Mem(), _) in ( - s.PathMeta.CurrHF+1 < uint8(s.NumHops) && - s.PathMeta.CurrINF != s.InfForHfSpec(s.PathMeta.CurrHF+1)) +pure func (s Base) IsXoverSpec() bool { + return s.PathMeta.CurrHF+1 < uint8(s.NumHops) && + s.PathMeta.CurrINF != s.InfForHfSpec(s.PathMeta.CurrHF+1) } ghost requires s.NumINF != 0 requires int(s.PathMeta.CurrHF) < s.NumHops-1 -ensures s.ValidCurrIdxsSpec() ==> res.ValidCurrIdxsSpec() +ensures s.WeaklyValid() ==> res.WeaklyValid() +ensures s.Valid() ==> res.Valid() decreases pure func (s Base) IncPathSpec() (res Base) { - return Base{ + return Base { PathMeta: MetaHdr{s.InfForHfSpec(s.PathMeta.CurrHF+1), s.PathMeta.CurrHF+1, s.PathMeta.SegLen}, NumINF: s.NumINF, NumHops: s.NumHops, } } +ghost +requires s.Valid() +ensures s.IsXoverSpec() ==> !s.IncPathSpec().IsXoverSpec() +decreases +func (s Base) NotIsXoverAfterIncPath() {} + +ghost +decreases +pure func (b Base) ReverseSpec() Base { + return Base { + PathMeta: b.ReverseMetaHdrSpec(), + NumINF: b.NumINF, + NumHops: b.NumHops, + } +} + +ghost +decreases +pure func (b Base) ReverseMetaHdrSpec() MetaHdr { + return MetaHdr { + CurrINF: uint8(b.NumINF) - b.PathMeta.CurrINF - 1, + CurrHF: uint8(b.NumHops) - b.PathMeta.CurrHF - 1, + SegLen: b.ReverseSegLen(), + } +} + +ghost +decreases +pure func (b Base) ReverseSegLen() [3]uint8 { + return (match b.NumINF { + case 2: [3]uint8{ b.PathMeta.SegLen[1], b.PathMeta.SegLen[0], b.PathMeta.SegLen[2] } + case 3: [3]uint8{ b.PathMeta.SegLen[2], b.PathMeta.SegLen[1], b.PathMeta.SegLen[0]
} + default: b.PathMeta.SegLen + }) +} + +ghost +requires b.Valid() +ensures b.ReverseSpec().Valid() +decreases +func (b Base) ReversingBaseValidSegLenHasValidSegLen() { } + ghost requires b.Mem() ensures b.NonInitMem() @@ -199,12 +237,46 @@ pure func DecodedFrom(line uint32) MetaHdr { } } +ghost +requires acc(sl.Bytes(b, 0, len(b)), _) +decreases +pure func (m MetaHdr) DecodeFromBytesSpec(b []byte) bool { + return MetaLen <= len(b) && + 0 <= m.CurrINF && m.CurrINF <= 3 && + 0 <= m.CurrHF && m.CurrHF < 64 && + m.SegsInBounds() && + let lenR := len(b) in + let b0 := sl.GetByte(b, 0, lenR, 0) in + let b1 := sl.GetByte(b, 0, lenR, 1) in + let b2 := sl.GetByte(b, 0, lenR, 2) in + let b3 := sl.GetByte(b, 0, lenR, 3) in + let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in + DecodedFrom(line) == m +} + +ghost +requires acc(s.Mem(), _) +requires acc(sl.Bytes(b, 0, len(b)), _) +decreases +pure func (s *Base) DecodeFromBytesSpec(b []byte) bool { + return unfolding acc(s.Mem(), _) in + s.PathMeta.DecodeFromBytesSpec(b) +} + +ghost +decreases +pure func (m MetaHdr) InBounds() bool { + return 0 <= m.CurrINF && m.CurrINF <= MaxINFs && + 0 <= m.CurrHF && m.CurrHF < MaxHops && + m.SegsInBounds() +} + ghost decreases pure func (m MetaHdr) SegsInBounds() bool { - return 0 <= m.SegLen[0] && m.SegLen[0] <= 63 && - 0 <= m.SegLen[1] && m.SegLen[1] <= 63 && - 0 <= m.SegLen[2] && m.SegLen[2] <= 63 + return 0 <= m.SegLen[0] && m.SegLen[0] < MaxHops && + 0 <= m.SegLen[1] && m.SegLen[1] < MaxHops && + 0 <= m.SegLen[2] && m.SegLen[2] < MaxHops } ghost @@ -218,13 +290,38 @@ pure func (m MetaHdr) SerializedToLine() uint32 { } ghost +requires acc(sl.Bytes(b, 0, len(b)), _) decreases -pure func (m MetaHdr) InBounds() bool { - return 0 <= m.CurrINF && m.CurrINF <= 3 && - 0 <= m.CurrHF && m.CurrHF <= 63 && - 0 <= m.SegLen[0] && m.SegLen[0] <= 63 && - 0 <= m.SegLen[1] && m.SegLen[1] <= 63 && - 0 <= m.SegLen[2] && m.SegLen[2] <= 63 +pure func (m MetaHdr) SerializeToSpec(b []byte) bool { + return MetaLen <= len(b) && + let lenR := len(b) in + let b0 := sl.GetByte(b, 0, lenR, 0) in + let b1 := sl.GetByte(b, 0, lenR, 1) in + let b2 := sl.GetByte(b, 0, lenR, 2) in + let b3 := sl.GetByte(b, 0, lenR, 3) in + let v := m.SerializedToLine() in + binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, v) +} + +ghost +requires acc(sl.Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s Base) EqAbsHeader(ub []byte) bool { + // we compute the subslice ub[:MetaLen] inside this function instead + // of expecting the correct subslice to be passed, otherwise this function + // becomes too cumbersome to use in calls from (*Raw).EqAbsHeader due to the + // lack of a folding expression. Same goes for MetaHdr.EqAbsHeader.
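+	// For instance, a caller that holds sl.Bytes(ub, 0, len(ub)) can state s.EqAbsHeader(ub) +	// directly, without first carving out and folding a predicate over ub[:MetaLen].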
+ return MetaLen <= len(ub) && + s == RawBytesToBase(ub) +} + +ghost +requires acc(sl.Bytes(ub, 0, len(ub)), _) +decreases +pure func (s MetaHdr) EqAbsHeader(ub []byte) bool { + return MetaLen <= len(ub) && + unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in + s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])) } /** Lemma proven in /VerifiedSCION/verification/utils/bitwise/proofs.dfy **/ diff --git a/pkg/slayers/path/scion/base_spec_test.gobra b/pkg/slayers/path/scion/base_spec_test.gobra index 5d07d6a53..9516e2cf6 100644 --- a/pkg/slayers/path/scion/base_spec_test.gobra +++ b/pkg/slayers/path/scion/base_spec_test.gobra @@ -23,11 +23,4 @@ import ( func canAllocateBase() { b := &Base{} fold b.Mem() -} - -ghost -ensures res -decreases -pure func validMetaLenInPath() (res bool) { - return MetaLen == path.MetaLen } \ No newline at end of file diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index 6af13f001..27eb6e8d3 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -24,13 +24,6 @@ import ( //@ sl "github.com/scionproto/scion/verification/utils/slices" ) -const ( - // MaxINFs is the maximum number of info fields in a SCION path. - MaxINFs = 3 - // MaxHops is the maximum number of hop fields in a SCION path. - MaxHops = 64 -) - // Decoded implements the SCION (data-plane) path type. Decoded is intended to be used in // non-performance critical code paths, where the convenience of having a fully parsed path trumps // the loss of performance. @@ -44,7 +37,7 @@ type Decoded struct { // DecodeFromBytes fully decodes the SCION path into the corresponding fields. // @ requires s.NonInitMem() -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R42) // @ ensures r == nil ==> ( // @ s.Mem(data) && // @ let lenD := len(data) in @@ -55,8 +48,8 @@ type Decoded struct { // @ let b3 := sl.GetByte(data, 0, lenD, 3) in // @ let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in // @ let metaHdr := DecodedFrom(line) in -// @ metaHdr == s.GetMetaHdr(data) && -// @ s.InfsMatchHfs(data)) +// @ metaHdr == s.GetMetaHdr(data)) +// @ ensures r == nil ==> s.GetBase(data).WeaklyValid() // @ ensures r != nil ==> (r.ErrorMem() && s.NonInitMem()) // @ decreases func (s *Decoded) DecodeFromBytes(data []byte) (r error) { @@ -75,7 +68,7 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { offset := MetaLen s.InfoFields = make([]path.InfoField, ( /*@ unfolding s.Base.Mem() in @*/ s.NumINF)) //@ assert len(data) >= MetaLen + s.Base.GetNumINF() * path.InfoLen + s.Base.GetNumHops() * path.HopLen - //@ sl.SplitByIndex_Bytes(data, 0, len(data), offset, R41) + //@ sl.SplitByIndex_Bytes(data, 0, len(data), offset, R43) //@ invariant acc(&s.InfoFields) //@ invariant acc(s.Base.Mem(), R1) @@ -84,20 +77,20 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { //@ invariant len(data) >= MetaLen + s.Base.GetNumINF() * path.InfoLen + s.Base.GetNumHops() * path.HopLen //@ invariant offset == MetaLen + i * path.InfoLen //@ invariant forall j int :: { &s.InfoFields[j] } 0 <= j && j < s.Base.GetNumINF() ==> acc(&s.InfoFields[j]) - //@ invariant acc(sl.AbsSlice_Bytes(data, 0, offset), R41) - //@ invariant acc(sl.AbsSlice_Bytes(data, offset, len(data)), R41) + //@ invariant acc(sl.Bytes(data, 0, offset), R43) + //@ invariant acc(sl.Bytes(data, offset, len(data)), R43) //@ decreases s.Base.GetNumINF() - i for i := 0; i < /*@ unfolding acc(s.Base.Mem(), _) in @*/ s.NumINF; i++ { - //@ 
sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.InfoLen, R41) - //@ sl.Reslice_Bytes(data, offset, offset + path.InfoLen, R41) + //@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.InfoLen, R43) + //@ sl.Reslice_Bytes(data, offset, offset + path.InfoLen, R43) if err := s.InfoFields[i].DecodeFromBytes(data[offset : offset+path.InfoLen]); err != nil { // (VerifiedSCION) infofield.DecodeFromBytes guarantees that err == nil. // Thus, this branch is not reachable. return err } //@ assert len(data[offset:offset+path.InfoLen]) == path.InfoLen - //@ sl.Unslice_Bytes(data, offset, offset + path.InfoLen, R41) - //@ sl.CombineAtIndex_Bytes(data, 0, offset + path.InfoLen, offset, R41) + //@ sl.Unslice_Bytes(data, offset, offset + path.InfoLen, R43) + //@ sl.CombineAtIndex_Bytes(data, 0, offset + path.InfoLen, offset, R43) offset += path.InfoLen } s.HopFields = make([]path.HopField, ( /*@ unfolding s.Base.Mem() in @*/ s.NumHops)) @@ -109,23 +102,23 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { //@ invariant forall j int :: { &s.HopFields[j] } 0 <= j && j < i ==> s.HopFields[j].Mem() //@ invariant len(data) >= MetaLen + s.Base.GetNumINF() * path.InfoLen + s.Base.GetNumHops() * path.HopLen //@ invariant offset == MetaLen + s.Base.GetNumINF() * path.InfoLen + i * path.HopLen - //@ invariant acc(sl.AbsSlice_Bytes(data, 0, offset), R41) - //@ invariant acc(sl.AbsSlice_Bytes(data, offset, len(data)), R41) + //@ invariant acc(sl.Bytes(data, 0, offset), R43) + //@ invariant acc(sl.Bytes(data, offset, len(data)), R43) //@ decreases s.Base.GetNumHops() - i for i := 0; i < /*@ unfolding acc(s.Base.Mem(), R2) in @*/ s.NumHops; i++ { - //@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.HopLen, R41) - //@ sl.Reslice_Bytes(data, offset, offset + path.HopLen, R41) + //@ sl.SplitByIndex_Bytes(data, offset, len(data), offset + path.HopLen, R43) + //@ sl.Reslice_Bytes(data, offset, offset + path.HopLen, R43) if err := s.HopFields[i].DecodeFromBytes(data[offset : offset+path.HopLen]); err != nil { // (VerifiedSCION) infofield.DecodeFromBytes guarantees that err == nil. // Thus, this branch should not be reachable. return err } //@ assert len(data[offset:offset+path.HopLen]) == path.HopLen - //@ sl.Unslice_Bytes(data, offset, offset + path.HopLen, R41) - //@ sl.CombineAtIndex_Bytes(data, 0, offset + path.HopLen, offset, R41) + //@ sl.Unslice_Bytes(data, offset, offset + path.HopLen, R43) + //@ sl.CombineAtIndex_Bytes(data, 0, offset + path.HopLen, offset, R43) offset += path.HopLen } - //@ sl.CombineAtIndex_Bytes(data, 0, len(data), offset, R41) + //@ sl.CombineAtIndex_Bytes(data, 0, len(data), offset, R43) //@ fold s.Mem(data) return nil } @@ -133,8 +126,8 @@ func (s *Decoded) DecodeFromBytes(data []byte) (r error) { // SerializeTo writePerms the path to a slice. The slice must be big enough to hold the entire data, // otherwise an error is returned. 
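// The layout written below is the 4-byte meta header, followed by NumINF info fields of // path.InfoLen bytes each, followed by NumHops hop fields of path.HopLen bytes each.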
// @ preserves acc(s.Mem(ubuf), R1)
-// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf))
-// @ preserves b !== ubuf ==> sl.AbsSlice_Bytes(b, 0, len(b))
+// @ preserves sl.Bytes(ubuf, 0, len(ubuf))
+// @ preserves b !== ubuf ==> sl.Bytes(b, 0, len(b))
 // @ ensures r != nil ==> r.ErrorMem()
 // @ decreases
 func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
@@ -143,7 +136,7 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
 			"actual", len(b))
 	}
 	//@ unfold acc(s.Mem(ubuf), R1)
-	//@ assert sl.AbsSlice_Bytes(b, 0, len(b))
+	//@ assert sl.Bytes(b, 0, len(b))
 	//@ sl.SplitByIndex_Bytes(b, 0, len(b), MetaLen, writePerm)
 	//@ sl.Reslice_Bytes(b, 0, MetaLen, writePerm)
 	//@ unfold acc(s.Base.Mem(), R1)
@@ -158,9 +151,9 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
 	offset := MetaLen
 	//@ invariant acc(s.Mem(ubuf), R1)
-	//@ invariant sl.AbsSlice_Bytes(ubuf, 0, len(ubuf))
-	//@ invariant b !== ubuf ==> sl.AbsSlice_Bytes(b, 0, len(b))
-	//@ invariant s.Len(ubuf) <= len(b)
+	//@ invariant sl.Bytes(ubuf, 0, len(ubuf))
+	//@ invariant b !== ubuf ==> sl.Bytes(b, 0, len(b))
+	//@ invariant s.LenSpec(ubuf) <= len(b)
 	//@ invariant 0 <= i && i <= s.getLenInfoFields(ubuf)
 	//@ invariant offset == MetaLen + i * path.InfoLen
 	//@ invariant MetaLen + s.getLenInfoFields(ubuf) * path.InfoLen + s.getLenHopFields(ubuf) * path.HopLen <= len(b)
@@ -173,7 +166,7 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
 		//@ sl.SplitByIndex_Bytes(b, 0, len(b), offset, writePerm)
 		//@ sl.SplitByIndex_Bytes(b, offset, len(b), offset + path.InfoLen, writePerm)
 		//@ sl.Reslice_Bytes(b, offset, offset + path.InfoLen, writePerm)
-		//@ assert sl.AbsSlice_Bytes(b[offset:offset+path.InfoLen], 0, path.InfoLen)
+		//@ assert sl.Bytes(b[offset:offset+path.InfoLen], 0, path.InfoLen)
 		if err := info.SerializeTo(b[offset : offset+path.InfoLen]); err != nil {
 			//@ Unreachable()
 			return err
@@ -185,9 +178,9 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
 		offset += path.InfoLen
 	}
 	//@ invariant acc(s.Mem(ubuf), R1)
-	//@ invariant sl.AbsSlice_Bytes(ubuf, 0, len(ubuf))
-	//@ invariant b !== ubuf ==> sl.AbsSlice_Bytes(b, 0, len(b))
-	//@ invariant s.Len(ubuf) <= len(b)
+	//@ invariant sl.Bytes(ubuf, 0, len(ubuf))
+	//@ invariant b !== ubuf ==> sl.Bytes(b, 0, len(b))
+	//@ invariant s.LenSpec(ubuf) <= len(b)
 	//@ invariant 0 <= i && i <= s.getLenHopFields(ubuf)
 	//@ invariant offset == MetaLen + s.getLenInfoFields(ubuf) * path.InfoLen + i * path.HopLen
 	//@ invariant MetaLen + s.getLenInfoFields(ubuf) * path.InfoLen + s.getLenHopFields(ubuf) * path.HopLen <= len(b)
@@ -220,11 +213,22 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
 // @ 	p.Mem(ubuf) &&
 // @ 	p == s &&
 // @ 	typeOf(p) == type[*Decoded] &&
-// @ 	(old(s.ValidCurrIdxs(ubuf)) ==> s.ValidCurrIdxs(ubuf)))
+// @ 	(old(s.GetBase(ubuf).Valid()) ==> s.GetBase(ubuf).Valid()))
 // @ ensures r != nil ==> r.ErrorMem() && s.Mem(ubuf)
 // @ decreases
 func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) {
-	//@ ghost isValid := s.ValidCurrIdxs(ubuf)
+	//@ ghost isValid := s.GetBase(ubuf).Valid()
+	//@ ghost base := s.GetBase(ubuf)
+	//@ ghost metaHdrAfterReversingSegLen := MetaHdr {
+	//@ 	CurrINF: base.PathMeta.CurrINF,
+	//@ 	CurrHF:  base.PathMeta.CurrHF,
+	//@ 	SegLen:  base.ReverseSegLen(),
+	//@ }
+	//@ ghost baseAfterReversingSegLen := Base {
+	//@ 	PathMeta: metaHdrAfterReversingSegLen,
+	//@ 	NumINF: base.NumINF,
+	//@ 	NumHops: base.NumHops,
+	//@ }
 	//@ unfold s.Mem(ubuf)
 	//@ unfold s.Base.Mem()
 	if s.NumINF == 0 {
@@ -232,35 +236,20 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) {
 		//@ fold s.Mem(ubuf)
 		return nil, serrors.New("empty decoded path is invalid and cannot be reversed")
 	}
-	//@ fold s.Base.Mem()
-	//@ fold s.Mem(ubuf)
-	//@ ghost base := s.GetBase(ubuf)
 	// Reverse order of InfoFields and SegLens
-	//@ invariant s.Mem(ubuf)
-	//@ invariant isValid ==> s.ValidCurrIdxs(ubuf)
-	//@ invariant 0 <= i && i < s.GetNumINF(ubuf)
-	//@ invariant 0 <= j && j < s.GetNumINF(ubuf)
-	//@ decreases j-i
-	for i, j := 0, ( /*@ unfolding s.Mem(ubuf) in (unfolding s.Base.Mem() in @*/ s.NumINF - 1 /*@) @*/); i < j; i, j = i+1, j-1 {
-		//@ unfold s.Mem(ubuf)
-		s.InfoFields[i], s.InfoFields[j] = s.InfoFields[j], s.InfoFields[i]
-		//@ unfold s.Base.Mem()
-		s.PathMeta.SegLen[i], s.PathMeta.SegLen[j] = s.PathMeta.SegLen[j], s.PathMeta.SegLen[i]
-		//@ fold s.Base.Mem()
-		//@ fold s.Mem(ubuf)
+	if s.NumINF > 1 {
+		lastIdx := s.NumINF - 1
+		s.InfoFields[0], s.InfoFields[lastIdx] = s.InfoFields[lastIdx], s.InfoFields[0]
+		s.PathMeta.SegLen[0], s.PathMeta.SegLen[lastIdx] = s.PathMeta.SegLen[lastIdx], s.PathMeta.SegLen[0]
 	}
-	//@ preserves s.Mem(ubuf)
-	//@ preserves isValid ==> s.ValidCurrIdxs(ubuf)
-	//@ decreases
-	//@ outline(
-	//@ unfold s.Mem(ubuf)
+	//@ fold s.Base.Mem()
 	//@ invariant acc(s.Base.Mem(), R10)
 	//@ invariant 0 <= i && i <= s.Base.GetNumINF()
 	//@ invariant acc(&s.InfoFields, R10)
 	//@ invariant len(s.InfoFields) == s.Base.GetNumINF()
-	//@ invariant forall i int :: { &s.InfoFields[i] } 0 <= i && i < len(s.InfoFields) ==> (acc(&s.InfoFields[i].ConsDir))
-	//@ invariant isValid ==> s.Base.ValidCurrIdxs()
+	//@ invariant forall i int :: { &s.InfoFields[i] } 0 <= i && i < len(s.InfoFields) ==>
+	//@ 	(acc(&s.InfoFields[i].ConsDir))
 	//@ decreases MaxINFs-i
 	// Reverse cons dir flags
 	for i := 0; i < ( /*@ unfolding acc(s.Base.Mem(), R11) in @*/ s.NumINF); i++ {
@@ -268,13 +257,12 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) {
 		info.ConsDir = !info.ConsDir
 	}
 	//@ fold s.Mem(ubuf)
-	//@ )
 	// Reverse order of hop fields
 	//@ invariant s.Mem(ubuf)
 	//@ invariant 0 <= i && i <= s.GetNumHops(ubuf)
 	//@ invariant -1 <= j && j < s.GetNumHops(ubuf)
-	//@ invariant isValid ==> s.ValidCurrIdxs(ubuf)
+	//@ invariant s.GetBase(ubuf) == baseAfterReversingSegLen
 	//@ decreases j-i
 	for i, j := 0, ( /*@ unfolding s.Mem(ubuf) in (unfolding s.Base.Mem() in @*/ s.NumHops - 1 /*@ ) @*/); i < j; i, j = i+1, j-1 {
 		//@ unfold s.Mem(ubuf)
@@ -289,23 +277,21 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) {
 		//@ fold s.Mem(ubuf)
 	}
 	// Update CurrINF and CurrHF and SegLens
-	//@ preserves s.Mem(ubuf)
-	//@ preserves isValid ==> s.ValidCurrIdxs(ubuf)
-	//@ decreases
-	//@ outline(
 	//@ unfold s.Mem(ubuf)
 	//@ unfold s.Base.Mem()
 	s.PathMeta.CurrINF = uint8(s.NumINF) - s.PathMeta.CurrINF - 1
 	s.PathMeta.CurrHF = uint8(s.NumHops) - s.PathMeta.CurrHF - 1
+	//@ assert s.Base == base.ReverseSpec()
+	//@ ghost if isValid { base.ReversingBaseValidSegLenHasValidSegLen() }
+	//@ assert isValid ==> s.Base.Valid()
 	//@ fold s.Base.Mem()
 	//@ fold s.Mem(ubuf)
-	//@ )
 	return s, nil
 }

 // ToRaw transforms scion.Decoded into scion.Raw.
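+// For illustration only: ToRaw allocates a fresh buffer of s.Len(ubuf1) bytes and
+// fills it via SerializeTo, so the returned *Raw is backed by that new buffer and
+// ubuf1 itself is left unmodified.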
// @ preserves s.Mem(ubuf1) -// @ preserves sl.AbsSlice_Bytes(ubuf1, 0, len(ubuf1)) +// @ preserves sl.Bytes(ubuf1, 0, len(ubuf1)) // @ ensures err == nil ==> r.Mem(ubuf2) // @ ensures err != nil ==> err.ErrorMem() // @ decreases @@ -314,7 +300,7 @@ func (s *Decoded) ToRaw( /*@ ghost ubuf1 []byte @*/ ) (r *Raw, err error /*@, gh // make cannot contain ghost subexpressions tmp := s.Len( /*@ ubuf1 @*/ ) b := make([]byte, tmp) - //@ fold sl.AbsSlice_Bytes(b, 0, len(b)) + //@ fold sl.Bytes(b, 0, len(b)) if err := s.SerializeTo(b /*@, ubuf1 @*/); err != nil { return nil, err /*@, b @*/ } diff --git a/pkg/slayers/path/scion/decoded_spec.gobra b/pkg/slayers/path/scion/decoded_spec.gobra index fda3419f3..34e98ca0c 100644 --- a/pkg/slayers/path/scion/decoded_spec.gobra +++ b/pkg/slayers/path/scion/decoded_spec.gobra @@ -22,6 +22,13 @@ import ( "github.com/scionproto/scion/verification/utils/slices" ) +ghost +pure +decreases +func (p *Decoded) IsValidResultOfDecoding(b []byte, err error) (res bool) { + return true +} + (*Decoded) implements path.Path /**** Predicates ****/ @@ -51,16 +58,22 @@ pred (d *Decoded) Mem(ubuf []byte) { * Unfortunately, Gobra does not fully support them yet, so we * introduced this method which acts as a wrapper. */ - // TODO: can this spec be simplified (by removing the access to d.Mem(...))? -pure -requires acc(d.Mem(ubuf), _) -ensures unfolding acc(d.Mem(ubuf), _) in l == d.Base.Len() -ensures l >= 0 +preserves acc(d.Mem(ubuf), R50) +ensures l == d.LenSpec(ubuf) decreases func (d *Decoded) Len(ghost ubuf []byte) (l int) { return unfolding acc(d.Mem(ubuf), _) in d.Base.Len() } +ghost +pure +requires acc(d.Mem(ub), _) +ensures unfolding acc(d.Mem(ub), _) in l == d.Base.Len() +decreases +func (d *Decoded) LenSpec(ghost ub []byte) (l int) { + return unfolding acc(d.Mem(ub), _) in d.Base.Len() +} + /** * This method is not part of the original SCION codebase. * Instead, `Len` was defined in `*Decoded` via embedded structs. 
@@ -99,8 +112,8 @@ func (d *Decoded) IsXover(ghost ubuf []byte) bool { requires d.Mem(ubuf) ensures e == nil ==> ( d.Mem(ubuf) && - d.Len(ubuf) == old(d.Len(ubuf)) && - (old(d.ValidCurrIdxs(ubuf)) ==> d.ValidCurrIdxs(ubuf))) + d.LenSpec(ubuf) == old(d.LenSpec(ubuf)) && + (old(d.GetBase(ubuf).Valid()) ==> d.GetBase(ubuf).Valid())) ensures e != nil ==> d.NonInitMem() && e.ErrorMem() decreases func (d *Decoded) IncPath(ghost ubuf []byte) (e error) { @@ -114,27 +127,6 @@ func (d *Decoded) IncPath(ghost ubuf []byte) (e error) { return e } -ghost -requires acc(d.Mem(ub), _) -decreases -pure func (d *Decoded) ValidCurrINF(ub []byte) bool { - return unfolding acc(d.Mem(ub), _) in d.Base.ValidCurrINF() -} - -ghost -requires acc(d.Mem(ub), _) -decreases -pure func (d *Decoded) ValidCurrHF(ub []byte) bool { - return unfolding acc(d.Mem(ub), _) in d.Base.ValidCurrHF() -} - -ghost -requires acc(d.Mem(ub), _) -decreases -pure func (d *Decoded) ValidCurrIdxs(ub []byte) bool { - return unfolding acc(d.Mem(ub), _) in d.Base.ValidCurrIdxs() -} - ghost requires acc(d.Mem(ub), _) decreases @@ -156,13 +148,6 @@ pure func (s *Decoded) GetMetaHdr(ub []byte) MetaHdr { return unfolding acc(s.Mem(ub), _) in s.Base.GetMetaHdr() } -ghost -requires acc(d.Mem(ub), _) -decreases -pure func (d *Decoded) InfsMatchHfs(ub []byte) bool { - return unfolding acc(d.Mem(ub), _) in d.Base.InfsMatchHfs() -} - /**** End of Stubs ****/ /**** Auxiliary Functions ****/ diff --git a/pkg/slayers/path/scion/decoded_spec_test.gobra b/pkg/slayers/path/scion/decoded_spec_test.gobra index a794b2dcb..4ea0575aa 100644 --- a/pkg/slayers/path/scion/decoded_spec_test.gobra +++ b/pkg/slayers/path/scion/decoded_spec_test.gobra @@ -18,7 +18,7 @@ package scion import ( "github.com/scionproto/scion/pkg/slayers/path" - "github.com/scionproto/scion/verification/utils/slices" + sl "github.com/scionproto/scion/verification/utils/slices" ) func testAllocateNonInitDecoded() { @@ -32,6 +32,6 @@ func testAllocateDecoded() { fold d.Base.Mem() assert d.Base.Len() == MetaLen b := make([]byte, MetaLen) - fold slices.AbsSlice_Bytes(b, 0, MetaLen) + fold sl.Bytes(b, 0, MetaLen) fold d.Mem(b) } diff --git a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra new file mode 100644 index 000000000..04aa00308 --- /dev/null +++ b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra @@ -0,0 +1,699 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package scion + +import ( + "github.com/scionproto/scion/pkg/slayers/path" + . "verification/utils/definitions" + sl "verification/utils/slices" + "verification/io" +) + +/*** This file contains helpful lemmas for proving SetInfoField and SetHopfield. ***/ +// Our abstract translation functions (CurrSeg, LeftSeg, RightSeg, MidSeg) are defined based on the +// entire byte slice of the concrete packet. This approach makes proving updates to the bytes very difficult. 
+// In this file, we introduce new translation functions that rely only on the hopfields byte slice and
+// the infofield of a segment. We prove that these new functions are equivalent to the original ones
+// and can be translated to each other. With these new functions, the proofs for SetInfoField and SetHopfield
+// are greatly simplified.
+
+
+// InfofieldByteSlice returns the byte slice of the infofield corresponding to the
+// specified currInfIdx argument. Although a packet can have only three infofields,
+// we use currInfIdx == 4 to represent the first infofield in our translation from
+// concrete packets to abstract packets. This requires the special case that
+// currInfIdx == 4 returns the same as currInfIdx == 0.
+ghost
+requires 0 <= currInfIdx
+requires path.InfoFieldOffset(currInfIdx, MetaLen) + path.InfoLen <= len(raw)
+decreases
+pure func InfofieldByteSlice(raw []byte, currInfIdx int) ([]byte) {
+	return let infOffset := currInfIdx == 4 ?
+		path.InfoFieldOffset(0, MetaLen) :
+		path.InfoFieldOffset(currInfIdx, MetaLen) in
+		raw[infOffset:infOffset + path.InfoLen]
+}
+
+// HopfieldsStartIdx returns the index of the first byte of the hopfields of a segment
+// specified by the currInfIdx argument. Although a packet can have only three segments,
+// we use currInfIdx == 4 to represent the first segment in our translation from
+// concrete packets to abstract packets. This requires the special case that
+// currInfIdx == 4 returns the same as currInfIdx == 0.
+ghost
+requires segs.Valid()
+requires 0 <= currInfIdx
+decreases
+pure func HopfieldsStartIdx(currInfIdx int, segs io.SegLens) int {
+	return let numInf := segs.NumInfoFields() in
+		let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
+		(currInfIdx == 0 || currInfIdx == 4) ? infOffset :
+		currInfIdx == 1 ? infOffset + segs.Seg1Len * path.HopLen :
+		infOffset + (segs.Seg1Len + segs.Seg2Len) * path.HopLen
+}
+
+// HopfieldsEndIdx returns the index one past the last byte of the hopfields of a segment
+// specified by the currInfIdx argument. Although a packet can have only three segments,
+// we use currInfIdx == 4 to represent the first segment in our translation from
+// concrete packets to abstract packets. This requires the special case that
+// currInfIdx == 4 returns the same as currInfIdx == 0.
+ghost
+requires segs.Valid()
+requires 0 <= currInfIdx
+decreases
+pure func HopfieldsEndIdx(currInfIdx int, segs io.SegLens) int {
+	return let numInf := segs.NumInfoFields() in
+		let infOffset := path.InfoFieldOffset(numInf, MetaLen) in
+		(currInfIdx == 0 || currInfIdx == 4) ? infOffset + segs.Seg1Len * path.HopLen :
+		currInfIdx == 1 ? infOffset + (segs.Seg1Len + segs.Seg2Len) * path.HopLen :
+		infOffset + (segs.Seg1Len + segs.Seg2Len + segs.Seg3Len) * path.HopLen
+}
+
+// HopfieldsByteSlice returns the byte slice of the hopfields of a segment
+// specified by the currInfIdx argument. Although a packet can have only three segments,
+// we use currInfIdx == 4 to represent the first segment in our translation from
+// concrete packets to abstract packets. This requires the special case that
+// currInfIdx == 4 returns the same as currInfIdx == 0.
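+// Illustrative example (assuming the usual SCION constants MetaLen == 4,
+// path.InfoLen == 8, path.HopLen == 12): for segs := io.CombineSegLens(2, 3, 0)
+// we have numInf == 2, so the hopfields of segment 0 occupy raw[20:44], those of
+// segment 1 occupy raw[44:80], and HopfieldsByteSlice(raw, 1, segs) == raw[44:80].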
+ghost +requires segs.Valid() +requires 0 <= currInfIdx +requires PktLen(segs, MetaLen) <= len(raw) +decreases +pure func HopfieldsByteSlice(raw []byte, currInfIdx int, segs io.SegLens) ([]byte) { + return let numInf := segs.NumInfoFields() in + let infOffset := path.InfoFieldOffset(numInf, MetaLen) in + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in + raw[start:end] +} + +// SliceBytesIntoSegments splits the raw bytes of a packet into its hopfield segments +ghost +requires 0 < p +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), p) +ensures acc(sl.Bytes(raw[:HopfieldsStartIdx(0, segs)], 0, HopfieldsStartIdx(0, segs)), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 0, segs), 0, segs.Seg1Len * path.HopLen), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 1, segs), 0, segs.Seg2Len * path.HopLen), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len * path.HopLen), p) +ensures acc(sl.Bytes(raw[HopfieldsEndIdx(2, segs):], 0, len(raw[HopfieldsEndIdx(2, segs):])), p) +decreases +func SliceBytesIntoSegments(raw []byte, segs io.SegLens, p perm) { + sl.SplitByIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, segs), p) + sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), HopfieldsEndIdx(0, segs), p) + sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(1, segs), len(raw), HopfieldsEndIdx(1, segs), p) + sl.SplitByIndex_Bytes(raw, HopfieldsStartIdx(2, segs), len(raw), HopfieldsEndIdx(2, segs), p) + sl.Reslice_Bytes(raw, 0, HopfieldsStartIdx(0, segs), p) + sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, segs), HopfieldsEndIdx(0, segs), p) + sl.Reslice_Bytes(raw, HopfieldsStartIdx(1, segs), HopfieldsEndIdx(1, segs), p) + sl.Reslice_Bytes(raw, HopfieldsStartIdx(2, segs), HopfieldsEndIdx(2, segs), p) + sl.Reslice_Bytes(raw, HopfieldsEndIdx(2, segs), len(raw), p) +} + +// CombineBytesFromSegments combines the three hopfield segments of a packet into a single slice of bytes. 
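+// It is intended as the inverse of SliceBytesIntoSegments above: a proof can split
+// the buffer, reason about a single segment in isolation, and then restore the
+// original permission. A typical (illustrative) proof step would be
+//	SliceBytesIntoSegments(raw, segs, R50)
+//	// ... inspect or update HopfieldsByteSlice(raw, 1, segs) here ...
+//	CombineBytesFromSegments(raw, segs, R50)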
+ghost +requires 0 < p +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires acc(sl.Bytes(raw[:HopfieldsStartIdx(0, segs)], 0, HopfieldsStartIdx(0, segs)), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 0, segs), 0, segs.Seg1Len*path.HopLen), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 1, segs), 0, segs.Seg2Len*path.HopLen), p) +requires acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len*path.HopLen), p) +requires acc(sl.Bytes(raw[HopfieldsEndIdx(2, segs):], 0, len(raw[HopfieldsEndIdx(2, segs):])), p) +ensures acc(sl.Bytes(raw, 0, len(raw)), p) +decreases +func CombineBytesFromSegments(raw []byte, segs io.SegLens, p perm) { + sl.Unslice_Bytes(raw, HopfieldsEndIdx(2, segs), len(raw), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(2, segs), HopfieldsEndIdx(2, segs), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(1, segs), HopfieldsEndIdx(1, segs), p) + sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, segs), HopfieldsEndIdx(0, segs), p) + sl.Unslice_Bytes(raw, 0, HopfieldsStartIdx(0, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(2, segs), len(raw), HopfieldsEndIdx(2, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(1, segs), len(raw), HopfieldsEndIdx(1, segs), p) + sl.CombineAtIndex_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), HopfieldsEndIdx(0, segs), p) + sl.CombineAtIndex_Bytes(raw, 0, len(raw), HopfieldsStartIdx(0, segs), p) +} + +// SliceBytesIntoInfoFields splits the raw bytes of a packet into its infofields +ghost +requires 0 < p +requires segs.Valid() +requires MetaLen + numInf * path.InfoLen <= len(raw) +requires numInf == segs.NumInfoFields() +requires acc(sl.Bytes(raw, 0, len(raw)), p) +ensures acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p) +ensures acc(sl.Bytes(InfofieldByteSlice(raw, 0), 0, path.InfoLen), p) +ensures 1 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 1), 0, path.InfoLen), p) +ensures 2 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 2), 0, path.InfoLen), p) +ensures acc(sl.Bytes(raw[HopfieldsStartIdx(0, segs):], 0, len(raw[HopfieldsStartIdx(0, segs):])), p) +decreases +func SliceBytesIntoInfoFields(raw []byte, numInf int, segs io.SegLens, p perm) { + sl.SplitByIndex_Bytes(raw, 0, len(raw), MetaLen, p) + sl.SplitByIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) + sl.Reslice_Bytes(raw, 0, MetaLen, p) + sl.Reslice_Bytes(raw, MetaLen, path.InfoFieldOffset(1, MetaLen), p) + if(numInf > 1) { + sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(1, MetaLen), len(raw), + path.InfoFieldOffset(2, MetaLen), p) + sl.Reslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), + path.InfoFieldOffset(2, MetaLen), p) + } + if(numInf > 2) { + sl.SplitByIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), + HopfieldsStartIdx(0, segs), p) + sl.Reslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, segs), p) + } + sl.Reslice_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), p) +} + +// CombineBytesFromInfoFields combines the infofields of a packet into a single slice of bytes. 
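+// Analogously to the segment variants above, this lemma is intended as the inverse
+// of SliceBytesIntoInfoFields: splitting with SliceBytesIntoInfoFields(raw, numInf,
+// segs, p) and later combining with the same arguments restores the full permission
+// sl.Bytes(raw, 0, len(raw)).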
+ghost +requires 0 < p +requires segs.Valid() +requires MetaLen + numInf * path.InfoLen <= len(raw) +requires numInf == segs.NumInfoFields() +requires acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p) +requires acc(sl.Bytes(InfofieldByteSlice(raw, 0), 0, path.InfoLen), p) +requires 1 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 1), 0, path.InfoLen), p) +requires 2 < numInf ==> acc(sl.Bytes(InfofieldByteSlice(raw, 2), 0, path.InfoLen), p) +requires acc(sl.Bytes(raw[HopfieldsStartIdx(0, segs):], 0, len(raw[HopfieldsStartIdx(0, segs):])), p) +ensures acc(sl.Bytes(raw, 0, len(raw)), p) +decreases +func CombineBytesFromInfoFields(raw []byte, numInf int, segs io.SegLens, p perm) { + sl.Unslice_Bytes(raw, HopfieldsStartIdx(0, segs), len(raw), p) + if(numInf > 2) { + sl.Unslice_Bytes(raw, path.InfoFieldOffset(2, MetaLen), HopfieldsStartIdx(0, segs), p) + sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(2, MetaLen), len(raw), + HopfieldsStartIdx(0, segs), p) + } + if(numInf > 1) { + sl.Unslice_Bytes(raw, path.InfoFieldOffset(1, MetaLen), + path.InfoFieldOffset(2, MetaLen), p) + sl.CombineAtIndex_Bytes(raw, path.InfoFieldOffset(1, MetaLen), len(raw), + path.InfoFieldOffset(2, MetaLen), p) + } + sl.Unslice_Bytes(raw, MetaLen, path.InfoFieldOffset(1, MetaLen), p) + sl.Unslice_Bytes(raw, 0, MetaLen, p) + sl.CombineAtIndex_Bytes(raw, MetaLen, len(raw), path.InfoFieldOffset(1, MetaLen), p) + sl.CombineAtIndex_Bytes(raw, 0, len(raw), MetaLen, p) +} + +// CurrSegWithInfo returns the abstract representation of the current segment of a packet. +// Unlike CurrSeg, it relies solely on the hopfield byte slice and an infofield instead of +// the entire raw bytes of the packet. This approach simplifies the verification of changes +// within a segment after updates to the packet's raw bytes. +ghost +opaque +requires 0 < SegLen +requires 0 <= currHfIdx && currHfIdx <= SegLen +requires SegLen * path.HopLen == len(hopfields) +requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) +decreases +pure func CurrSegWithInfo(hopfields []byte, currHfIdx int, SegLen int, inf io.AbsInfoField) io.IO_seg3 { + return segment(hopfields, 0, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, inf.Peer, SegLen) +} + + +// LeftSegWithInfo returns the abstract representation of the next segment of a packet. +// Unlike LeftSeg, it relies solely on the hopfields byte slice and an infofield instead of +// the entire bytes of the packet. Whenever the return value is not none, LeftSegWithInfo +// requires permissions to the hopfields byte slice of the segment specified by currInfIdx. +ghost +opaque +requires segs.Valid() +requires (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in + inf != none[io.AbsInfoField] && + len(hopfields) == end - start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) +decreases +pure func LeftSegWithInfo( + hopfields []byte, + currInfIdx int, + segs io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 1 && segs.Seg2Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, segs.Seg2Len, get(inf))) : + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, segs.Seg3Len, get(inf))) : + none[io.IO_seg3] +} + +// RightSegWithInfo returns the abstract representation of the previous segment of a packet. 
+// Unlike RightSeg, it relies solely on the hopfields byte slice and an infofield instead of +// the entire bytes of the packet. Whenever the return value is not none, RightSegWithInfo +// requires permissions to the hopfields byte slice of the segment specified by currInfIdx. +ghost +opaque +requires segs.Valid() +requires (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in + inf != none[io.AbsInfoField] && + len(hopfields) == end - start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) +decreases +pure func RightSegWithInfo( + hopfields []byte, + currInfIdx int, + segs io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg2Len, segs.Seg2Len, get(inf))) : + (currInfIdx == 0 && segs.Seg2Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg1Len, segs.Seg1Len, get(inf))) : + none[io.IO_seg3] +} + +// MidSegWithInfo returns the abstract representation of the last or first segment of a packet. +// Unlike MidSeg, it relies solely on the hopfields byte slice and an infofield instead of +// the entire bytes of the packet. Whenever the return value is not none, MidSegWithInfo +// requires permissions to the hopfields byte slice of the segment specified by currInfIdx. +ghost +opaque +requires segs.Valid() +requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ==> + let start := HopfieldsStartIdx(currInfIdx, segs) in + let end := HopfieldsEndIdx(currInfIdx, segs) in + inf != none[io.AbsInfoField] && + len(hopfields) == end - start && + acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) +decreases +pure func MidSegWithInfo( + hopfields []byte, + currInfIdx int, + segs io.SegLens, + inf option[io.AbsInfoField]) option[io.IO_seg3] { + return (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, segs.Seg1Len, segs.Seg1Len, get(inf))) : + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSegWithInfo(hopfields, 0, segs.Seg3Len, get(inf))) : + none[io.IO_seg3] +} + +// CurrSegEquality ensures that the two definitions of abstract segments, CurrSegWithInfo(..) +// and CurrSeg(..), represent the same abstract segment. 
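+// Intuitively, this holds because both sides abstract the same underlying bytes:
+// the infofield read via InfofieldByteSlice(raw, currInfIdx) coincides with the one
+// read directly from raw at path.InfoFieldOffset(currInfIdx, MetaLen), and the
+// hopfields of the segment are exactly the subslice raw[offset:offset + SegLen * path.HopLen].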
+ghost +requires path.InfoFieldOffset(currInfIdx, MetaLen) + path.InfoLen <= offset +requires 0 < SegLen +requires offset + path.HopLen * SegLen <= len(raw) +requires 0 <= currHfIdx && currHfIdx <= SegLen +requires 0 <= currInfIdx && currInfIdx < 3 +preserves acc(sl.Bytes(raw, 0, len(raw)), R50) +preserves acc(sl.Bytes(raw[offset:offset + SegLen * path.HopLen], 0, SegLen * path.HopLen), R50) +preserves acc(sl.Bytes(InfofieldByteSlice(raw, currInfIdx), 0, path.InfoLen), R50) +ensures let inf := path.BytesToAbsInfoField(InfofieldByteSlice(raw, currInfIdx), 0) in + CurrSegWithInfo(raw[offset:offset + SegLen * path.HopLen], currHfIdx, SegLen, inf) == + CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) +decreases +func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegLen int) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + inf := path.BytesToAbsInfoField(infoBytes, 0) + infOffset := path.InfoFieldOffset(currInfIdx, MetaLen) + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) + assert reveal path.BytesToAbsInfoField(raw, infOffset) == + reveal path.BytesToAbsInfoField(infoBytes, 0) + reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) + reveal CurrSegWithInfo(raw[offset:offset + SegLen * path.HopLen], currHfIdx, SegLen, inf) + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) + widenSegment(raw, offset, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, + inf.Peer, SegLen, offset, offset + SegLen * path.HopLen) +} + +// UpdateCurrSegInfo proves that updating the infofield from inf1 to inf2 does not alter the hopfields +// of the current segment. +ghost +requires 0 < SegLen +requires 0 <= currHfIdx && currHfIdx <= SegLen +requires SegLen * path.HopLen == len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R50) +ensures CurrSegWithInfo(raw, currHfIdx, SegLen, inf1).UpdateCurrSeg(inf2) == + CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) +decreases +func UpdateCurrSegInfo(raw []byte, currHfIdx int, SegLen int, + inf1 io.AbsInfoField, inf2 io.AbsInfoField) { + seg1 := reveal CurrSegWithInfo(raw, currHfIdx, SegLen, inf1) + seg2 := reveal CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) +} + + +// LeftSegEqualitySpec defines the conditions that must hold for LeftSegWithInfo(..) +// and LeftSeg(..) to represent the same abstract segment. +ghost +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires 1 <= currInfIdx && currInfIdx < 4 +requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +decreases +pure func LeftSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { + return (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? 
+ let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + LeftSeg(raw, currInfIdx, segs, MetaLen) == + LeftSegWithInfo(hopBytes, currInfIdx, segs, inf) : + LeftSeg(raw, currInfIdx, segs, MetaLen) == + LeftSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) +} + +// LeftSegEquality ensures that the two definitions of abstract segments, LeftSegWithInfo(..) +// and LeftSeg(..), represent the same abstract segment. +// The left segment corresponds to different segments of the packet depending on the currInfIdx. +// To address this, we need to consider all possible cases of currInfIdx. This results in fairly +// complex preconditions and postconditions because, for every currInfIdx, we need an offset for +// its infofield and one for its hopfields. +ghost +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires 1 <= currInfIdx && currInfIdx < 4 +preserves acc(sl.Bytes(raw, 0, len(raw)), R49) +preserves (currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +ensures LeftSegEqualitySpec(raw, currInfIdx, segs) +decreases +func LeftSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { + reveal LeftSeg(raw, currInfIdx, segs, MetaLen) + if ((currInfIdx == 1 && segs.Seg2Len > 0) || + (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0)) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, segs) + segLen := currInfIdx == 1 ? segs.Seg2Len : segs.Seg3Len + reveal LeftSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, currInfIdx, 0, segLen) + } else { + reveal LeftSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + } +} + +// RightSegEqualitySpec defines the conditions that must hold for RightSegWithInfo(..) +// and RightSeg(..) to represent the same abstract segment. +ghost +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires -1 <= currInfIdx && currInfIdx < 2 +requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +decreases +pure func RightSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { + return (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + RightSeg(raw, currInfIdx, segs, MetaLen) == + RightSegWithInfo(hopBytes, currInfIdx, segs, inf) : + RightSeg(raw, currInfIdx, segs, MetaLen) == + RightSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) +} + +// RightSegEquality ensures that the two definitions of abstract segments, RightSegWithInfo(..) 
+// and RightSeg(..), represent the same abstract segment. +// The right segment corresponds to different segments of the packet depending on the currInfIdx. +// To address this, we need to consider all possible cases of currInfIdx. This results in fairly +// complex preconditions and postconditions because, for every currInfIdx, we need an offset for +// its infofield and one for its hopfields. +ghost +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires -1 <= currInfIdx && currInfIdx < 2 +preserves acc(sl.Bytes(raw, 0, len(raw)), R49) +preserves (currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +ensures RightSegEqualitySpec(raw, currInfIdx, segs) +decreases +func RightSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { + reveal RightSeg(raw, currInfIdx, segs, MetaLen) + if ((currInfIdx == 0 && segs.Seg2Len > 0) || + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0)) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, segs) + segLen := currInfIdx == 0 ? segs.Seg1Len : segs.Seg2Len + reveal RightSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, currInfIdx, segLen, segLen) + } else { + reveal RightSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + } +} + +// MidSegEqualitySpec defines the conditions that must hold for MidSegWithInfo(..) +// and MidSeg(..) to represent the same abstract segment. +ghost +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires 2 <= currInfIdx && currInfIdx < 5 +requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +decreases +pure func MidSegEqualitySpec(raw []byte, currInfIdx int, segs io.SegLens) bool { + return (segs.Seg2Len > 0 && segs.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ? + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + let inf := some(path.BytesToAbsInfoField(infoBytes, 0)) in + MidSeg(raw, currInfIdx, segs, MetaLen) == + MidSegWithInfo(hopBytes, currInfIdx, segs, inf) : + MidSeg(raw, currInfIdx, segs, MetaLen) == + MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) +} + +// MidSegEquality ensures that the two definitions of abstract segments, MidSegWithInfo(..) +// and MidSeg(..), represent the same abstract segment. +// The mid segment corresponds to different segments of the packet depending on the currInfIdx. +// To address this, we need to consider all possible cases of currInfIdx. This results in fairly +// complex preconditions and postconditions because, for every currInfIdx, we need an offset for +// its infofield and one for its hopfields. 
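+// Concretely (illustrative): with three non-empty segments, currInfIdx == 4 denotes
+// the already fully traversed first segment, i.e. CurrSegWithInfo(.., segs.Seg1Len,
+// segs.Seg1Len, ..), whereas currInfIdx == 2 denotes the not-yet-entered third
+// segment, i.e. CurrSegWithInfo(.., 0, segs.Seg3Len, ..); all other cases yield
+// none[io.IO_seg3].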
+ghost +requires segs.Valid() +requires PktLen(segs, MetaLen) <= len(raw) +requires 2 <= currInfIdx && currInfIdx < 5 +preserves acc(sl.Bytes(raw, 0, len(raw)), R49) +preserves (segs.Seg2Len > 0 && segs.Seg3Len > 0 && + (currInfIdx == 2 || currInfIdx == 4)) ==> + let infoBytes := InfofieldByteSlice(raw, currInfIdx) in + let hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) in + acc(sl.Bytes(infoBytes, 0, path.InfoLen), R49) && + acc(sl.Bytes(hopBytes, 0, len(hopBytes)), R49) +ensures MidSegEqualitySpec(raw, currInfIdx, segs) +decreases +func MidSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { + reveal MidSeg(raw, currInfIdx, segs, MetaLen) + if (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) { + infoBytes := InfofieldByteSlice(raw, 0) + hopBytes := HopfieldsByteSlice(raw, 0, segs) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, segs) + reveal MidSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, 0, segs.Seg1Len, segs.Seg1Len) + } else if (currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) { + infoBytes := InfofieldByteSlice(raw, currInfIdx) + hopBytes := HopfieldsByteSlice(raw, currInfIdx, segs) + inf := some(reveal path.BytesToAbsInfoField(infoBytes, 0)) + offset := HopfieldsStartIdx(currInfIdx, segs) + reveal MidSegWithInfo(hopBytes, currInfIdx, segs, inf) + CurrSegEquality(raw, offset, currInfIdx, 0, segs.Seg3Len) + } else { + reveal MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) + } +} + +// `BytesStoreCurrSeg(hopfields, currHfIdx, segLen, inf)` holds iff `hopfields` contains the +// serialization of the hopfields of the current segment, which has been traversed until the +// `currHfIdx`-th hop (out of `segLen` hops in total). +ghost +requires 0 <= currHfIdx && currHfIdx < segLen +requires segLen * path.HopLen == len(hopfields) +requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R50) +requires let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R50) && + acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R50) && + acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R50) +decreases +pure func BytesStoreCurrSeg(hopfields []byte, currHfIdx int, segLen int, inf io.AbsInfoField) bool { + return let currseg := CurrSegWithInfo(hopfields, currHfIdx, segLen, inf) in + let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + len(currseg.Future) > 0 && + currseg.Future[0] == path.BytesToIO_HF(hopfields[currHfStart:currHfEnd], 0, 0, path.HopLen) && + currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx - 1)) && + currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && + currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && + currseg.AInfo == inf.AInfo && + currseg.UInfo == inf.UInfo && + currseg.ConsDir == inf.ConsDir && + currseg.Peer == inf.Peer +} + +// `EstablishBytesStoreCurrSeg` shows that the raw bytes containing all hopfields +// can be split into three slices, one that exclusively contains all past hopfields, one +// that exclusively contains all future ones and another one for the current hopfield. +// This helps in proving that the future and past hopfields remain unchanged when the +// current hopfield is modified. 
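+// Worked example (assuming path.HopLen == 12): for segLen == 5 and currHfIdx == 2,
+// the three slices required above are hopfields[:24] (past), hopfields[24:36]
+// (current), and hopfields[36:60] (future), since currHfStart == 2 * path.HopLen
+// and currHfEnd == currHfStart + path.HopLen.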
+ghost +requires 0 <= currHfIdx && currHfIdx < segLen +requires segLen * path.HopLen == len(hopfields) +preserves acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) +preserves let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R49) && + acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R49) && + acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R49) +ensures BytesStoreCurrSeg(hopfields, currHfIdx, segLen, inf) +decreases +func EstablishBytesStoreCurrSeg(hopfields []byte, currHfIdx int, segLen int, inf io.AbsInfoField) { + currseg := reveal CurrSegWithInfo(hopfields, currHfIdx, segLen, inf) + currHfStart := currHfIdx * path.HopLen + currHfEnd := currHfStart + path.HopLen + unfold acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) + unfold acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R56) + hf := hopFields(hopfields, 0, 0, segLen) + hopFieldsBytePositionsLemma(hopfields, 0, 0, segLen, R54) + reveal hopFieldsBytePositions(hopfields, 0, 0, segLen, hf) + assert len(currseg.Future) > 0 + assert currseg.Future[0] == path.BytesToIO_HF(hopfields[currHfStart:currHfEnd], 0, 0, path.HopLen) + splitHopFieldsInPastAndFuture(hopfields, currHfIdx, segLen) + assert currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) + assert currseg.Future[0] == hf[currHfIdx] + assert hf[currHfIdx:][1:] == hf[currHfIdx + 1:] + assert currseg.Future == hf[currHfIdx:] + assert currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx- 1)) + assert currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) + fold acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R56) + fold acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) +} + +// `splitHopFieldsInPastAndFuture` shows that the raw bytes containing all hopfields +// can be split into two slices, one that exclusively contains all past hopfields and another +// that exclusively contains all future ones. This helps in proving that the future and past +// hopfields remain unchanged when the current hopfield is modified. 
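+// For instance (illustrative, assuming path.HopLen == 12), with segLen == 3 and
+// currHfIdx == 1 the lemma gives
+//	hopFields(hopfields, 0, 0, 3)[:1] == hopFields(hopfields[:12], 0, 0, 1)
+//	hopFields(hopfields, 0, 0, 3)[2:] == hopFields(hopfields[24:], 0, 0, 1)
+// so the past and future hops are recoverable from disjoint subslices.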
+ghost
+requires 0 < segLen
+requires 0 <= currHfIdx && currHfIdx < segLen
+requires segLen * path.HopLen == len(hopfields)
+preserves acc(sl.Bytes(hopfields, 0, len(hopfields)), R50)
+preserves let currHfStart := currHfIdx * path.HopLen in
+	let currHfEnd := currHfStart + path.HopLen in
+	acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R50) &&
+	acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R50)
+ensures let currHfStart := currHfIdx * path.HopLen in
+	let currHfEnd := currHfStart + path.HopLen in
+	hopFields(hopfields, 0, 0, segLen)[:currHfIdx] ==
+		hopFields(hopfields[:currHfStart], 0, 0, currHfIdx) &&
+	hopFields(hopfields, 0, 0, segLen)[currHfIdx + 1:] ==
+		hopFields(hopfields[currHfEnd:], 0, 0, segLen - currHfIdx - 1)
+decreases
+func splitHopFieldsInPastAndFuture(hopfields []byte, currHfIdx int, segLen int) {
+	currHfStart := currHfIdx * path.HopLen
+	currHfEnd := currHfStart + path.HopLen
+	hf := hopFields(hopfields, 0, 0, segLen)
+	hopFieldsBytePositionsLemma(hopfields, 0, 0, segLen, R54)
+	reveal hopFieldsBytePositions(hopfields, 0, 0, segLen, hf)
+
+	hfPast := hopFields(hopfields, 0, 0, currHfIdx)
+	hopFieldsBytePositionsLemma(hopfields, 0, 0, currHfIdx, R54)
+	reveal hopFieldsBytePositions(hopfields, 0, 0, currHfIdx, hfPast)
+	widenHopFields(hopfields, 0, 0, currHfIdx, 0, currHfStart, R52)
+
+	hfFuture := hopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1)
+	hopFieldsBytePositionsLemma(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, R54)
+	reveal hopFieldsBytePositions(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, hfFuture)
+	widenHopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1,
+		currHfEnd, segLen * path.HopLen, R52)
+}
+
+// `SplitHopfields` splits the permission to the raw bytes of a segment into the permission
+// to the subslice containing all past hopfields, to the subslice containing the current hopfield,
+// and to another containing all future hopfields.
+ghost
+requires 0 < p
+requires 0 <= currHfIdx && currHfIdx < segLen
+requires segLen * path.HopLen == len(hopfields)
+requires acc(sl.Bytes(hopfields, 0, len(hopfields)), p)
+ensures let currHfStart := currHfIdx * path.HopLen in
+	let currHfEnd := currHfStart + path.HopLen in
+	acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), p) &&
+	acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), p) &&
+	acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), p)
+decreases
+func SplitHopfields(hopfields []byte, currHfIdx int, segLen int, p perm) {
+	currHfStart := currHfIdx * path.HopLen
+	currHfEnd := currHfStart + path.HopLen
+	sl.SplitByIndex_Bytes(hopfields, 0, len(hopfields), currHfStart, p)
+	sl.SplitByIndex_Bytes(hopfields, currHfStart, len(hopfields), currHfEnd, p)
+	sl.Reslice_Bytes(hopfields, 0, currHfStart, p)
+	sl.Reslice_Bytes(hopfields, currHfStart, currHfEnd, p)
+	sl.Reslice_Bytes(hopfields, currHfEnd, len(hopfields), p)
+}
+
+// `CombineHopfields` combines the permissions to the slices of bytes storing the past hopfields,
+// current hopfield, and future hopfields of a segment into a single permission to the slice
+// containing all hopfields of that segment.
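+// It is the counterpart of `SplitHopfields`: bracketing an update of the current
+// hopfield with SplitHopfields(hopfields, currHfIdx, segLen, p) and a matching
+// CombineHopfields call restores acc(sl.Bytes(hopfields, 0, len(hopfields)), p).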
+ghost
+requires 0 < p
+requires 0 <= currHfIdx && currHfIdx < segLen
+requires segLen * path.HopLen == len(hopfields)
+requires let currHfStart := currHfIdx * path.HopLen in
+	let currHfEnd := currHfStart + path.HopLen in
+	acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), p) &&
+	acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), p) &&
+	acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), p)
+ensures acc(sl.Bytes(hopfields, 0, len(hopfields)), p)
+decreases
+func CombineHopfields(hopfields []byte, currHfIdx int, segLen int, p perm) {
+	currHfStart := currHfIdx * path.HopLen
+	currHfEnd := currHfStart + path.HopLen
+	sl.Unslice_Bytes(hopfields, currHfEnd, len(hopfields), p)
+	sl.Unslice_Bytes(hopfields, currHfStart, currHfEnd, p)
+	sl.Unslice_Bytes(hopfields, 0, currHfStart, p)
+	sl.CombineAtIndex_Bytes(hopfields, currHfStart, len(hopfields), currHfEnd, p)
+	sl.CombineAtIndex_Bytes(hopfields, 0, len(hopfields), currHfStart, p)
+}
\ No newline at end of file
diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go
index 68dd61154..4ee0008c6 100644
--- a/pkg/slayers/path/scion/raw.go
+++ b/pkg/slayers/path/scion/raw.go
@@ -22,6 +22,7 @@ import (
 	"github.com/scionproto/scion/pkg/slayers/path"
 	//@ . "github.com/scionproto/scion/verification/utils/definitions"
 	//@ sl "github.com/scionproto/scion/verification/utils/slices"
+	//@ io "verification/io"
 )

 // Raw is a raw representation of the SCION (data-plane) path type. It is designed to parse as
@@ -34,8 +35,11 @@ type Raw struct {

 // DecodeFromBytes only decodes the PathMetaHeader. Otherwise, nothing else is decoded and the
 // rest is simply kept as raw bytes.
 // @ requires s.NonInitMem()
-// @ preserves sl.AbsSlice_Bytes(data, 0, len(data))
+// @ preserves acc(sl.Bytes(data, 0, len(data)), R42)
 // @ ensures res == nil ==> s.Mem(data)
+// @ ensures res == nil ==>
+// @ 	s.GetBase(data).WeaklyValid() &&
+// @ 	s.GetBase(data).EqAbsHeader(data)
 // @ ensures res != nil ==> (s.NonInitMem() && res.ErrorMem())
 // @ decreases
 func (s *Raw) DecodeFromBytes(data []byte) (res error) {
@@ -60,8 +64,8 @@ func (s *Raw) DecodeFromBytes(data []byte) (res error) {
 // SerializeTo writes the path to a slice. The slice must be big enough to hold the entire data,
 // otherwise an error is returned.
 // @ preserves acc(s.Mem(ubuf), R1)
-// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf))
-// @ preserves sl.AbsSlice_Bytes(b, 0, len(b))
+// @ preserves sl.Bytes(ubuf, 0, len(ubuf))
+// @ preserves sl.Bytes(b, 0, len(b))
 // @ ensures r != nil ==> r.ErrorMem()
 // @ decreases
 func (s *Raw) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
@@ -76,7 +80,7 @@ func (s *Raw) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) {
 	// directly.
//@ unfold acc(s.Base.Mem(), R1) //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ assert sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)) + //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) //@ sl.SplitRange_Bytes(s.Raw, 0, MetaLen, writePerm) if err := s.PathMeta.SerializeTo(s.Raw[:MetaLen]); err != nil { // @ Unreachable() @@ -84,11 +88,11 @@ func (s *Raw) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { } //@ fold acc(s.Base.Mem(), R1) //@ sl.CombineRange_Bytes(s.Raw, 0, MetaLen, writePerm) - //@ unfold acc(sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)), R2) - //@ unfold sl.AbsSlice_Bytes(b, 0, len(b)) + //@ unfold acc(sl.Bytes(s.Raw, 0, len(s.Raw)), R2) + //@ unfold sl.Bytes(b, 0, len(b)) copy(b, s.Raw /*@ , R2 @*/) - //@ fold sl.AbsSlice_Bytes(b, 0, len(b)) - //@ fold acc(sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)), R2) + //@ fold sl.Bytes(b, 0, len(b)) + //@ fold acc(sl.Bytes(s.Raw, 0, len(s.Raw)), R2) //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm) //@ fold acc(s.Mem(ubuf), R1) return nil @@ -96,7 +100,7 @@ func (s *Raw) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // Reverse reverses the path such that it can be used in the reverse direction. // @ requires s.Mem(ubuf) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures err == nil ==> typeOf(p) == type[*Raw] // @ ensures err == nil ==> p != nil && p != (*Raw)(nil) // @ ensures err == nil ==> p.Mem(ubuf) @@ -131,11 +135,11 @@ func (s *Raw) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, err error) { // ToDecoded transforms a scion.Raw to a scion.Decoded. // @ preserves acc(s.Mem(ubuf), R5) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures err == nil ==> ( // @ let newUb := s.RawBufferMem(ubuf) in // @ d.Mem(newUb) && -// @ (old(s.ValidCurrIdxs(ubuf)) ==> d.ValidCurrIdxs(newUb))) +// @ (old(s.GetBase(ubuf).Valid()) ==> d.GetBase(newUb).Valid())) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { @@ -143,9 +147,9 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { //@ unfold acc(s.Base.Mem(), R6) //@ ghost var base Base = s.Base //@ ghost var pathMeta MetaHdr = s.Base.PathMeta - //@ ghost validIdxs := s.ValidCurrIdxs(ubuf) + //@ ghost validIdxs := s.GetBase(ubuf).Valid() //@ assert validIdxs ==> s.Base.PathMeta.InBounds() - //@ assert validIdxs ==> base.ValidCurrIdxsSpec() + //@ assert validIdxs ==> base.Valid() //@ assert s.Raw[:MetaLen] === ubuf[:MetaLen] // (VerifiedSCION) In this method, many slice operations are done in two @@ -157,40 +161,40 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // @ Unreachable() return nil, err } - //@ ghost b0 := (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[0]) - //@ ghost b1 := (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[1]) - //@ ghost b2 := (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[2]) - //@ ghost b3 := (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[3]) + //@ ghost b0 := (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[0]) + //@ ghost b1 := (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[1]) + //@ ghost b2 := (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in s.Raw[2]) + //@ ghost b3 := (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) 
in s.Raw[3]) //@ assert let line := s.PathMeta.SerializedToLine() in binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, line) //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) //@ assert &ubuf[0] == &s.Raw[:MetaLen][0] //@ assert &ubuf[1] == &s.Raw[:MetaLen][1] //@ assert &ubuf[2] == &s.Raw[:MetaLen][2] //@ assert &ubuf[3] == &s.Raw[:MetaLen][3] - //@ assert b0 == (unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in ubuf[0]) + //@ assert b0 == (unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in ubuf[0]) // (VerifiedSCION): for some reason, silicon requires the following line to be able to prove // bX == ubuf[X]. - //@ assert unfolding acc(sl.AbsSlice_Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in - //@ (ubuf[0] == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), _) in ubuf[0])) + //@ assert unfolding acc(sl.Bytes(s.Raw[:MetaLen], 0, MetaLen), _) in + //@ (ubuf[0] == (unfolding acc(sl.Bytes(ubuf, 0, len(ubuf)), _) in ubuf[0])) //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) decoded := &Decoded{} //@ fold decoded.Base.NonInitMem() //@ fold decoded.NonInitMem() //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), len(s.Raw), HalfPerm) - //@ assert unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), _) in - //@ (ubuf[0] == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0])) + //@ assert unfolding acc(sl.Bytes(ubuf, 0, len(ubuf)), _) in + //@ (ubuf[0] == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0])) //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), len(s.Raw), HalfPerm) //@ sl.Reslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) //@ assert &ubuf[0] == &ubuf[:len(s.Raw)][0] //@ assert &ubuf[1] == &ubuf[:len(s.Raw)][1] //@ assert &ubuf[2] == &ubuf[:len(s.Raw)][2] //@ assert &ubuf[3] == &ubuf[:len(s.Raw)][3] - //@ assert unfolding acc(sl.AbsSlice_Bytes(ubuf[:len(s.Raw)], 0, len(s.Raw)), _) in - //@ (ubuf[0] == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0])) - //@ assert b0 == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0]) - //@ assert b1 == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[1]) - //@ assert b2 == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[2]) - //@ assert b3 == (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[3]) + //@ assert unfolding acc(sl.Bytes(ubuf[:len(s.Raw)], 0, len(s.Raw)), _) in + //@ (ubuf[0] == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0])) + //@ assert b0 == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[0]) + //@ assert b1 == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[1]) + //@ assert b2 == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[2]) + //@ assert b3 == (unfolding acc(sl.Bytes(ubuf, 0, len(s.Raw)), _) in ubuf[3]) //@ sl.Reslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) if err := decoded.DecodeFromBytes(s.Raw); err != nil { //@ sl.Unslice_Bytes(ubuf, 0, len(s.Raw), writePerm) @@ -203,8 +207,7 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { //@ ghost if validIdxs { //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) //@ assert pathMeta == decoded.GetMetaHdr(s.Raw) - //@ assert decoded.GetBase(s.Raw).ValidCurrIdxsSpec() - //@ assert decoded.ValidCurrIdxs(s.Raw) + //@ assert decoded.GetBase(s.Raw).Valid() //@ } //@ sl.Unslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) //@ sl.Unslice_Bytes(ubuf, 0, len(s.Raw), HalfPerm) @@ -217,63 +220,157 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // IncPath increments 
the path and writes it to the buffer. // @ requires s.Mem(ubuf) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) +// pres for IO: +// @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ requires validPktMetaHdr(ubuf) +// @ requires s.absPkt(ubuf).PathNotFullyTraversed() +// @ requires s.GetBase(ubuf).IsXoverSpec() ==> +// @ s.absPkt(ubuf).LeftSeg != none[io.IO_seg3] +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures old(unfolding s.Mem(ubuf) in unfolding -// @ s.Base.Mem() in (s.NumINF <= 0 || int(s.PathMeta.CurrHF) >= s.NumHops-1)) ==> r != nil +// @ s.Base.Mem() in (s.NumINF <= 0 || int(s.PathMeta.CurrHF) >= s.NumHops-1)) ==> r != nil // @ ensures r == nil ==> s.Mem(ubuf) // @ ensures r != nil ==> s.NonInitMem() // @ ensures r != nil ==> r.ErrorMem() +// post for IO: +// @ ensures r == nil ==> s.GetBase(ubuf).EqAbsHeader(ubuf) && validPktMetaHdr(ubuf) +// @ ensures r == nil && old(s.GetBase(ubuf).IsXoverSpec()) ==> +// @ s.absPkt(ubuf) == AbsXover(old(s.absPkt(ubuf))) +// @ ensures r == nil && !old(s.GetBase(ubuf).IsXoverSpec()) ==> +// @ s.absPkt(ubuf) == AbsIncPath(old(s.absPkt(ubuf))) // @ decreases func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ unfold s.Mem(ubuf) + //@ reveal validPktMetaHdr(ubuf) + //@ unfold acc(s.Base.Mem(), R56) + //@ oldCurrInfIdx := int(s.PathMeta.CurrINF) + //@ oldCurrHfIdx := int(s.PathMeta.CurrHF) + //@ oldSeg1Len := int(s.PathMeta.SegLen[0]) + //@ oldSeg2Len := int(s.PathMeta.SegLen[1]) + //@ oldSeg3Len := int(s.PathMeta.SegLen[2]) + //@ oldSegs := io.CombineSegLens(oldSeg1Len, oldSeg2Len, oldSeg3Len) + //@ oldSegLen := oldSegs.LengthOfCurrSeg(oldCurrHfIdx) + //@ oldPrevSegLen := oldSegs.LengthOfPrevSeg(oldCurrHfIdx) + //@ oldOffset := HopFieldOffset(s.Base.NumINF, oldPrevSegLen, 0) + //@ fold acc(s.Base.Mem(), R56) if err := s.Base.IncPath(); err != nil { //@ fold s.NonInitMem() return err } - //@ fold s.Mem(ubuf) - //@ s.RawIdxPerm(ubuf, MetaLen, writePerm) - //@ unfold acc(s.Base.Mem(), 1/2) + //@ fold acc(s.Mem(ubuf), HalfPerm) + //@ sl.SplitRange_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ sl.Reslice_Bytes(ubuf, MetaLen, len(ubuf), HalfPerm) + //@ tail := ubuf[MetaLen:] + //@ unfold acc(sl.Bytes(tail, 0, len(tail)), R50) + //@ oldHfIdxSeg := oldCurrHfIdx-oldPrevSegLen + //@ WidenCurrSeg(ubuf, oldOffset + MetaLen, oldCurrInfIdx, oldHfIdxSeg, oldSegLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ LenCurrSeg(tail, oldOffset, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) + //@ oldAbsPkt := reveal s.absPkt(ubuf) + //@ sl.SplitRange_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ unfold acc(s.Base.Mem(), R2) err := s.PathMeta.SerializeTo(s.Raw[:MetaLen]) - //@ fold acc(s.Base.Mem(), 1/2) - //@ s.UndoRawIdxPerm(ubuf, MetaLen, writePerm) + //@ assert s.Base.Valid() + //@ assert s.PathMeta.InBounds() + //@ v := s.Raw[:MetaLen] + //@ b0 := sl.GetByte(v, 0, MetaLen, 0) + //@ b1 := sl.GetByte(v, 0, MetaLen, 1) + //@ b2 := sl.GetByte(v, 0, MetaLen, 2) + //@ b3 := sl.GetByte(v, 0, MetaLen, 3) + //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) + //@ assert s.PathMeta.EqAbsHeader(v) + //@ assert RawBytesToBase(v).Valid() + //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ 
assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(ubuf) + //@ assert reveal validPktMetaHdr(ubuf) + //@ currInfIdx := int(s.PathMeta.CurrINF) + //@ currHfIdx := int(s.PathMeta.CurrHF) + //@ assert currHfIdx == oldCurrHfIdx + 1 + + //@ ghost if(currInfIdx == oldCurrInfIdx) { + //@ IncCurrSeg(tail, oldOffset, oldCurrInfIdx, oldHfIdxSeg, oldSegLen) + //@ WidenCurrSeg(ubuf, oldOffset + MetaLen, oldCurrInfIdx, oldHfIdxSeg + 1, + //@ oldSegLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, oldCurrInfIdx + 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, oldCurrInfIdx + 2, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, oldCurrInfIdx - 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ assert reveal s.absPkt(ubuf) == AbsIncPath(oldAbsPkt) + //@ } else { + //@ segLen := oldSegs.LengthOfCurrSeg(currHfIdx) + //@ prevSegLen := oldSegs.LengthOfPrevSeg(currHfIdx) + //@ offsetWithHops := HopFieldOffset(s.Base.NumINF, prevSegLen, MetaLen) + //@ hfIdxSeg := currHfIdx-prevSegLen + //@ XoverSegNotNone(tail, oldCurrInfIdx, oldSegs) + //@ XoverCurrSeg(tail, oldCurrInfIdx + 1, oldCurrHfIdx, oldSegs) + //@ XoverLeftSeg(tail, oldCurrInfIdx + 2, oldSegs) + //@ XoverMidSeg(tail, oldCurrInfIdx - 1, oldSegs) + //@ XoverRightSeg(tail, oldCurrInfIdx, oldCurrHfIdx, oldSegs) + //@ WidenCurrSeg(ubuf, offsetWithHops, currInfIdx, hfIdxSeg, segLen, MetaLen, MetaLen, len(ubuf)) + //@ WidenLeftSeg(ubuf, currInfIdx + 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenMidSeg(ubuf, currInfIdx + 2, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ WidenRightSeg(ubuf, currInfIdx - 1, oldSegs, MetaLen, MetaLen, len(ubuf)) + //@ assert reveal s.absPkt(ubuf) == AbsXover(oldAbsPkt) + //@ } + + //@ fold acc(sl.Bytes(tail, 0, len(tail)), R50) + //@ sl.Unslice_Bytes(ubuf, MetaLen, len(ubuf), HalfPerm) + //@ sl.CombineRange_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ fold acc(s.Base.Mem(), R2) + //@ fold acc(s.Mem(ubuf), HalfPerm) return err } // GetInfoField returns the InfoField at a given index. 
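+// On success, the returned struct is tied to the raw bytes: the postcondition
+// CorrectlyDecodedInfWithIdx (defined in raw_spec.gobra) states that ifield
+// abstracts to exactly the info field bytes at offset MetaLen + idx*path.InfoLen of ubuf.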
-// @ requires acc(s.Mem(ubuf), R10) // @ requires 0 <= idx -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R10) -// @ ensures acc(s.Mem(ubuf), R10) -// @ ensures (idx < old(s.GetNumINF(ubuf))) == (err == nil) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R10) +// @ preserves acc(s.Mem(ubuf), R10) +// @ ensures (idx < s.GetNumINF(ubuf)) == (err == nil) +// @ ensures err == nil ==> s.CorrectlyDecodedInfWithIdx(ubuf, idx, ifield) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.InfoField, err error) { - //@ unfold acc(s.Mem(ubuf), R10) - //@ unfold acc(s.Base.Mem(), R11) + //@ unfold acc(s.Mem(ubuf), R11) + //@ unfold acc(s.Base.Mem(), R12) if idx >= s.NumINF { e := serrors.New("InfoField index out of bounds", "max", s.NumINF-1, "actual", idx) - //@ fold acc(s.Base.Mem(), R11) - //@ fold acc(s.Mem(ubuf), R10) + //@ fold acc(s.Base.Mem(), R12) + //@ fold acc(s.Mem(ubuf), R11) return path.InfoField{}, e } - //@ fold acc(s.Base.Mem(), R11) - //@ fold acc(s.Mem(ubuf), R10) + //@ fold acc(s.Base.Mem(), R12) infOffset := MetaLen + idx*path.InfoLen info /*@@@*/ := path.InfoField{} - //@ s.RawRangePerm(ubuf, infOffset, infOffset+path.InfoLen, R10) + //@ sl.SplitRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R20) if err := info.DecodeFromBytes(s.Raw[infOffset : infOffset+path.InfoLen]); err != nil { //@ Unreachable() return path.InfoField{}, err } - //@ s.UndoRawRangePerm(ubuf, infOffset, infOffset+path.InfoLen, R10) + //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21) + //@ unfold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) + //@ unfold acc(sl.Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) + //@ assert reveal path.BytesToAbsInfoField(ubuf, infOffset) == + //@ reveal path.BytesToAbsInfoField(ubuf[infOffset : infOffset+path.InfoLen], 0) + //@ assert info.ToAbsInfoField() == + //@ reveal path.BytesToAbsInfoField(ubuf, infOffset) + //@ fold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) + //@ fold acc(sl.Bytes(ubuf[infOffset : infOffset+path.InfoLen], 0, path.InfoLen), R56) + //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, R21) + //@ fold acc(s.Mem(ubuf), R11) + //@ assert reveal s.CorrectlyDecodedInfWithIdx(ubuf, idx, info) return info, nil } // GetCurrentInfoField is a convenience method that returns the current info field pointed to by the // CurrINF index in the path meta header. // @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R1) -// @ ensures (r == nil) == (s.GetCurrINF(ubuf) < s.GetNumINF(ubuf)) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R9) +// @ ensures (r == nil) == s.GetBase(ubuf).ValidCurrInfSpec() +// @ ensures r == nil ==> s.CorrectlyDecodedInf(ubuf, res) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoField, r error) { @@ -285,72 +382,135 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie //@ assume 0 <= idx //@ fold acc(s.Base.Mem(), R10) //@ fold acc(s.Mem(ubuf), R9) + //@ assert forall res path.InfoField :: { s.CorrectlyDecodedInf(ubuf, res) } s.GetBase(ubuf).ValidCurrInfSpec() ==> + //@ reveal s.CorrectlyDecodedInf(ubuf, res) == reveal s.CorrectlyDecodedInfWithIdx(ubuf, idx, res) return s.GetInfoField(idx /*@, ubuf @*/) } // SetInfoField updates the InfoField at a given index.
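+// A usage sketch (not part of the original code; it assumes the caller already
+// holds s.Mem(ub) and sl.Bytes(ub, 0, len(ub)) plus the IO preconditions below):
+//
+//	//@ ghost oldPkt := s.absPkt(ub)
+//	//@ ghost currInf := int(s.GetCurrINF(ub))
+//	if err := s.SetInfoField(info, currInf /*@, ub @*/); err == nil {
+//		// updating the current info field updates the packet abstraction:
+//		//@ assert s.absPkt(ub) == oldPkt.UpdateInfoField(info.ToAbsInfoField())
+//	}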
-// @ requires 0 <= idx -// @ preserves acc(s.Mem(ubuf), R20) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) -// @ ensures r != nil ==> r.ErrorMem() +// @ requires 0 <= idx +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) +// @ requires acc(s.Mem(ubuf), R20) +// pres for IO: +// @ requires validPktMetaHdr(ubuf) +// @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ ensures acc(s.Mem(ubuf), R20) +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) +// @ ensures r != nil ==> r.ErrorMem() +// posts for IO: +// @ ensures r == nil ==> +// @ validPktMetaHdr(ubuf) && s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> +// @ let oldPkt := old(s.absPkt(ubuf)) in +// @ let newPkt := oldPkt.UpdateInfoField(info.ToAbsInfoField()) in +// @ s.absPkt(ubuf) == newPkt // @ decreases +// @ #backend[exhaleMode(1)] func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @*/) (r error) { //@ share info - //@ unfold acc(s.Mem(ubuf), R20) - //@ unfold acc(s.Base.Mem(), R20) + //@ reveal validPktMetaHdr(ubuf) + //@ unfold acc(s.Mem(ubuf), R50) + //@ unfold acc(s.Base.Mem(), R50) + //@ currInfIdx := int(s.PathMeta.CurrINF) + //@ currHfIdx := int(s.PathMeta.CurrHF) + //@ seg1Len := int(s.PathMeta.SegLen[0]) + //@ seg2Len := int(s.PathMeta.SegLen[1]) + //@ seg3Len := int(s.PathMeta.SegLen[2]) + //@ segLens := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + //@ segLen := segLens.LengthOfCurrSeg(currHfIdx) + //@ prevSegLen := segLens.LengthOfPrevSeg(currHfIdx) + //@ offset := HopFieldOffset(s.Base.NumINF, prevSegLen, MetaLen) + //@ hopfieldOffset := MetaLen + s.NumINF*path.InfoLen if idx >= s.NumINF { err := serrors.New("InfoField index out of bounds", "max", s.NumINF-1, "actual", idx) - //@ fold acc(s.Base.Mem(), R20) - //@ fold acc(s.Mem(ubuf), R20) + //@ fold acc(s.Base.Mem(), R50) + //@ fold acc(s.Mem(ubuf), R50) return err } infOffset := MetaLen + idx*path.InfoLen - //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ assert sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)) - //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, writePerm) + + //@ SliceBytesIntoInfoFields(ubuf, s.NumINF, segLens, HalfPerm) + //@ SliceBytesIntoSegments(ubuf, segLens, R40) + + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ oldInfo := path.BytesToAbsInfoField(ubuf[infOffset : infOffset+path.InfoLen], 0) + //@ newInfo := info.ToAbsInfoField() + //@ hfIdxSeg := currHfIdx-prevSegLen + //@ hopfields := ubuf[offset:offset + segLen*path.HopLen] + //@ ghost if idx == currInfIdx { + //@ CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen) + //@ LeftSegEquality(ubuf, currInfIdx+1, segLens) + //@ MidSegEquality(ubuf, currInfIdx+2, segLens) + //@ RightSegEquality(ubuf, currInfIdx-1, segLens) + //@ } + //@ reveal s.absPkt(ubuf) + //@ sl.SplitRange_Bytes(ubuf[:hopfieldOffset], infOffset, infOffset+path.InfoLen, R40) + //@ sl.SplitRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, HalfPerm-R40) ret := info.SerializeTo(s.Raw[infOffset : infOffset+path.InfoLen]) - //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, writePerm) - //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ fold acc(s.Base.Mem(), R20) - //@ fold acc(s.Mem(ubuf), R20) + //@ sl.CombineRange_Bytes(ubuf[:hopfieldOffset], infOffset, infOffset+path.InfoLen, R40) + //@ sl.CombineRange_Bytes(ubuf, infOffset, infOffset+path.InfoLen, HalfPerm-R40) + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ assert reveal validPktMetaHdr(ubuf) + //@ ghost if idx == currInfIdx { + //@ 
CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen) + //@ UpdateCurrSegInfo(hopfields, hfIdxSeg, segLen, oldInfo, newInfo) + //@ LeftSegEquality(ubuf, currInfIdx+1, segLens) + //@ MidSegEquality(ubuf, currInfIdx+2, segLens) + //@ RightSegEquality(ubuf, currInfIdx-1, segLens) + //@ reveal s.absPkt(ubuf) + //@ } + //@ CombineBytesFromSegments(ubuf, segLens, R40) + //@ CombineBytesFromInfoFields(ubuf, s.NumINF, segLens, HalfPerm) + //@ fold acc(s.Base.Mem(), R50) + //@ fold acc(s.Mem(ubuf), R50) return ret } // GetHopField returns the HopField at a given index. // @ requires 0 <= idx +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R10) // @ preserves acc(s.Mem(ubuf), R10) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R10) -// @ ensures (idx < old(s.GetNumHops(ubuf))) == (r == nil) +// @ ensures (idx < s.GetNumHops(ubuf)) == (r == nil) +// @ ensures r == nil ==> s.CorrectlyDecodedHfWithIdx(ubuf, idx, res) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Raw) GetHopField(idx int /*@, ghost ubuf []byte @*/) (res path.HopField, r error) { - //@ unfold acc(s.Mem(ubuf), R10) - //@ unfold acc(s.Base.Mem(), R11) + //@ unfold acc(s.Mem(ubuf), R11) + //@ unfold acc(s.Base.Mem(), R12) if idx >= s.NumHops { err := serrors.New("HopField index out of bounds", "max", s.NumHops-1, "actual", idx) - //@ fold acc(s.Base.Mem(), R11) - //@ fold acc(s.Mem(ubuf), R10) + //@ fold acc(s.Base.Mem(), R12) + //@ fold acc(s.Mem(ubuf), R11) return path.HopField{}, err } hopOffset := MetaLen + s.NumINF*path.InfoLen + idx*path.HopLen - //@ fold acc(s.Base.Mem(), R11) - //@ fold acc(s.Mem(ubuf), R10) + //@ fold acc(s.Base.Mem(), R12) hop /*@@@*/ := path.HopField{} - //@ s.RawRangePerm(ubuf, hopOffset, hopOffset+path.HopLen, R10) + //@ sl.SplitRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R20) if err := hop.DecodeFromBytes(s.Raw[hopOffset : hopOffset+path.HopLen]); err != nil { //@ Unreachable() return path.HopField{}, err } - //@ s.UndoRawRangePerm(ubuf, hopOffset, hopOffset+path.HopLen, R10) //@ unfold hop.Mem() + //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R21) + //@ unfold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) + //@ unfold acc(sl.Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) + //@ assert hop.ToIO_HF() == + //@ path.BytesToIO_HF(ubuf, 0, hopOffset, len(ubuf)) + //@ fold acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) + //@ fold acc(sl.Bytes(ubuf[hopOffset : hopOffset+path.HopLen], 0, path.HopLen), R56) + //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, R21) + //@ fold acc(s.Mem(ubuf), R11) + //@ assert reveal s.CorrectlyDecodedHfWithIdx(ubuf, idx, hop) return hop, nil } // GetCurrentHopField is a convenience method that returns the current hop field pointed to by the // CurrHF index in the path meta header. 
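+// On success, the returned hop field is moreover correctly decoded from ubuf:
+// CorrectlyDecodedHf (see raw_spec.gobra) ties res to the bytes at the CurrHF offset.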
// @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R1) -// @ ensures (r == nil) == (s.GetCurrHF(ubuf) < s.GetNumHops(ubuf)) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R9) +// @ ensures (r == nil) == s.GetBase(ubuf).ValidCurrHfSpec() +// @ ensures r == nil ==> s.CorrectlyDecodedHf(ubuf, res) // @ ensures r != nil ==> r.ErrorMem() // @ decreases func (s *Raw) GetCurrentHopField( /*@ ghost ubuf []byte @*/ ) (res path.HopField, r error) { @@ -362,25 +522,45 @@ func (s *Raw) GetCurrentHopField( /*@ ghost ubuf []byte @*/ ) (res path.HopField //@ assume 0 <= idx //@ fold acc(s.Base.Mem(), R10) //@ fold acc(s.Mem(ubuf), R9) + //@ assert forall res path.HopField :: { s.CorrectlyDecodedHf(ubuf, res) } s.GetBase(ubuf).ValidCurrHfSpec() ==> + //@ reveal s.CorrectlyDecodedHf(ubuf, res) == reveal s.CorrectlyDecodedHfWithIdx(ubuf, idx, res) return s.GetHopField(idx /*@, ubuf @*/) } // SetHopField updates the HopField at a given index. // @ requires 0 <= idx -// @ preserves acc(s.Mem(ubuf), R20) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) -// @ ensures r != nil ==> r.ErrorMem() +// @ requires acc(s.Mem(ubuf), R20) +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) +// pres for IO: +// @ requires validPktMetaHdr(ubuf) +// @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ requires s.absPkt(ubuf).PathNotFullyTraversed() +// @ ensures acc(s.Mem(ubuf), R20) +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) +// @ ensures r != nil ==> r.ErrorMem() +// posts for IO: +// @ ensures r == nil ==> +// @ validPktMetaHdr(ubuf) && +// @ s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ ensures r == nil && idx == int(old(s.GetCurrHF(ubuf))) ==> +// @ let oldPkt := old(s.absPkt(ubuf)) in +// @ let newPkt := oldPkt.UpdateHopField(hop.ToIO_HF()) in +// @ s.absPkt(ubuf) == newPkt // @ decreases +// @ #backend[exhaleMode(1)] func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) (r error) { - //@ share hop + // (VerifiedSCION) Due to an incompleteness (https://github.com/viperproject/gobra/issues/770), + // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. 
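+	// The temporary is addressable (note the /*@@@*/ marker below); together with
+	// path.AbsMacArrayCongruence, which relates the MAC arrays of `hop` and
+	// `tmpHopField`, this lets the postconditions still be stated in terms of
+	// `hop` even though `tmpHopField` is the value actually serialized.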
+ tmpHopField /*@@@*/ := hop + //@ path.AbsMacArrayCongruence(hop.Mac, tmpHopField.Mac) // (VerifiedSCION) Cannot assert bounds of uint: // https://github.com/viperproject/gobra/issues/192 - //@ assume 0 <= hop.ConsIngress && 0 <= hop.ConsEgress - //@ fold hop.Mem() + //@ assume 0 <= tmpHopField.ConsIngress && 0 <= tmpHopField.ConsEgress + //@ fold acc(tmpHopField.Mem(), R9) //@ unfold acc(s.Mem(ubuf), R20) //@ unfold acc(s.Base.Mem(), R20) if idx >= s.NumHops { - // (gavin) introduced `err` + // (VerifiedSCION) introduced `err` err := serrors.New("HopField index out of bounds", "max", s.NumHops-1, "actual", idx) //@ fold acc(s.Base.Mem(), R20) //@ fold acc(s.Mem(ubuf), R20) @@ -388,13 +568,21 @@ func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) } hopOffset := MetaLen + s.NumINF*path.InfoLen + idx*path.HopLen //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ assert sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)) + //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) //@ sl.SplitRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) - ret := hop.SerializeTo(s.Raw[hopOffset : hopOffset+path.HopLen]) + ret := tmpHopField.SerializeTo(s.Raw[hopOffset : hopOffset+path.HopLen]) //@ sl.CombineRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm) //@ fold acc(s.Base.Mem(), R20) //@ fold acc(s.Mem(ubuf), R20) + // (VerifiedSCION) The proof for these assumptions is provided in PR #361 + // (https://github.com/viperproject/VerifiedSCION/pull/361), which will + // be merged once the performance issues are resolved. + //@ TemporaryAssumeForIO(validPktMetaHdr(ubuf) && s.GetBase(ubuf).EqAbsHeader(ubuf)) + //@ TemporaryAssumeForIO(idx == int(old(s.GetCurrHF(ubuf))) ==> + //@ let oldPkt := old(s.absPkt(ubuf)) in + //@ let newPkt := oldPkt.UpdateHopField(hop.ToIO_HF()) in + //@ s.absPkt(ubuf) == newPkt) return ret } @@ -418,12 +606,26 @@ func (s *Raw) IsPenultimateHop( /*@ ghost ubuf []byte @*/ ) bool { } // IsLastHop returns whether the current hop is the last hop on the path. -// @ preserves acc(s.Mem(ubuf), R20) +// @ preserves acc(s.Mem(ubuf), R40) +// @ ensures res == s.IsLastHopSpec(ubuf) // @ decreases -func (s *Raw) IsLastHop( /*@ ghost ubuf []byte @*/ ) bool { - //@ unfold acc(s.Mem(ubuf), R20) - //@ defer fold acc(s.Mem(ubuf), R20) - //@ unfold acc(s.Base.Mem(), R20) - //@ defer fold acc(s.Base.Mem(), R20) +func (s *Raw) IsLastHop( /*@ ghost ubuf []byte @*/ ) (res bool) { + //@ unfold acc(s.Mem(ubuf), R40) + //@ defer fold acc(s.Mem(ubuf), R40) + //@ unfold acc(s.Base.Mem(), R40) + //@ defer fold acc(s.Base.Mem(), R40) return int(s.PathMeta.CurrHF) == (s.NumHops - 1) } + +// CurrINFMatchesCurrHF returns whether the path's current hopfield +// is in the path's current segment. +// @ preserves acc(s.Mem(ub), R40) +// @ ensures res == s.GetBase(ub).CurrInfMatchesCurrHFSpec() +// @ decreases +func (s *Raw) CurrINFMatchesCurrHF( /*@ ghost ub []byte @*/ ) (res bool) { + // @ unfold acc(s.Mem(ub), R40) + // @ defer fold acc(s.Mem(ub), R40) + // @ unfold acc(s.Base.Mem(), R40) + // @ defer fold acc(s.Base.Mem(), R40) + return s.PathMeta.CurrINF == s.infIndexForHF(s.PathMeta.CurrHF) +} diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index da1a0d05a..9bf4537db 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -18,8 +18,10 @@ package scion import ( "github.com/scionproto/scion/pkg/slayers/path" - . 
"github.com/scionproto/scion/verification/utils/definitions" - sl "github.com/scionproto/scion/verification/utils/slices" + . "verification/utils/definitions" + sl "verification/utils/slices" + "verification/dependencies/encoding/binary" + "verification/io" ) /**** Predicates ****/ @@ -39,6 +41,17 @@ pred (s *Raw) Mem(buf []byte) { (*Raw) implements path.Path +ghost +pure +requires acc(s.Mem(buf), _) +requires acc(sl.Bytes(buf, 0, len(buf)), R42) +decreases +func (s *Raw) IsValidResultOfDecoding(buf []byte, err error) (res bool) { + return let base := s.GetBase(buf) in + base.EqAbsHeader(buf) && + base.WeaklyValid() +} + /**** Stubs ****/ /** * This method is not part of the original SCION codebase. @@ -60,15 +73,22 @@ func (s *Raw) Type(ghost buf []byte) (t path.Type) { * Unfortunately, Gobra does not fully support them yet, so we * introduced this wrapper method which acts as a wrapper. */ -pure -requires acc(s.Mem(buf), _) -ensures unfolding acc(s.Mem(buf), _) in l == s.Base.Len() -ensures l >= 0 +preserves acc(s.Mem(buf), R50) +ensures l == s.LenSpec(buf) decreases func (s *Raw) Len(ghost buf []byte) (l int) { return unfolding acc(s.Mem(buf), _) in s.Base.Len() } +ghost +pure +requires acc(s.Mem(ub), _) +ensures unfolding acc(s.Mem(ub), _) in l == s.Base.Len() +decreases +func (s *Raw) LenSpec(ghost ub []byte) (l int) { + return unfolding acc(s.Mem(ub), _) in s.Base.Len() +} + /** * This method is not part of the original SCION codebase. * Instead, `IsFirstHopAfterXover` was defined in `*Base` via embedded structs. @@ -91,37 +111,14 @@ func (s *Raw) IsFirstHopAfterXover(ghost ub []byte) (res bool) { * introduced this wrapper method which acts as a wrapper. */ preserves acc(s.Mem(ub), R9) +ensures res == s.GetBase(ub).IsXoverSpec() decreases -func (s *Raw) IsXover(ghost ub []byte) bool { +func (s *Raw) IsXover(ghost ub []byte) (res bool) { unfold acc(s.Mem(ub), R9) defer fold acc(s.Mem(ub), R9) return s.Base.IsXover() } -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) ValidCurrINF(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - s.Base.ValidCurrINF() -} - -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) ValidCurrHF(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - s.Base.ValidCurrHF() -} - -ghost -requires acc(s.Mem(ub), _) -decreases -pure func (s *Raw) ValidCurrIdxs(ghost ub []byte) bool { - return unfolding acc(s.Mem(ub), _) in - s.Base.ValidCurrIdxs() -} - /**** End of Stubs ****/ /**** Lemmas ****/ @@ -138,149 +135,6 @@ func (s *Raw) DowngradePerm(buf []byte) { fold s.NonInitMem() } -/******** Lemma: RawPerm ********/ -pred (r *Raw) RawPermRemainder(ubuf []byte, p perm) { - 0 < p && - acc(r.Base.Mem(), p/2) && - acc(&r.Raw, p/2) && - len(r.Raw) <= len(ubuf) && - r.Raw === ubuf[:len(r.Raw)] && - acc(sl.AbsSlice_Bytes(ubuf, len(r.Raw), len(ubuf)), p) && - len(r.Raw) == r.Base.Len() -} - -ghost -requires 0 < p -requires acc(&r.Raw, p/2) && acc(sl.AbsSlice_Bytes(r.Raw, 0, len(r.Raw)), p) && acc(r.Base.Mem(), p/2) -requires r.RawPermRemainder(ubuf, p) -ensures acc(r.Mem(ubuf), p) -ensures acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -decreases -func (r *Raw) UndoRawPerm(ubuf []byte, p perm) { - unfold r.RawPermRemainder(ubuf, p) - sl.Unslice_Bytes(ubuf, 0, len(r.Raw), p) - sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), len(r.Raw), p) - fold acc(r.Mem(ubuf), p) -} - -ghost -requires 0 < p -requires acc(r.Mem(ubuf), p) -requires acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -ensures acc(&r.Raw, p/2) -ensures 
acc(sl.AbsSlice_Bytes(r.Raw, 0, len(r.Raw)), p) -ensures acc(r.Base.Mem(), p/2) -ensures r.RawPermRemainder(ubuf, p) -ensures r.Raw === old(unfolding acc(r.Mem(ubuf), p) in r.Raw) -decreases -func (r *Raw) RawPerm(ubuf []byte, p perm) { - unfold acc(r.Mem(ubuf), p) - sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), len(r.Raw), p) - sl.Reslice_Bytes(ubuf, 0, len(r.Raw), p) - fold r.RawPermRemainder(ubuf, p) -} -/******** End of Lemma: RawPerm ********/ - -/******** Lemma: RawIdxPerm ********/ -pred (r *Raw) RawIdxPermRemainder(ubuf []byte, idx int, p perm) { - 0 < p && - acc(r.Base.Mem(), p/2) && - acc(&r.Raw, p/2) && - len(r.Raw) <= len(ubuf) && - r.Raw === ubuf[:len(r.Raw)] && - acc(sl.AbsSlice_Bytes(ubuf, idx, len(ubuf)), p) && - len(r.Raw) == r.Base.Len() && - idx <= len(r.Raw) -} - -ghost -requires 0 < p -requires acc(&r.Raw, p/2) -requires 0 <= idx && idx <= len(r.Raw) -requires acc(sl.AbsSlice_Bytes(r.Raw[:idx], 0, idx), p) && acc(r.Base.Mem(), p/2) -requires r.RawIdxPermRemainder(ubuf, idx, p) -ensures acc(r.Mem(ubuf), p) -ensures acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -decreases -func (r *Raw) UndoRawIdxPerm(ubuf []byte, idx int, p perm) { - unfold r.RawIdxPermRemainder(ubuf, idx, p) - sl.Unslice_Bytes(ubuf, 0, idx, p) - sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), idx, p) - fold acc(r.Mem(ubuf), p) -} - -ghost -requires 0 < p -requires acc(r.Mem(ubuf), p) -requires acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -requires 0 <= idx && idx <= unfolding acc(r.Mem(ubuf), p) in len(r.Raw) -ensures acc(&r.Raw, p/2) -ensures r.Raw === old(unfolding acc(r.Mem(ubuf), p) in r.Raw) -ensures acc(sl.AbsSlice_Bytes(r.Raw[:idx], 0, idx), p) && acc(r.Base.Mem(), p/2) -ensures r.RawIdxPermRemainder(ubuf, idx, p) -decreases -func (r *Raw) RawIdxPerm(ubuf []byte, idx int, p perm) { - unfold acc(r.Mem(ubuf), p) - sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), idx, p) - sl.Reslice_Bytes(ubuf, 0, idx, p) - fold r.RawIdxPermRemainder(ubuf, idx, p) -} -/******** End of Lemma: RawIdxPerm ********/ - -/******** Lemma: RawRangePerm ********/ -pred (r *Raw) RawRangePermRemainder(ubuf []byte, start, end int, p perm) { - 0 < p && - acc(r.Base.Mem(), p) && - acc(&r.Raw, p/2) && - 0 <= start && start <= end && end <= len(r.Raw) && - len(r.Raw) <= len(ubuf) && - r.Raw === ubuf[:len(r.Raw)] && - acc(sl.AbsSlice_Bytes(r.Raw, 0, start), p) && - acc(sl.AbsSlice_Bytes(r.Raw, end, len(r.Raw)), p) && - acc(sl.AbsSlice_Bytes(ubuf, len(r.Raw), len(ubuf)), p) && - len(r.Raw) == r.Base.Len() -} - -ghost -requires 0 < p -requires acc(&r.Raw, p/2) -requires 0 <= start && start <= end && end <= len(r.Raw) -requires acc(sl.AbsSlice_Bytes(r.Raw[start:end], 0, end-start), p) -requires r.RawRangePermRemainder(ubuf, start, end, p) -ensures acc(r.Mem(ubuf), p) -ensures acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -decreases -func (r *Raw) UndoRawRangePerm(ubuf []byte, start, end int, p perm) { - unfold r.RawRangePermRemainder(ubuf, start, end, p) - sl.Unslice_Bytes(r.Raw, start, end, p) - sl.CombineAtIndex_Bytes(r.Raw, 0, end, start, p) - sl.CombineAtIndex_Bytes(r.Raw, 0, len(r.Raw), end, p) - fold r.RawPermRemainder(ubuf, p) - r.UndoRawPerm(ubuf, p) -} - -// Notice that no permission to r.Base.Mem() is provided, unlike the previous methods -ghost -requires 0 < p -requires acc(r.Mem(ubuf), p) -requires acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -requires 0 <= start && start <= end && end <= unfolding acc(r.Mem(ubuf), p) in len(r.Raw) -ensures acc(&r.Raw, p/2) -ensures r.Raw === old(unfolding acc(r.Mem(ubuf), p) in r.Raw) -ensures 
acc(sl.AbsSlice_Bytes(r.Raw[start:end], 0, end-start), p) -ensures r.RawRangePermRemainder(ubuf, start, end, p) -decreases -func (r *Raw) RawRangePerm(ubuf []byte, start, end int, p perm) { - unfold acc(r.Mem(ubuf), p) - sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), len(r.Raw), p) - sl.Reslice_Bytes(ubuf, 0, len(r.Raw), p) - sl.SplitByIndex_Bytes(r.Raw, 0, len(r.Raw), start, p) - sl.SplitByIndex_Bytes(r.Raw, start, len(r.Raw), end, p) - sl.Reslice_Bytes(r.Raw, start, end, p) - fold r.RawRangePermRemainder(ubuf, start, end, p) -} -/******** End of Lemma: RawRangePerm ********/ - ghost requires r.Mem(ubuf1) requires len(ubuf1) <= len(ubuf2) @@ -295,6 +149,13 @@ func (r *Raw) Widen(ubuf1, ubuf2 []byte) { /**** End of Lemmas ****/ /**** Start of helpful pure functions ****/ +ghost +requires acc(r.Mem(ub), _) +decreases +pure func (r *Raw) GetBase(ub []byte) Base { + return unfolding acc(r.Mem(ub), _) in r.Base.GetBase() +} + ghost requires acc(r.Mem(ub), _) decreases @@ -320,7 +181,8 @@ ghost requires acc(r.Mem(ub), _) decreases pure func (r *Raw) GetCurrHF(ghost ub []byte) uint8 { - return unfolding acc(r.Mem(ub), _) in (unfolding acc(r.Base.Mem(), _) in r.PathMeta.CurrHF) + return unfolding acc(r.Mem(ub), _) in + (unfolding acc(r.Base.Mem(), _) in r.PathMeta.CurrHF) } ghost @@ -338,4 +200,688 @@ decreases func (s *Raw) RawBufferNonInitMem() []byte { return unfolding acc(s.NonInitMem(), _) in s.Raw } -/**** End of helpful pure functions ****/ \ No newline at end of file +/**** End of helpful pure functions ****/ + +ghost +decreases +pure func HopFieldOffset(numINF int, currHF int, headerOffset int) int { + return path.InfoFieldOffset(numINF, headerOffset) + path.HopLen * currHF +} + +ghost +decreases +pure func PktLen(segs io.SegLens, headerOffset int) int { + return HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) + + path.HopLen * segs.TotalHops() +} + +ghost +requires 0 <= offset +requires 0 <= currHfIdx && currHfIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +ensures len(res) == segLen - currHfIdx +decreases segLen - currHfIdx +pure func hopFields( + raw []byte, + offset int, + currHfIdx int, + segLen int) (res seq[io.IO_HF]) { + return currHfIdx == segLen ? seq[io.IO_HF]{} : + let hf := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHfIdx, len(raw)) in + seq[io.IO_HF]{hf} ++ hopFields(raw, offset, currHfIdx + 1, segLen) +} + +ghost +ensures len(res) == len(hopfields) +decreases len(hopfields) +pure func segPast(hopfields seq[io.IO_HF]) (res seq[io.IO_HF]) { + return len(hopfields) == 0 ? seq[io.IO_HF]{} : + seq[io.IO_HF]{hopfields[len(hopfields)-1]} ++ segPast( + hopfields[:len(hopfields)-1]) +} + +ghost +ensures len(res) == len(hopfields) +decreases len(hopfields) +pure func segHistory(hopfields seq[io.IO_HF]) (res seq[io.IO_ahi]) { + return len(hopfields) == 0 ? 
seq[io.IO_ahi]{} : + seq[io.IO_ahi]{hopfields[len(hopfields)-1].Toab()} ++ segHistory( + hopfields[:len(hopfields)-1]) +} + +ghost +requires 0 <= offset +requires 0 < segLen +requires 0 <= currHfIdx && currHfIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +ensures len(res.Future) == segLen - currHfIdx +ensures len(res.History) == currHfIdx +ensures len(res.Past) == currHfIdx +decreases +pure func segment(raw []byte, + offset int, + currHfIdx int, + ainfo io.IO_ainfo, + uinfo set[io.IO_msgterm], + consDir bool, + peer bool, + segLen int) (res io.IO_seg2) { + return let hopfields := hopFields(raw, offset, 0, segLen) in + io.IO_seg3_ { + AInfo :ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields[:currHfIdx]), + Future : hopfields[currHfIdx:], + History : segHistory(hopfields[:currHfIdx]), + } +} + +ghost +opaque +requires 0 <= headerOffset +requires path.InfoFieldOffset(currInfIdx, headerOffset) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= len(raw) +requires 0 <= currHfIdx && currHfIdx <= segLen +requires 0 <= currInfIdx && currInfIdx < 3 +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func CurrSeg(raw []byte, + offset int, + currInfIdx int, + currHfIdx int, + segLen int, + headerOffset int) io.IO_seg3 { + return let ainfo := path.Timestamp(raw, currInfIdx, headerOffset) in + let consDir := path.ConsDir(raw, currInfIdx, headerOffset) in + let peer := path.Peer(raw, currInfIdx, headerOffset) in + let uinfo := path.AbsUinfo(raw, currInfIdx, headerOffset) in + segment(raw, offset, currHfIdx, ainfo, uinfo, consDir, peer, segLen) +} + +ghost +opaque +requires 0 <= headerOffset +requires segs.Valid() +requires PktLen(segs, headerOffset) <= len(raw) +requires 1 <= currInfIdx && currInfIdx < 4 +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func LeftSeg( + raw []byte, + currInfIdx int, + segs io.SegLens, + headerOffset int) option[io.IO_seg3] { + return let offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) in + (currInfIdx == 1 && segs.Seg2Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * segs.Seg1Len, currInfIdx, 0, segs.Seg2Len, headerOffset)) : + ((currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * (segs.Seg1Len + segs.Seg2Len), currInfIdx, 0, segs.Seg3Len, headerOffset)) : + none[io.IO_seg3]) +} + +ghost +opaque +requires 0 <= headerOffset +requires segs.Valid() +requires PktLen(segs, headerOffset) <= len(raw) +requires -1 <= currInfIdx && currInfIdx < 2 +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func RightSeg( + raw []byte, + currInfIdx int, + segs io.SegLens, + headerOffset int) option[io.IO_seg3] { + return let offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) in + (currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * segs.Seg1Len, currInfIdx, segs.Seg2Len, segs.Seg2Len, headerOffset)) : + (currInfIdx == 0 && segs.Seg2Len > 0) ? 
+ some(CurrSeg(raw, offset, currInfIdx, segs.Seg1Len, segs.Seg1Len, headerOffset)) : + none[io.IO_seg3] +} + +ghost +opaque +requires 0 <= headerOffset +requires segs.Valid() +requires PktLen(segs, headerOffset) <= len(raw) +requires 2 <= currInfIdx && currInfIdx < 5 +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func MidSeg( + raw []byte, + currInfIdx int, + segs io.SegLens, + headerOffset int) option[io.IO_seg3] { + return let offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) in + (currInfIdx == 4 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSeg(raw, offset, 0, segs.Seg1Len, segs.Seg1Len, headerOffset)) : + ((currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * (segs.Seg1Len + segs.Seg2Len), currInfIdx, 0, segs.Seg3Len, headerOffset)) : + none[io.IO_seg3]) +} + +ghost +opaque +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires validPktMetaHdr(raw) +decreases +pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { + return let _ := reveal validPktMetaHdr(raw) in + let metaHdr := RawBytesToMetaHdr(raw) in + let currInfIdx := int(metaHdr.CurrINF) in + let currHfIdx := int(metaHdr.CurrHF) in + let seg1Len := int(metaHdr.SegLen[0]) in + let seg2Len := int(metaHdr.SegLen[1]) in + let seg3Len := int(metaHdr.SegLen[2]) in + let segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) in + let segLen := segs.LengthOfCurrSeg(currHfIdx) in + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx) in + let numINF := segs.NumInfoFields() in + let offset := HopFieldOffset(numINF, prevSegLen, MetaLen) in + io.IO_Packet2 { + CurrSeg : CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen), + LeftSeg : LeftSeg(raw, currInfIdx + 1, segs, MetaLen), + MidSeg : MidSeg(raw, currInfIdx + 2, segs, MetaLen), + RightSeg : RightSeg(raw, currInfIdx - 1, segs, MetaLen), + } +} + +ghost +requires MetaLen <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func RawBytesToMetaHdr(raw []byte) MetaHdr { + return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in + DecodedFrom(hdr) +} + +ghost +requires MetaLen <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func RawBytesToBase(raw []byte) Base { + return let metaHdr := RawBytesToMetaHdr(raw) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in + let segs := io.CombineSegLens(seg1, seg2, seg3) in + Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} +} + +ghost +opaque +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func validPktMetaHdr(raw []byte) bool { + return MetaLen <= len(raw) && + let metaHdr := RawBytesToMetaHdr(raw) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in + let segs := io.CombineSegLens(seg1, seg2, seg3) in + let base := RawBytesToBase(raw) in + 0 < metaHdr.SegLen[0] && + base.Valid() && + PktLen(segs, MetaLen) <= len(raw) +} + +ghost +requires MetaLen <= idx && idx <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw[:idx], 0, idx), R56) +ensures RawBytesToMetaHdr(raw) == RawBytesToMetaHdr(raw[:idx]) +ensures RawBytesToBase(raw) == RawBytesToBase(raw[:idx]) +decreases +func ValidPktMetaHdrSublice(raw []byte, idx int) { + reveal validPktMetaHdr(raw) + reveal validPktMetaHdr(raw[:idx]) + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold 
acc(sl.Bytes(raw[:idx], 0, idx), R56) + assert forall i int :: { &raw[:MetaLen][i] } 0 <= i && i < MetaLen ==> + &raw[:MetaLen][i] == &raw[:idx][:MetaLen][i] + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[:idx], 0, idx), R56) +} + +ghost +requires acc(s.Mem(ub), R54) +requires acc(sl.Bytes(ub, 0, len(ub)), R55) +requires s.GetBase(ub).Valid() +requires s.GetBase(ub).EqAbsHeader(ub) +ensures acc(sl.Bytes(ub, 0, len(ub)), R55) +ensures acc(s.Mem(ub), R54) +ensures validPktMetaHdr(ub) +ensures s.GetBase(ub).EqAbsHeader(ub) +decreases +func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { + unfold acc(s.Mem(ub), R55) + unfold acc(s.Base.Mem(), R56) + assert MetaLen <= len(ub) + assert s.Base.GetBase() == RawBytesToBase(ub) + seg1 := int(s.Base.PathMeta.SegLen[0]) + seg2 := int(s.Base.PathMeta.SegLen[1]) + seg3 := int(s.Base.PathMeta.SegLen[2]) + segs := io.CombineSegLens(seg1, seg2, seg3) + assert 0 < seg1 + assert s.GetBase(ub).NumsCompatibleWithSegLen() + assert PktLen(segs, MetaLen) <= len(ub) + assert reveal validPktMetaHdr(ub) + fold acc(s.Base.Mem(), R56) + fold acc(s.Mem(ub), R55) +} + +ghost +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires oldPkt.PathNotFullyTraversed() +decreases +pure func AbsXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_Packet2 { + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(absIncPathSeg(oldPkt.CurrSeg)), + } +} + +ghost +requires oldPkt.PathNotFullyTraversed() +decreases +pure func AbsIncPath(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_Packet2 { + absIncPathSeg(oldPkt.CurrSeg), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg, + } +} + +ghost +requires len(currseg.Future) > 0 +decreases +pure func absIncPathSeg(currseg io.IO_seg3) io.IO_seg3 { + return io.IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: currseg.UInfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: seq[io.IO_HF]{currseg.Future[0]} ++ currseg.Past, + Future: currseg.Future[1:], + History: seq[io.IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, + } +} + +ghost +requires acc(s.Mem(ub), _) +decreases +pure func (s *Raw) IsLastHopSpec(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + int(s.PathMeta.CurrHF) == (s.NumHops - 1) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires 0 <= idx && idx < s.GetNumINF(ub) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) CorrectlyDecodedInfWithIdx(ub []byte, idx int, info path.InfoField) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + let infOffset := MetaLen + idx*path.InfoLen in + infOffset+path.InfoLen <= len(ub) && + info.ToAbsInfoField() == + reveal path.BytesToAbsInfoField(ub, infOffset) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires s.GetBase(ub).ValidCurrInfSpec() +requires acc(sl.Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) CorrectlyDecodedInf(ub []byte, info path.InfoField) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + let infOffset := MetaLen + int(s.Base.PathMeta.CurrINF)*path.InfoLen in + infOffset+path.InfoLen <= len(ub) && + info.ToAbsInfoField() == + reveal path.BytesToAbsInfoField(ub, infOffset) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires 0 <= idx && idx < s.GetNumHops(ub) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) CorrectlyDecodedHfWithIdx(ub []byte, idx int, hop path.HopField) bool { + return unfolding acc(s.Mem(ub), _) in + 
unfolding acc(s.Base.Mem(), _) in + let hopOffset := MetaLen + int(s.NumINF)*path.InfoLen + idx*path.HopLen in + hopOffset+path.HopLen <= len(ub) && + hop.ToIO_HF() == path.BytesToIO_HF(ub, 0, hopOffset, len(ub)) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires s.GetBase(ub).ValidCurrHfSpec() +requires acc(sl.Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) CorrectlyDecodedHf(ub []byte, hop path.HopField) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + let hopOffset := MetaLen + int(s.NumINF)*path.InfoLen + + int(s.Base.PathMeta.CurrHF)*path.HopLen in + hopOffset+path.HopLen <= len(ub) && + hop.ToIO_HF() == path.BytesToIO_HF(ub, 0, hopOffset, len(ub)) +} + +ghost +preserves acc(s.Mem(ubuf), R55) +preserves s.IsLastHopSpec(ubuf) +preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) +preserves validPktMetaHdr(ubuf) +preserves s.GetBase(ubuf).EqAbsHeader(ubuf) +ensures len(s.absPkt(ubuf).CurrSeg.Future) == 1 +decreases +func (s *Raw) LastHopLemma(ubuf []byte) { + reveal validPktMetaHdr(ubuf) + metaHdr := RawBytesToMetaHdr(ubuf) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() + offset := HopFieldOffset(numINF, prevSegLen, MetaLen) + pkt := reveal s.absPkt(ubuf) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + assert len(pkt.CurrSeg.Future) == 1 +} + +ghost +preserves acc(s.Mem(ubuf), R55) +preserves s.GetBase(ubuf).IsXoverSpec() +preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R56) +preserves validPktMetaHdr(ubuf) +preserves s.GetBase(ubuf).EqAbsHeader(ubuf) +ensures s.absPkt(ubuf).LeftSeg != none[io.IO_seg2] +ensures len(s.absPkt(ubuf).CurrSeg.Future) == 1 +decreases +func (s *Raw) XoverLemma(ubuf []byte) { + reveal validPktMetaHdr(ubuf) + metaHdr := RawBytesToMetaHdr(ubuf) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() + offset := HopFieldOffset(numINF, prevSegLen, MetaLen) + pkt := reveal s.absPkt(ubuf) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + assert pkt.LeftSeg == reveal LeftSeg(ubuf, currInfIdx + 1, segs, MetaLen) + assert len(pkt.CurrSeg.Future) == 1 + assert pkt.LeftSeg != none[io.IO_seg2] +} + +ghost +opaque +requires pkt.PathNotFullyTraversed() +decreases +pure func (s *Raw) EqAbsHopField(pkt io.IO_pkt2, hop io.IO_HF) bool { + return let currHF := pkt.CurrSeg.Future[0] in + hop == currHF +} + +ghost +opaque +decreases +pure func (s *Raw) EqAbsInfoField(pkt io.IO_pkt2, info io.AbsInfoField) bool { + return let currseg := pkt.CurrSeg in + info.AInfo == currseg.AInfo && + info.UInfo == currseg.UInfo && + info.ConsDir == currseg.ConsDir && + info.Peer == currseg.Peer +} + +ghost +preserves acc(s.Mem(ubuf), R53) +preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R53) +preserves validPktMetaHdr(ubuf) +preserves s.GetBase(ubuf).EqAbsHeader(ubuf) +preserves s.absPkt(ubuf).PathNotFullyTraversed() +preserves 
s.GetBase(ubuf).ValidCurrInfSpec() +preserves s.GetBase(ubuf).ValidCurrHfSpec() +preserves s.CorrectlyDecodedInf(ubuf, info) +preserves s.CorrectlyDecodedHf(ubuf, hop) +ensures s.EqAbsInfoField(s.absPkt(ubuf), info.ToAbsInfoField()) +ensures s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) +decreases +func (s *Raw) DecodingLemma(ubuf []byte, info path.InfoField, hop path.HopField) { + reveal validPktMetaHdr(ubuf) + metaHdr := RawBytesToMetaHdr(ubuf) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() + offset := HopFieldOffset(numINF, prevSegLen, MetaLen) + hfIdxSeg := currHfIdx-prevSegLen + reveal s.CorrectlyDecodedInf(ubuf, info) + reveal s.CorrectlyDecodedHf(ubuf, hop) + pkt := reveal s.absPkt(ubuf) + currseg := reveal CurrSeg(ubuf, offset, currInfIdx, hfIdxSeg, segLen, MetaLen) + hopFields := hopFields(ubuf, offset, 0, segLen) + hopFieldsBytePositionsLemma(ubuf, offset, 0, segLen, R54) + reveal hopFieldsBytePositions(ubuf, offset, 0, segLen, hopFields) + assert currseg.Future[0] == hopFields[hfIdxSeg] + assert hopFields[hfIdxSeg] == + path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * hfIdxSeg, len(ubuf)) + assert currseg.Future[0] == path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * hfIdxSeg, len(ubuf)) + assert reveal s.EqAbsInfoField(s.absPkt(ubuf), info.ToAbsInfoField()) + assert reveal s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) +} + +ghost +requires path.InfoFieldOffset(currInfIdx, 0) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= len(raw) +requires 0 <= currHfIdx && currHfIdx < segLen +requires 0 <= currInfIdx && currInfIdx < 3 +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +ensures len(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0).Future) > 0 +decreases +func LenCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { + reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0) +} + +ghost +requires segs.Valid() +requires 0 < segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) +requires 0 <= currInfIdx && currInfIdx < 2 +requires 1 <= currInfIdx ==> 0 < segs.Seg3Len +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +ensures LeftSeg(raw, currInfIdx+1, segs, 0) != none[io.IO_seg3] +ensures RightSeg(raw, currInfIdx, segs, 0) != none[io.IO_seg3] +decreases +func XoverSegNotNone(raw []byte, currInfIdx int, segs io.SegLens) { + reveal LeftSeg(raw, currInfIdx+1, segs, 0) + reveal RightSeg(raw, currInfIdx, segs, 0) +} + +ghost +requires path.InfoFieldOffset(currInfIdx, 0) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= len(raw) +requires 0 <= currHfIdx && currHfIdx < segLen +requires 0 <= currInfIdx && currInfIdx < 3 +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +preserves len(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0).Future) > 0 +ensures CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) == + absIncPathSeg(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0)) +decreases +func IncCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { + currseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0) + incseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) + hf := hopFields(raw, offset, 0, segLen) + 
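+	// The key step is a pure fact about sequences: hf[:currHfIdx+1] with its
+	// last element dropped equals hf[:currHfIdx]. For example, with
+	// hf == seq[io.IO_HF]{h0, h1, h2} and currHfIdx == 1, hfPast == hf[:2] ==
+	// seq[io.IO_HF]{h0, h1}, and hfPast[:len(hfPast)-1] == seq[io.IO_HF]{h0} == hf[:1].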
hfPast := hf[:currHfIdx+1] + assert hfPast[:len(hfPast)-1] == hf[:currHfIdx] + assert currseg.AInfo == incseg.AInfo + assert currseg.UInfo == incseg.UInfo + assert currseg.ConsDir == incseg.ConsDir + assert currseg.Peer == incseg.Peer + assert seq[io.IO_HF]{currseg.Future[0]} ++ currseg.Past == incseg.Past + assert currseg.Future[1:] == incseg.Future + assert seq[io.IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History == incseg.History + assert incseg == absIncPathSeg(currseg) +} + +ghost +requires segs.Valid() +requires 0 < segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) +requires 1 <= currInfIdx && currInfIdx < 3 +requires 1 == currInfIdx ==> currHfIdx+1 == segs.Seg1Len +requires 2 == currInfIdx ==> 0 < segs.Seg3Len && currHfIdx+1 == segs.Seg1Len + segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +preserves LeftSeg(raw, currInfIdx, segs, 0) != none[io.IO_seg3] +ensures + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx+1) in + let segLen := segs.LengthOfCurrSeg(currHfIdx+1) in + let numInf := segs.NumInfoFields() in + let offset := HopFieldOffset(numInf, prevSegLen, 0) in + CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen+1, segLen, 0) == + get(LeftSeg(raw, currInfIdx, segs, 0)) +decreases +func XoverCurrSeg(raw []byte, currInfIdx int, currHfIdx int, segs io.SegLens) { + prevSegLen := segs.LengthOfPrevSeg(currHfIdx+1) + segLen := segs.LengthOfCurrSeg(currHfIdx+1) + numInf := segs.NumInfoFields() + offset := HopFieldOffset(numInf, prevSegLen, 0) + currseg := reveal CurrSeg(raw, offset, currInfIdx, 0, segLen, 0) + leftseg := reveal LeftSeg(raw, currInfIdx, segs, 0) + assert currseg == get(leftseg) +} + +ghost +requires segs.Valid() +requires PktLen(segs, 0) <= len(raw) +requires 2 <= currInfIdx && currInfIdx < 4 +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +ensures LeftSeg(raw, currInfIdx, segs, 0) == + MidSeg(raw, currInfIdx, segs, 0) +decreases +func XoverLeftSeg(raw []byte, currInfIdx int, segs io.SegLens) { + leftseg := reveal LeftSeg(raw, currInfIdx, segs, 0) + midseg := reveal MidSeg(raw, currInfIdx, segs, 0) + assert leftseg == midseg +} + +ghost +requires segs.Valid() +requires 0 < segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) +requires -1 <= currInfIdx && currInfIdx < 1 +requires 0 == currInfIdx ==> 0 < segs.Seg3Len +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +ensures MidSeg(raw, currInfIdx+4, segs, 0) == + RightSeg(raw, currInfIdx, segs, 0) +decreases +func XoverMidSeg(raw []byte, currInfIdx int, segs io.SegLens) { + midseg := reveal MidSeg(raw, currInfIdx+4, segs, 0) + rightseg := reveal RightSeg(raw, currInfIdx, segs, 0) + assert midseg == rightseg +} + +ghost +requires segs.Valid() +requires 0 < segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) +requires 0 <= currInfIdx && currInfIdx < 2 +requires 0 == currInfIdx ==> currHfIdx+1 == segs.Seg1Len +requires 1 == currInfIdx ==> 0 < segs.Seg3Len && currHfIdx+1 == segs.Seg1Len + segs.Seg2Len +requires PktLen(segs, 0) <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R56) +preserves RightSeg(raw, currInfIdx, segs, 0) != none[io.IO_seg3] +ensures + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx) in + let segLen := segs.LengthOfCurrSeg(currHfIdx) in + let numInf := segs.NumInfoFields() in + let offset := HopFieldOffset(numInf, prevSegLen, 0) in + let currseg := CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, 0) in + len(currseg.Future) > 0 && + get(RightSeg(raw, currInfIdx, segs, 0)) == absIncPathSeg(currseg) +decreases +func XoverRightSeg(raw 
[]byte, currInfIdx int, currHfIdx int, segs io.SegLens) { + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + segLen := segs.LengthOfCurrSeg(currHfIdx) + numInf := segs.NumInfoFields() + offset := HopFieldOffset(numInf, prevSegLen, 0) + LenCurrSeg(raw, offset, currInfIdx, segLen - 1, segLen) + IncCurrSeg(raw, offset, currInfIdx, segLen - 1, segLen) + currseg := CurrSeg(raw, offset, currInfIdx, segLen - 1, segLen, 0) + nextseg := CurrSeg(raw, offset, currInfIdx, segLen, segLen, 0) + rightseg := reveal RightSeg(raw, currInfIdx, segs, 0) + assert absIncPathSeg(currseg) == nextseg + assert nextseg == get(rightseg) + assert absIncPathSeg(currseg) == get(rightseg) +} + +ghost +opaque +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= segLen +requires len(hops) == segLen - currHFIdx +requires offset + path.HopLen * segLen <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func hopFieldsBytePositions(raw []byte, offset int, currHFIdx int, segLen int, hops seq[io.IO_HF]) bool { + return forall i int :: { hops[i] } 0 <= i && i < len(hops) ==> + hops[i] == path.BytesToIO_HF(raw, 0, offset + path.HopLen * (currHFIdx + i), len(raw)) +} + +ghost +requires R55 < p +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), p) +ensures hopFieldsBytePositions(raw, offset, currHFIdx, segLen, hopFields(raw, offset, currHFIdx, segLen)) +decreases segLen - currHFIdx +func hopFieldsBytePositionsLemma( + raw []byte, + offset int, + currHFIdx int, + segLen int, + p perm) { + newP := (p + R55)/2 + hopfields := hopFields(raw, offset, currHFIdx, segLen) + if (currHFIdx != segLen) { + hopFieldsBytePositionsLemma(raw, offset, currHFIdx + 1, segLen, newP) + hopfieldsInc := hopFields(raw, offset, currHFIdx + 1, segLen) + assert reveal hopFieldsBytePositions(raw, offset, currHFIdx + 1, segLen, hopfieldsInc) + } + assert reveal hopFieldsBytePositions(raw, offset, currHFIdx, segLen, hopfields) +} \ No newline at end of file diff --git a/pkg/slayers/path/scion/raw_spec_test.gobra b/pkg/slayers/path/scion/raw_spec_test.gobra index 5ab5b8c25..d051ab530 100644 --- a/pkg/slayers/path/scion/raw_spec_test.gobra +++ b/pkg/slayers/path/scion/raw_spec_test.gobra @@ -16,7 +16,7 @@ package scion -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" func testAllocateNonInitRaw() { r := &Raw{} @@ -29,7 +29,7 @@ func testAllocateRaw() { fold r.Base.Mem() assert r.Base.Len() == MetaLen s := make([]byte, MetaLen) - fold slices.AbsSlice_Bytes(s, 0, len(s)) + fold sl.Bytes(s, 0, len(s)) r.Raw = s fold r.Mem(s) } diff --git a/pkg/slayers/path/scion/raw_test.go b/pkg/slayers/path/scion/raw_test.go index ff527d939..3a869b016 100644 --- a/pkg/slayers/path/scion/raw_test.go +++ b/pkg/slayers/path/scion/raw_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/scionproto/scion/pkg/private/serrors" "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" ) @@ -51,6 +52,19 @@ var emptyRawTestPath = &scion.Raw{ Raw: make([]byte, scion.MetaLen), } +var overlongPath = &scion.Raw{ + Base: scion.Base{ + PathMeta: scion.MetaHdr{ + CurrINF: 0, + CurrHF: 0, + SegLen: [3]uint8{24, 24, 17}, + }, + NumINF: 3, + NumHops: 65, + }, + Raw: rawPath, +} + func TestRawSerialize(t *testing.T) { b := make([]byte, rawTestPath.Len()) 
assert.NoError(t, rawTestPath.SerializeTo(b)) @@ -63,7 +77,7 @@ func TestRawDecodeFromBytes(t *testing.T) { assert.Equal(t, rawTestPath, s) } -func TestRawSerliazeDecode(t *testing.T) { +func TestRawSerializeDecode(t *testing.T) { b := make([]byte, rawTestPath.Len()) assert.NoError(t, rawTestPath.SerializeTo(b)) s := &scion.Raw{} @@ -71,6 +85,15 @@ func TestRawSerliazeDecode(t *testing.T) { assert.Equal(t, rawTestPath, s) } +func TestOverlongSerializeDecode(t *testing.T) { + b := make([]byte, overlongPath.Len()) + assert.NoError(t, overlongPath.SerializeTo(b)) // permitted, if only to enable this test. + s := &scion.Raw{} + expected := serrors.New("NumHops too large", "NumHops", 65, "Maximum", scion.MaxHops) + err := s.DecodeFromBytes(b) + assert.Equal(t, expected.Error(), err.Error()) +} + func TestRawReverse(t *testing.T) { for name, tc := range pathReverseCases { name, tc := name, tc diff --git a/pkg/slayers/path/scion/widen-lemma.gobra b/pkg/slayers/path/scion/widen-lemma.gobra new file mode 100644 index 000000000..0297715a6 --- /dev/null +++ b/pkg/slayers/path/scion/widen-lemma.gobra @@ -0,0 +1,222 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package scion + +import ( + sl "verification/utils/slices" + "verification/io" + . 
"verification/utils/definitions" + "verification/dependencies/encoding/binary" + "github.com/scionproto/scion/pkg/slayers/path" +) + +ghost +requires 0 <= start && start <= headerOffset +requires path.InfoFieldOffset(currInfIdx, headerOffset) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= length +requires length <= len(raw) +requires 0 <= currHfIdx && currHfIdx <= segLen +requires 0 <= currInfIdx && currInfIdx < 3 +preserves acc(sl.Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) +ensures CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, headerOffset) == + CurrSeg(raw[start:length], offset-start, currInfIdx, currHfIdx, segLen, headerOffset-start) +decreases +func WidenCurrSeg(raw []byte, + offset int, + currInfIdx int, + currHfIdx int, + segLen int, + headerOffset int, + start int, + length int) { + unfold acc(sl.Bytes(raw, 0, len(raw)), R53) + unfold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R53) + + ainfo1 := path.Timestamp(raw, currInfIdx, headerOffset) + ainfo2 := path.Timestamp(raw[start:length], currInfIdx, headerOffset-start) + assert ainfo1 == ainfo2 + + uinfo1 := path.AbsUinfo(raw, currInfIdx, headerOffset) + uinfo2 := path.AbsUinfo(raw[start:length], currInfIdx, headerOffset-start) + assert uinfo1 == uinfo2 + + consDir1 := path.ConsDir(raw, currInfIdx, headerOffset) + consDir2 := path.ConsDir(raw[start:length], currInfIdx, headerOffset-start) + assert consDir1 == consDir2 + + peer1 := path.Peer(raw, currInfIdx, headerOffset) + peer2 := path.Peer(raw[start:length], currInfIdx, headerOffset-start) + assert peer1 == peer2 + + widenSegment(raw, offset, currHfIdx, ainfo1, uinfo1, consDir1, peer1, segLen, start, length) + reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, headerOffset) + reveal CurrSeg(raw[start:length], offset-start, currInfIdx, currHfIdx, segLen, headerOffset-start) + fold acc(sl.Bytes(raw, 0, len(raw)), R53) + fold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R53) +} + +ghost +requires 0 <= start && start <= offset +requires 0 < segLen +requires 0 <= currHfIdx && currHfIdx <= segLen +requires length <= len(raw) +requires offset + path.HopLen * segLen <= length +preserves acc(sl.Bytes(raw, 0, len(raw)), R52) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R52) +ensures segment(raw, offset, currHfIdx, ainfo, uinfo, consDir, peer, segLen) == + segment(raw[start:length], offset-start, currHfIdx, ainfo, uinfo, consDir, peer, segLen) +decreases +func widenSegment(raw []byte, + offset int, + currHfIdx int, + ainfo io.IO_ainfo, + uinfo set[io.IO_msgterm], + consDir bool, + peer bool, + segLen int, + start int, + length int) { + newP := (R52 + R53)/2 + widenHopFields(raw, offset, 0, segLen, start, length, newP) +} + +ghost +requires 0 <= start && start <= middle +requires middle + path.HopLen <= length +requires length <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R54) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R54) +ensures path.BytesToIO_HF(raw, 0, middle, len(raw)) == + path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) +decreases +func widenBytesToIO_HF(raw []byte, middle int, start int, length int) { + unfold acc(sl.Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R55) + hfBytes1 := path.BytesToIO_HF(raw, 0, middle, len(raw)) + hfBytes2 := path.BytesToIO_HF(raw[start:length], 0, middle-start, length-start) 
+ assert hfBytes1 == hfBytes2 + fold acc(sl.Bytes(raw, 0, len(raw)), R55) + fold acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R55) +} + +ghost +requires R53 < p +requires 0 <= start && start <= offset +requires 0 <= currHfIdx && currHfIdx <= segLen +requires offset + path.HopLen * segLen <= length +requires length <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), p) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), p) +ensures hopFields(raw, offset, currHfIdx, segLen) == + hopFields(raw[start:length], offset-start, currHfIdx, segLen) +decreases segLen - currHfIdx +func widenHopFields(raw []byte, offset int, currHfIdx int, segLen int, start int, length int, p perm) { + if (currHfIdx != segLen) { + widenBytesToIO_HF(raw, offset + path.HopLen * currHfIdx, start, length) + hf1 := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHfIdx, len(raw)) + hf2 := path.BytesToIO_HF(raw[start:length], 0, offset + path.HopLen * currHfIdx - start, length - start) + newP := (p + R53)/2 + widenHopFields(raw, offset, currHfIdx + 1, segLen, start, length, newP) + } +} + +ghost +requires 0 <= start && start <= headerOffset +requires segs.Valid() +requires 0 <= length && length <= len(raw) +requires PktLen(segs, headerOffset) <= length +requires 1 <= currInfIdx && currInfIdx < 4 +preserves acc(sl.Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) +ensures LeftSeg(raw, currInfIdx, segs, headerOffset) == + LeftSeg(raw[start:length], currInfIdx, segs, headerOffset-start) +decreases +func WidenLeftSeg(raw []byte, + currInfIdx int, + segs io.SegLens, + headerOffset int, + start int, + length int) { + offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) + if currInfIdx == 1 && segs.Seg2Len > 0 { + offsetWithHopfields := offset + path.HopLen * segs.Seg1Len + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, segs.Seg2Len, headerOffset, start, length) + } else if currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * (segs.Seg1Len + segs.Seg2Len) + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, segs.Seg3Len, headerOffset, start, length) + } + reveal LeftSeg(raw, currInfIdx, segs, headerOffset) + reveal LeftSeg(raw[start:length], currInfIdx, segs, headerOffset- start) +} + +ghost +requires 0 <= start && start <= headerOffset +requires segs.Valid() +requires 0 <= length && length <= len(raw) +requires PktLen(segs, headerOffset) <= length +requires -1 <= currInfIdx && currInfIdx < 2 +preserves acc(sl.Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) +ensures RightSeg(raw, currInfIdx, segs, headerOffset) == + RightSeg(raw[start:length], currInfIdx, segs, headerOffset-start) +decreases +func WidenRightSeg(raw []byte, + currInfIdx int, + segs io.SegLens, + headerOffset int, + start int, + length int) { + offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) + if currInfIdx == 1 && segs.Seg2Len > 0 && segs.Seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * segs.Seg1Len + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, segs.Seg2Len, segs.Seg2Len, headerOffset, start, length) + } else if currInfIdx == 0 && segs.Seg2Len > 0 { + WidenCurrSeg(raw, offset, currInfIdx, segs.Seg1Len, segs.Seg1Len, headerOffset, start, length) + } + reveal RightSeg(raw, currInfIdx, segs, headerOffset) + reveal RightSeg(raw[start:length], currInfIdx, segs, headerOffset - start) +} + +ghost +requires 0 <= 
start && start <= headerOffset +requires segs.Valid() +requires 2 <= currInfIdx && currInfIdx < 5 +requires 0 <= length && length <= len(raw) +requires PktLen(segs, headerOffset) <= length +preserves acc(sl.Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.Bytes(raw[start:length], 0, len(raw[start:length])), R51) +ensures MidSeg(raw, currInfIdx, segs, headerOffset) == + MidSeg(raw[start:length], currInfIdx, segs, headerOffset - start) +decreases +func WidenMidSeg(raw []byte, + currInfIdx int, + segs io.SegLens, + headerOffset int, + start int, + length int) { + offset := HopFieldOffset(segs.NumInfoFields(), 0, headerOffset) + if currInfIdx == 4 && segs.Seg2Len > 0 { + WidenCurrSeg(raw, offset, 0, segs.Seg1Len, segs.Seg1Len, headerOffset, start, length) + } else if currInfIdx == 2 && segs.Seg2Len > 0 && segs.Seg3Len > 0 { + offsetWithHopfields := offset + path.HopLen * (segs.Seg1Len + segs.Seg2Len) + WidenCurrSeg(raw, offsetWithHopfields, currInfIdx, 0, segs.Seg3Len, headerOffset, start, length) + } + reveal MidSeg(raw, currInfIdx, segs, headerOffset) + reveal MidSeg(raw[start:length], currInfIdx, segs, headerOffset - start) +} \ No newline at end of file diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go index 0e44da293..fdf5a4232 100644 --- a/pkg/slayers/scion.go +++ b/pkg/slayers/scion.go @@ -217,24 +217,27 @@ func (s *SCION) NetworkFlow() (res gopacket.Flow) { // @ requires !opts.FixLengths // @ requires b != nil && b.Mem() // @ requires acc(s.Mem(ubuf), R0) -// @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) // @ ensures b.Mem() // @ ensures acc(s.Mem(ubuf), R0) -// @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) // TODO: hide internal spec details // @ ensures e == nil && s.HasOneHopPath(ubuf) ==> // @ len(b.UBuf()) == old(len(b.UBuf())) + unfolding acc(s.Mem(ubuf), R55) in -// @ (CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) +// @ (CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) // @ ensures e == nil && s.HasOneHopPath(ubuf) ==> -// @ (unfolding acc(s.Mem(ubuf), R55) in CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) <= len(ubuf) +// @ (unfolding acc(s.Mem(ubuf), R55) in CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen])) <= len(ubuf) // @ ensures e != nil ==> e.ErrorMem() +// post for IO: +// @ ensures e == nil && old(s.EqPathType(ubuf)) ==> +// @ IsSupportedRawPkt(b.View()) == old(IsSupportedPkt(ubuf)) // @ decreases func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions /* @ , ghost ubuf []byte @*/) (e error) { - // @ unfold acc(s.Mem(ubuf), R0) - // @ defer fold acc(s.Mem(ubuf), R0) - // @ sl.SplitRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLen(nil, true)), int(s.HdrLen*LineLen), writePerm) - // @ ghost defer sl.CombineRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLen(nil, true)), int(s.HdrLen*LineLen), writePerm) + // @ unfold acc(s.Mem(ubuf), R1) + // @ defer fold acc(s.Mem(ubuf), R1) + // @ sl.SplitRange_Bytes(ubuf, int(CmnHdrLen+s.AddrHdrLen(nil, true)), int(s.HdrLen*LineLen), R10) scnLen := CmnHdrLen + s.AddrHdrLen( /*@ nil, true @*/ ) + s.Path.Len( /*@ ubuf[CmnHdrLen+s.AddrHdrLen(nil, true) : s.HdrLen*LineLen] @*/ ) + // @ sl.CombineRange_Bytes(ubuf, 
int(CmnHdrLen+s.AddrHdrLenSpecInternal()), int(s.HdrLen*LineLen), R10) if scnLen > MaxHdrLen { return serrors.New("header length exceeds maximum", "max", MaxHdrLen, "actual", scnLen) @@ -255,60 +258,70 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // @ ghost uSerBufN := b.UBuf() // @ assert buf === uSerBufN[:scnLen] // @ b.ExchangePred() - // @ sl.SplitRange_Bytes(uSerBufN, 0, scnLen, writePerm) + // @ unfold acc(sl.Bytes(uSerBufN, 0, len(uSerBufN)), writePerm) // Serialize common header. firstLine := uint32(s.Version&0xF)<<28 | uint32(s.TrafficClass)<<20 | s.FlowID&0xFFFFF - // @ sl.SplitRange_Bytes(buf, 0, 4, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf[:4], 0, 4), writePerm) binary.BigEndian.PutUint32(buf[:4], firstLine) - // @ fold acc(sl.AbsSlice_Bytes(buf[:4], 0, 4), writePerm) - // @ sl.CombineRange_Bytes(buf, 0, 4, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), writePerm) buf[4] = uint8(s.NextHdr) buf[5] = s.HdrLen - // @ fold acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), writePerm) - // @ sl.SplitRange_Bytes(buf, 6, 8, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf[6:8], 0, 2), writePerm) + // @ assert &buf[6:8][0] == &buf[6] && &buf[6:8][1] == &buf[7] binary.BigEndian.PutUint16(buf[6:8], s.PayloadLen) - // @ fold acc(sl.AbsSlice_Bytes(buf[6:8], 0, 2), writePerm) - // @ sl.CombineRange_Bytes(buf, 6, 8, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), writePerm) buf[8] = uint8(s.PathType) buf[9] = uint8(s.DstAddrType&0x7)<<4 | uint8(s.SrcAddrType&0x7) - // @ fold acc(sl.AbsSlice_Bytes(buf, 0, len(buf)), writePerm) - // @ sl.SplitRange_Bytes(buf, 10, 12, writePerm) - // @ unfold acc(sl.AbsSlice_Bytes(buf[10:12], 0, 2), writePerm) + // @ assert &buf[10:12][0] == &buf[10] && &buf[10:12][1] == &buf[11] binary.BigEndian.PutUint16(buf[10:12], 0) - // @ fold acc(sl.AbsSlice_Bytes(buf[10:12], 0, 2), writePerm) - // @ sl.CombineRange_Bytes(buf, 10, 12, writePerm) - - // @ ghost sPath := s.Path - // @ ghost pathSlice := ubuf[CmnHdrLen+s.AddrHdrLen(nil, true) : s.HdrLen*LineLen] - // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLen(nil, true), int(s.HdrLen*LineLen), R10) + // @ fold acc(sl.Bytes(uSerBufN, 0, len(uSerBufN)), writePerm) + // @ ghost if s.EqPathType(ubuf) { + // @ assert reveal s.EqPathTypeWithBuffer(ubuf, uSerBufN) + // @ s.IsSupportedPktLemma(ubuf, uSerBufN) + // @ } // Serialize address header. 
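For reference, the common header written by the hunk above occupies the first twelve bytes of the buffer. The following is a minimal standalone sketch of that layout with illustrative values (a hypothetical program, not code from this repository):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        var (
            version      uint8  = 0
            trafficClass uint8  = 0xb8
            flowID       uint32 = 0xdead // 20 bits
            nextHdr      uint8  = 17     // e.g. UDP
            hdrLen       uint8  = 21     // header length in 4-byte lines
            payloadLen   uint16 = 1024
            pathType     uint8  = 1 // scion path type
            dstAddrType  uint8  = 0
            srcAddrType  uint8  = 0
        )
        buf := make([]byte, 12)
        firstLine := uint32(version&0xF)<<28 | uint32(trafficClass)<<20 | flowID&0xFFFFF
        binary.BigEndian.PutUint32(buf[0:4], firstLine)
        buf[4] = nextHdr
        buf[5] = hdrLen
        binary.BigEndian.PutUint16(buf[6:8], payloadLen) // &buf[6:8][0] == &buf[6]
        buf[8] = pathType
        buf[9] = (dstAddrType&0x7)<<4 | (srcAddrType & 0x7)
        binary.BigEndian.PutUint16(buf[10:12], 0) // reserved
        fmt.Printf("common header: % x\n", buf)
    }

The assert annotations added above make the aliasing between buf[6:8] (resp. buf[10:12]) and buf explicit, which is what lets the two-byte writes verify against a single unfolded Bytes predicate instead of the removed SplitRange/CombineRange pairs.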
- // @ sl.SplitRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) + // @ sl.SplitRange_Bytes(uSerBufN, CmnHdrLen, scnLen, HalfPerm) + // @ sl.Reslice_Bytes(uSerBufN, 0, CmnHdrLen, R54) + // @ IsSupportedPktSubslice(uSerBufN, CmnHdrLen) + // @ sl.SplitRange_Bytes(uSerBufN, CmnHdrLen, scnLen, HalfPerm) // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen, len(ubuf), R10) if err := s.SerializeAddrHdr(buf[CmnHdrLen:] /*@ , ubuf[CmnHdrLen:] @*/); err != nil { - // @ sl.CombineRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) + // @ sl.Unslice_Bytes(uSerBufN, 0, CmnHdrLen, R54) + // @ sl.CombineRange_Bytes(uSerBufN, CmnHdrLen, scnLen, writePerm) // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen, len(ubuf), R10) - // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLen(nil, true), int(s.HdrLen*LineLen), R10) - // @ sl.CombineRange_Bytes(uSerBufN, 0, scnLen, writePerm) // @ b.RestoreMem(uSerBufN) return err } offset := CmnHdrLen + s.AddrHdrLen( /*@ nil, true @*/ ) - // @ sl.CombineRange_Bytes(buf, CmnHdrLen, len(buf), writePerm) + // @ sl.CombineRange_Bytes(uSerBufN, CmnHdrLen, scnLen, HalfPerm) // @ sl.CombineRange_Bytes(ubuf, CmnHdrLen, len(ubuf), R10) - // @ sl.SplitRange_Bytes(ubuf, CmnHdrLen+s.AddrHdrLen(nil, true), int(s.HdrLen*LineLen), R10) + // @ IsSupportedPktSubslice(uSerBufN, CmnHdrLen) + // @ sl.Unslice_Bytes(uSerBufN, 0, CmnHdrLen, R54) + // @ sl.CombineRange_Bytes(uSerBufN, CmnHdrLen, scnLen, HalfPerm) + // Serialize path header. - // @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm) + // @ ghost startP := int(CmnHdrLen+s.AddrHdrLenSpecInternal()) + // @ ghost endP := int(s.HdrLen*LineLen) + // @ ghost pathSlice := ubuf[startP : endP] + // @ sl.SplitRange_Bytes(uSerBufN, offset, scnLen, HalfPerm) + // @ sl.SplitRange_Bytes(ubuf, startP, endP, HalfPerm) + // @ sl.Reslice_Bytes(uSerBufN, 0, offset, R54) + // @ sl.Reslice_Bytes(ubuf, 0, startP, R54) + // @ IsSupportedPktSubslice(uSerBufN, offset) + // @ IsSupportedPktSubslice(ubuf, startP) + // @ sl.SplitRange_Bytes(uSerBufN, offset, scnLen, HalfPerm) + // @ sl.SplitRange_Bytes(ubuf, startP, endP, HalfPerm) tmp := s.Path.SerializeTo(buf[offset:] /*@, pathSlice @*/) - // @ sl.CombineRange_Bytes(buf, offset, len(buf), writePerm) - // @ sl.CombineRange_Bytes(uSerBufN, 0, scnLen, writePerm) + // @ sl.CombineRange_Bytes(uSerBufN, offset, scnLen, HalfPerm) + // @ sl.CombineRange_Bytes(ubuf, startP, endP, HalfPerm) + // @ IsSupportedPktSubslice(uSerBufN, offset) + // @ IsSupportedPktSubslice(ubuf, startP) + // @ sl.Unslice_Bytes(uSerBufN, 0, offset, R54) + // @ sl.Unslice_Bytes(ubuf, 0, startP, R54) + // @ sl.CombineRange_Bytes(uSerBufN, offset, scnLen, HalfPerm) + // @ sl.CombineRange_Bytes(ubuf, startP, endP, HalfPerm) + // @ reveal IsSupportedPkt(uSerBufN) // @ b.RestoreMem(uSerBufN) + // @ reveal IsSupportedRawPkt(b.View()) return tmp } @@ -317,9 +330,12 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO // data, so care should be taken to copy it first should later modification of data be required // before the SCION layer is discarded. 
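+// (VerifiedSCION) The postconditions added below strengthen this contract:
+// on success, the path type and next-header stored in the struct match the
+// raw bytes (EqPathType), and for *scion.Raw paths the parsed header agrees
+// with the buffer (EqAbsHeader) and is weakly valid (ValidScionInitSpec).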
// @ requires s.NonInitMem() -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R40) // @ preserves df != nil && df.Mem() // @ ensures res == nil ==> s.Mem(data) +// @ ensures res == nil && typeOf(s.GetPath(data)) == *scion.Raw ==> +// @ s.EqAbsHeader(data) && s.ValidScionInitSpec(data) +// @ ensures res == nil ==> s.EqPathType(data) // @ ensures res != nil ==> s.NonInitMem() && res.ErrorMem() // @ decreases func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) { @@ -329,27 +345,32 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er return serrors.New("packet is shorter than the common header length", "min", CmnHdrLen, "actual", len(data)) } - // @ sl.SplitRange_Bytes(data, 0, 4, R15) - // @ preserves 4 <= len(data) && acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R15) + // @ sl.SplitRange_Bytes(data, 0, 4, R41) + // @ preserves 4 <= len(data) && acc(sl.Bytes(data[:4], 0, 4), R41) // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R15) + // @ unfold acc(sl.Bytes(data[:4], 0, 4), R41) firstLine := binary.BigEndian.Uint32(data[:4]) - // @ fold acc(sl.AbsSlice_Bytes(data[:4], 0, 4), R15) + // @ fold acc(sl.Bytes(data[:4], 0, 4), R41) // @ ) - // @ sl.CombineRange_Bytes(data, 0, 4, R15) + // @ sl.CombineRange_Bytes(data, 0, 4, R41) // @ unfold s.NonInitMem() s.Version = uint8(firstLine >> 28) s.TrafficClass = uint8((firstLine >> 20) & 0xFF) s.FlowID = firstLine & 0xFFFFF // @ preserves acc(&s.NextHdr) && acc(&s.HdrLen) && acc(&s.PayloadLen) && acc(&s.PathType) // @ preserves acc(&s.DstAddrType) && acc(&s.SrcAddrType) - // @ preserves CmnHdrLen <= len(data) && acc(sl.AbsSlice_Bytes(data, 0, len(data)), R15) + // @ preserves CmnHdrLen <= len(data) && acc(sl.Bytes(data, 0, len(data)), R41) // @ ensures s.DstAddrType.Has3Bits() && s.SrcAddrType.Has3Bits() // @ ensures 0 <= s.PathType && s.PathType < 256 + // @ ensures path.Type(GetPathType(data)) == s.PathType + // @ ensures L4ProtocolType(GetNextHdr(data)) == s.NextHdr + // @ ensures GetLength(data) == int(s.HdrLen * LineLen) + // @ ensures GetAddressOffset(data) == + // @ CmnHdrLen + 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R15) + // @ unfold acc(sl.Bytes(data, 0, len(data)), R41) s.NextHdr = L4ProtocolType(data[4]) s.HdrLen = data[5] // @ assert &data[6:8][0] == &data[6] && &data[6:8][1] == &data[7] @@ -361,20 +382,20 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ assert int(s.DstAddrType) == b.BitAnd7(int(data[9] >> 4)) s.SrcAddrType = AddrType(data[9] & 0x7) // @ assert int(s.SrcAddrType) == b.BitAnd7(int(data[9])) - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R15) + // @ fold acc(sl.Bytes(data, 0, len(data)), R41) // @ ) // Decode address header. 
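The outlined blocks above invert exactly that encoding. Note that Go's shift and bitwise-and share one precedence level and bind left to right, so data[9] >> 4 & 0x7 reads as (data[9] >> 4) & 0x7, matching the BitAnd7 assertions. A standalone sketch of the same bit manipulation, reusing the hypothetical bytes from the serialization sketch earlier:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Twelve common-header bytes, as produced by the earlier sketch.
        data := []byte{0x0b, 0x80, 0xde, 0xad, 17, 21, 0x04, 0x00, 1, 0x00, 0, 0}
        firstLine := binary.BigEndian.Uint32(data[:4])
        version := uint8(firstLine >> 28)               // 0
        trafficClass := uint8((firstLine >> 20) & 0xFF) // 0xb8
        flowID := firstLine & 0xFFFFF                   // 0xdead
        dstAddrType := data[9] >> 4 & 0x7
        srcAddrType := data[9] & 0x7
        fmt.Println(version, trafficClass, flowID, data[4], data[5], dstAddrType, srcAddrType)
    }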
- // @ sl.SplitByIndex_Bytes(data, 0, len(data), CmnHdrLen, R5) - // @ sl.Reslice_Bytes(data, CmnHdrLen, len(data), R5) + // @ sl.SplitByIndex_Bytes(data, 0, len(data), CmnHdrLen, R41) + // @ sl.Reslice_Bytes(data, CmnHdrLen, len(data), R41) if err := s.DecodeAddrHdr(data[CmnHdrLen:]); err != nil { // @ fold s.NonInitMem() - // @ sl.Unslice_Bytes(data, CmnHdrLen, len(data), R5) - // @ sl.CombineAtIndex_Bytes(data, 0, len(data), CmnHdrLen, R5) + // @ sl.Unslice_Bytes(data, CmnHdrLen, len(data), R41) + // @ sl.CombineAtIndex_Bytes(data, 0, len(data), CmnHdrLen, R41) df.SetTruncated() return err } - // @ sl.Unslice_Bytes(data, CmnHdrLen, len(data), R5) - // @ sl.CombineAtIndex_Bytes(data, 0, len(data), CmnHdrLen, R5) + // @ sl.Unslice_Bytes(data, CmnHdrLen, len(data), R41) + // @ sl.CombineAtIndex_Bytes(data, 0, len(data), CmnHdrLen, R41) // (VerifiedSCION) the first ghost parameter to AddrHdrLen is ignored when the second // is set to nil. As such, we pick the easiest possible value as a placeholder. addrHdrLen := s.AddrHdrLen( /*@ nil, true @*/ ) @@ -404,26 +425,37 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er // @ fold s.NonInitMem() return err } - // @ sl.SplitRange_Bytes(data, offset, offset+pathLen, writePerm) + // @ sl.SplitRange_Bytes(data, offset, offset+pathLen, R41) err = s.Path.DecodeFromBytes(data[offset : offset+pathLen]) if err != nil { - // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, writePerm) + // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R41) // @ unfold s.HeaderMem(data[CmnHdrLen:]) // @ s.PathPoolMemExchange(s.PathType, s.Path) // @ fold s.NonInitMem() return err } - /*@ ghost if typeOf(s.Path) == type[*onehop.Path] { - s.Path.(*onehop.Path).InferSizeUb(data[offset : offset+pathLen]) - assert s.Path.Len(data[offset : offset+pathLen]) <= len(data[offset : offset+pathLen]) - assert CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(data[offset : offset+pathLen]) <= len(data) - } @*/ + // @ ghost if typeOf(s.Path) == type[*onehop.Path] { + // @ s.Path.(*onehop.Path).InferSizeUb(data[offset : offset+pathLen]) + // @ assert s.Path.LenSpec(data[offset : offset+pathLen]) <= len(data[offset : offset+pathLen]) + // @ assert CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(data[offset : offset+pathLen]) <= len(data) + // @ } s.Contents = data[:hdrBytes] s.Payload = data[hdrBytes:] - - // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, writePerm) - // @ fold s.Mem(data) - + // @ fold acc(s.Mem(data), R54) + // @ ghost if(typeOf(s.GetPath(data)) == (*scion.Raw)) { + // @ unfold acc(sl.Bytes(data, 0, len(data)), R56) + // @ unfold acc(sl.Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) + // @ unfold acc(s.Path.(*scion.Raw).Mem(data[offset : offset+pathLen]), R55) + // @ assert reveal s.EqAbsHeader(data) + // @ assert reveal s.ValidScionInitSpec(data) + // @ fold acc(s.Path.Mem(data[offset : offset+pathLen]), R55) + // @ fold acc(sl.Bytes(data, 0, len(data)), R56) + // @ fold acc(sl.Bytes(data[offset : offset+pathLen], 0, len(data[offset : offset+pathLen])), R56) + // @ } + // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R41) + // @ assert typeOf(s.GetPath(data)) == *scion.Raw ==> s.EqAbsHeader(data) && s.ValidScionInitSpec(data) + // @ assert reveal s.EqPathType(data) + // @ fold acc(s.Mem(data), 1-R54) return nil } @@ -492,7 +524,7 @@ func (s *SCION) getPath(pathType path.Type) (res path.Path, err error) { } // @ requires pb != nil -// @ requires sl.AbsSlice_Bytes(data, 
0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ preserves pb.Mem() // @ ensures res != nil ==> res.ErrorMem() // @ decreases @@ -577,15 +609,15 @@ func scionNextLayerTypeL4(t L4ProtocolType) gopacket.LayerType { // the destination address. // @ requires acc(&s.DstAddrType, R20) && acc(&s.RawDstAddr, R20) // @ requires s.DstAddrType == T4Svc ==> len(s.RawDstAddr) >= addr.HostLenSVC -// @ requires acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) +// @ requires acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) // @ ensures acc(&s.DstAddrType, R20) && acc(&s.RawDstAddr, R20) // @ ensures err == nil ==> acc(res.Mem(), R15) // @ ensures err == nil ==> typeOf(res) == *net.IPAddr || typeOf(res) == addr.HostSVC // @ ensures err == nil ==> // @ let rawDstAddr := s.RawDstAddr in -// @ (acc(res.Mem(), R15) --* acc(sl.AbsSlice_Bytes(rawDstAddr, 0, len(rawDstAddr)), R15)) +// @ (acc(res.Mem(), R15) --* acc(sl.Bytes(rawDstAddr, 0, len(rawDstAddr)), R15)) // @ ensures err != nil ==> -// @ acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) +// @ acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *SCION) DstAddr() (res net.Addr, err error) { @@ -598,15 +630,15 @@ func (s *SCION) DstAddr() (res net.Addr, err error) { // address. // @ requires acc(&s.SrcAddrType, R20) && acc(&s.RawSrcAddr, R20) // @ requires s.SrcAddrType == T4Svc ==> len(s.RawSrcAddr) >= addr.HostLenSVC -// @ requires acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R15) +// @ requires acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R15) // @ ensures acc(&s.SrcAddrType, R20) && acc(&s.RawSrcAddr, R20) // @ ensures err == nil ==> acc(res.Mem(), R15) // @ ensures err == nil ==> typeOf(res) == *net.IPAddr || typeOf(res) == addr.HostSVC // @ ensures err == nil ==> // @ let rawSrcAddr := s.RawSrcAddr in -// @ (acc(res.Mem(), R15) --* acc(sl.AbsSlice_Bytes(rawSrcAddr, 0, len(rawSrcAddr)), R15)) +// @ (acc(res.Mem(), R15) --* acc(sl.Bytes(rawSrcAddr, 0, len(rawSrcAddr)), R15)) // @ ensures err != nil ==> -// @ acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R15) +// @ acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R15) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *SCION) SrcAddr() (res net.Addr, err error) { @@ -625,9 +657,9 @@ func (s *SCION) SrcAddr() (res net.Addr, err error) { // @ ensures acc(&s.RawDstAddr) && acc(&s.DstAddrType) // @ ensures res != nil ==> res.ErrorMem() // @ ensures res == nil ==> isIP(dst) || isHostSVC(dst) -// @ ensures res == nil && wildcard && isIP(dst) ==> acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), _) -// @ ensures res == nil && wildcard && isHostSVC(dst) ==> sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) -// @ ensures res == nil && !wildcard && isHostSVC(dst) ==> sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) +// @ ensures res == nil && wildcard && isIP(dst) ==> acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), _) +// @ ensures res == nil && wildcard && isHostSVC(dst) ==> sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) +// @ ensures res == nil && !wildcard && isHostSVC(dst) ==> sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) // @ ensures res == nil && !wildcard ==> acc(dst.Mem(), R18) // @ ensures res == nil && !wildcard && isIP(dst) ==> (unfolding acc(dst.Mem(), R20) in (isIPv4(dst) ==> forall i int :: { &s.RawDstAddr[i] } 0 <= i && i < len(s.RawDstAddr) ==> &s.RawDstAddr[i] == &dst.(*net.IPAddr).IP[i])) // @ ensures res == nil && 
!wildcard && isIP(dst) ==> (unfolding acc(dst.Mem(), R20) in (isIPv6(dst) && isConvertibleToIPv4(dst) ==> forall i int :: { &s.RawDstAddr[i] } 0 <= i && i < len(s.RawDstAddr) ==> &s.RawDstAddr[i] == &dst.(*net.IPAddr).IP[12+i])) @@ -644,7 +676,7 @@ func (s *SCION) SetDstAddr(dst net.Addr /*@ , ghost wildcard bool @*/) (res erro var verScionTmp []byte s.DstAddrType, verScionTmp, err = packAddr(dst /*@ , wildcard @*/) // @ ghost if !wildcard && err == nil && isIP(dst) { - // @ apply acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(dst.Mem(), R20) + // @ apply acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(dst.Mem(), R20) // @ } s.RawDstAddr = verScionTmp return err @@ -662,9 +694,9 @@ func (s *SCION) SetDstAddr(dst net.Addr /*@ , ghost wildcard bool @*/) (res erro // @ ensures acc(&s.RawSrcAddr) && acc(&s.SrcAddrType) // @ ensures res != nil ==> res.ErrorMem() // @ ensures res == nil ==> isIP(src) || isHostSVC(src) -// @ ensures res == nil && wildcard && isIP(src) ==> acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), _) -// @ ensures res == nil && wildcard && isHostSVC(src) ==> sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) -// @ ensures res == nil && !wildcard && isHostSVC(src) ==> sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) +// @ ensures res == nil && wildcard && isIP(src) ==> acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), _) +// @ ensures res == nil && wildcard && isHostSVC(src) ==> sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) +// @ ensures res == nil && !wildcard && isHostSVC(src) ==> sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) // @ ensures res == nil && !wildcard ==> acc(src.Mem(), R18) // @ ensures res == nil && !wildcard && isIP(src) ==> (unfolding acc(src.Mem(), R20) in (isIPv4(src) ==> forall i int :: { &s.RawSrcAddr[i] } 0 <= i && i < len(s.RawSrcAddr) ==> &s.RawSrcAddr[i] == &src.(*net.IPAddr).IP[i])) // @ ensures res == nil && !wildcard && isIP(src) ==> (unfolding acc(src.Mem(), R20) in (isIPv6(src) && isConvertibleToIPv4(src) ==> forall i int :: { &s.RawSrcAddr[i] } 0 <= i && i < len(s.RawSrcAddr) ==> &s.RawSrcAddr[i] == &src.(*net.IPAddr).IP[12+i])) @@ -681,48 +713,48 @@ func (s *SCION) SetSrcAddr(src net.Addr /*@, ghost wildcard bool @*/) (res error var verScionTmp []byte s.SrcAddrType, verScionTmp, err = packAddr(src /*@ , wildcard @*/) // @ ghost if !wildcard && err == nil && isIP(src) { - // @ apply acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(src.Mem(), R20) + // @ apply acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(src.Mem(), R20) // @ } s.RawSrcAddr = verScionTmp return err } // @ requires addrType == T4Svc ==> len(raw) >= addr.HostLenSVC -// @ requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) +// @ requires acc(sl.Bytes(raw, 0, len(raw)), R15) // @ ensures err == nil ==> acc(res.Mem(), R15) // @ ensures err == nil ==> typeOf(res) == *net.IPAddr || typeOf(res) == addr.HostSVC // @ ensures err == nil ==> -// @ (acc(res.Mem(), R15) --* acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15)) -// @ ensures err != nil ==> acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) +// @ (acc(res.Mem(), R15) --* acc(sl.Bytes(raw, 0, len(raw)), R15)) +// @ ensures err != nil ==> acc(sl.Bytes(raw, 0, len(raw)), R15) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func parseAddr(addrType AddrType, raw []byte) (res net.Addr, err error) { switch addrType { case T4Ip: verScionTmp := &net.IPAddr{IP: net.IP(raw)} - // @ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ unfold 
acc(sl.Bytes(raw, 0, len(raw)), R15) // @ fold acc(verScionTmp.Mem(), R15) - // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15)) { + // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.Bytes(raw, 0, len(raw)), R15)) { // @ assert acc(&verScionTmp.IP, R50) && verScionTmp.IP === raw // @ unfold acc(verScionTmp.Mem(), R15) - // @ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ fold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ } return verScionTmp, nil case T4Svc: - // @ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ unfold acc(sl.Bytes(raw, 0, len(raw)), R15) verScionTmp := addr.HostSVC(binary.BigEndian.Uint16(raw[:addr.HostLenSVC])) - // @ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ fold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ fold acc(verScionTmp.Mem(), R15) - // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15)) { } + // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.Bytes(raw, 0, len(raw)), R15)) { } return verScionTmp, nil case T16Ip: verScionTmp := &net.IPAddr{IP: net.IP(raw)} - // @ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ unfold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ fold acc(verScionTmp.Mem(), R15) - // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15)) { + // @ package (acc((net.Addr)(verScionTmp).Mem(), R15) --* acc(sl.Bytes(raw, 0, len(raw)), R15)) { // @ assert acc(&verScionTmp.IP, R50) && verScionTmp.IP === raw // @ unfold acc(verScionTmp.Mem(), R15) - // @ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R15) + // @ fold acc(sl.Bytes(raw, 0, len(raw)), R15) // @ } return verScionTmp, nil } @@ -738,12 +770,12 @@ func parseAddr(addrType AddrType, raw []byte) (res net.Addr, err error) { // @ ensures isHostSVC(hostAddr) ==> err == nil // @ ensures err == nil ==> isIP(hostAddr) || isHostSVC(hostAddr) // @ ensures err != nil ==> err.ErrorMem() -// @ ensures err == nil && wildcard && isIP(hostAddr) ==> acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) -// @ ensures err == nil && wildcard && isHostSVC(hostAddr) ==> sl.AbsSlice_Bytes(b, 0, len(b)) -// @ ensures err == nil && !wildcard && isHostSVC(hostAddr) ==> sl.AbsSlice_Bytes(b, 0, len(b)) +// @ ensures err == nil && wildcard && isIP(hostAddr) ==> acc(sl.Bytes(b, 0, len(b)), _) +// @ ensures err == nil && wildcard && isHostSVC(hostAddr) ==> sl.Bytes(b, 0, len(b)) +// @ ensures err == nil && !wildcard && isHostSVC(hostAddr) ==> sl.Bytes(b, 0, len(b)) // @ ensures err == nil && !wildcard && isHostSVC(hostAddr) ==> acc(hostAddr.Mem(), R20) -// @ ensures err == nil && !wildcard && isIP(hostAddr) ==> acc(sl.AbsSlice_Bytes(b, 0, len(b)), R20) -// @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (acc(sl.AbsSlice_Bytes(b, 0, len(b)), R20) --* acc(hostAddr.Mem(), R20)) +// @ ensures err == nil && !wildcard && isIP(hostAddr) ==> acc(sl.Bytes(b, 0, len(b)), R20) +// @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (acc(sl.Bytes(b, 0, len(b)), R20) --* acc(hostAddr.Mem(), R20)) // @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (isIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[i])) // @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (isIPv6(hostAddr) && isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == 
&hostAddr.(*net.IPAddr).IP[12+i])) // @ ensures err == nil && !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (!isIPv4(hostAddr) && !isIPv6(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[i])) @@ -768,11 +800,11 @@ func packAddr(hostAddr net.Addr /*@ , ghost wildcard bool @*/) (addrtyp AddrType // @ } // @ assert !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (isIPv6(hostAddr) && isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[12+i])) // @ ghost if wildcard { - // @ fold acc(sl.AbsSlice_Bytes(ip, 0, len(ip)), _) + // @ fold acc(sl.Bytes(ip, 0, len(ip)), _) // @ } else { - // @ fold acc(sl.AbsSlice_Bytes(ip, 0, len(ip)), R20) - // @ package acc(sl.AbsSlice_Bytes(ip, 0, len(ip)), R20) --* acc(hostAddr.Mem(), R20) { - // @ unfold acc(sl.AbsSlice_Bytes(ip, 0, len(ip)), R20) + // @ fold acc(sl.Bytes(ip, 0, len(ip)), R20) + // @ package acc(sl.Bytes(ip, 0, len(ip)), R20) --* acc(hostAddr.Mem(), R20) { + // @ unfold acc(sl.Bytes(ip, 0, len(ip)), R20) // @ fold acc(hostAddr.Mem(), R20) // @ } // @ } @@ -781,18 +813,18 @@ func packAddr(hostAddr net.Addr /*@ , ghost wildcard bool @*/) (addrtyp AddrType // @ assert !wildcard && isIP(hostAddr) ==> (unfolding acc(hostAddr.Mem(), R20) in (isIPv6(hostAddr) && isConvertibleToIPv4(hostAddr) ==> forall i int :: { &b[i] } 0 <= i && i < len(b) ==> &b[i] == &hostAddr.(*net.IPAddr).IP[12+i])) verScionTmp := a.IP // @ ghost if wildcard { - // @ fold acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), _) + // @ fold acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), _) // @ } else { - // @ fold acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) - // @ package acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(hostAddr.Mem(), R20) { - // @ unfold acc(sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)), R20) + // @ fold acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) + // @ package acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) --* acc(hostAddr.Mem(), R20) { + // @ unfold acc(sl.Bytes(verScionTmp, 0, len(verScionTmp)), R20) // @ fold acc(hostAddr.Mem(), R20) // @ } // @ } return T16Ip, verScionTmp, nil case addr.HostSVC: verScionTmp := a.PackWithPad(2) - // @ fold sl.AbsSlice_Bytes(verScionTmp, 0, len(verScionTmp)) + // @ fold sl.Bytes(verScionTmp, 0, len(verScionTmp)) return T4Svc, verScionTmp, nil } return 0, nil, serrors.New("unsupported address", "addr", hostAddr) @@ -816,20 +848,18 @@ func packAddr(hostAddr net.Addr /*@ , ghost wildcard bool @*/) (addrtyp AddrType // @ ensures 0 <= res // @ decreases func (s *SCION) AddrHdrLen( /*@ ghost ubuf []byte, ghost insideSlayers bool @*/ ) (res int) { - /*@ - ghost if !insideSlayers { - unfold acc(s.Mem(ubuf), R51) - defer fold acc(s.Mem(ubuf), R51) - unfold acc(s.HeaderMem(ubuf[CmnHdrLen:]), R51) - defer fold acc(s.HeaderMem(ubuf[CmnHdrLen:]), R51) - assert s.AddrHdrLenSpec(ubuf) == ( - unfolding acc(s.Mem(ubuf), R52) in - unfolding acc(s.HeaderMem(ubuf[CmnHdrLen:]), R52) in - 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length()) - assert s.AddrHdrLenSpec(ubuf) == - 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() - } - @*/ + // @ ghost if !insideSlayers { + // @ unfold acc(s.Mem(ubuf), R51) + // @ defer fold acc(s.Mem(ubuf), R51) + // @ unfold acc(s.HeaderMem(ubuf[CmnHdrLen:]), R51) + // @ defer fold acc(s.HeaderMem(ubuf[CmnHdrLen:]), R51) + // @ assert s.AddrHdrLenSpec(ubuf) 
== ( + // @ unfolding acc(s.Mem(ubuf), R52) in + // @ unfolding acc(s.HeaderMem(ubuf[CmnHdrLen:]), R52) in + // @ 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length()) + // @ assert s.AddrHdrLenSpec(ubuf) == + // @ 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() + // @ } return 2*addr.IABytes + s.DstAddrType.Length() + s.SrcAddrType.Length() } @@ -837,8 +867,8 @@ func (s *SCION) AddrHdrLen( /*@ ghost ubuf []byte, ghost insideSlayers bool @*/ // buffer. The caller must ensure that the correct address types and lengths are set in the SCION // layer, otherwise the results of this method are undefined. // @ preserves acc(s.HeaderMem(ubuf), R10) -// @ preserves sl.AbsSlice_Bytes(buf, 0, len(buf)) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R10) +// @ preserves sl.Bytes(buf, 0, len(buf)) +// @ preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R10) // @ ensures err != nil ==> err.ErrorMem() // @ decreases func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err error) { @@ -852,25 +882,25 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er srcAddrBytes := s.SrcAddrType.Length() offset := 0 // @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm) - // @ unfold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ unfold sl.Bytes(buf[offset:], 0, len(buf[offset:])) binary.BigEndian.PutUint64(buf[offset:], uint64(s.DstIA)) - // @ fold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ fold sl.Bytes(buf[offset:], 0, len(buf[offset:])) // @ sl.CombineRange_Bytes(buf, offset, len(buf), writePerm) offset += addr.IABytes // @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm) - // @ unfold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ unfold sl.Bytes(buf[offset:], 0, len(buf[offset:])) binary.BigEndian.PutUint64(buf[offset:], uint64(s.SrcIA)) - // @ fold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:])) + // @ fold sl.Bytes(buf[offset:], 0, len(buf[offset:])) // @ sl.CombineRange_Bytes(buf, offset, len(buf), writePerm) offset += addr.IABytes // @ sl.SplitRange_Bytes(buf, offset, offset+dstAddrBytes, writePerm) // @ sl.SplitRange_Bytes(ubuf, offset, offset+dstAddrBytes, R10) - // @ unfold sl.AbsSlice_Bytes(buf[offset:offset+dstAddrBytes], 0, len(buf[offset:offset+dstAddrBytes])) - // @ unfold acc(sl.AbsSlice_Bytes(ubuf[offset:offset+dstAddrBytes], 0, len(ubuf[offset:offset+dstAddrBytes])), R10) + // @ unfold sl.Bytes(buf[offset:offset+dstAddrBytes], 0, len(buf[offset:offset+dstAddrBytes])) + // @ unfold acc(sl.Bytes(ubuf[offset:offset+dstAddrBytes], 0, len(ubuf[offset:offset+dstAddrBytes])), R10) copy(buf[offset:offset+dstAddrBytes], s.RawDstAddr /*@ , R10 @*/) - // @ fold sl.AbsSlice_Bytes(buf[offset:offset+dstAddrBytes], 0, len(buf[offset:offset+dstAddrBytes])) - // @ fold acc(sl.AbsSlice_Bytes(ubuf[offset:offset+dstAddrBytes], 0, len(ubuf[offset:offset+dstAddrBytes])), R10) + // @ fold sl.Bytes(buf[offset:offset+dstAddrBytes], 0, len(buf[offset:offset+dstAddrBytes])) + // @ fold acc(sl.Bytes(ubuf[offset:offset+dstAddrBytes], 0, len(ubuf[offset:offset+dstAddrBytes])), R10) // @ sl.CombineRange_Bytes(buf, offset, offset+dstAddrBytes, writePerm) // @ sl.CombineRange_Bytes(ubuf, offset, offset+dstAddrBytes, R10) @@ -878,13 +908,13 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er // @ sl.SplitRange_Bytes(buf, offset, offset+srcAddrBytes, writePerm) // @ sl.SplitRange_Bytes(ubuf, offset, offset+srcAddrBytes, R10) - // @ unfold 
sl.AbsSlice_Bytes(buf[offset:offset+srcAddrBytes], 0, len(buf[offset:offset+srcAddrBytes])) - // @ unfold acc(sl.AbsSlice_Bytes(ubuf[offset:offset+srcAddrBytes], 0, len(ubuf[offset:offset+srcAddrBytes])), R10) + // @ unfold sl.Bytes(buf[offset:offset+srcAddrBytes], 0, len(buf[offset:offset+srcAddrBytes])) + // @ unfold acc(sl.Bytes(ubuf[offset:offset+srcAddrBytes], 0, len(ubuf[offset:offset+srcAddrBytes])), R10) copy(buf[offset:offset+srcAddrBytes], s.RawSrcAddr /*@ , R10 @*/) - // @ fold sl.AbsSlice_Bytes(buf[offset:offset+srcAddrBytes], 0, len(buf[offset:offset+srcAddrBytes])) - // @ fold acc(sl.AbsSlice_Bytes(ubuf[offset:offset+srcAddrBytes], 0, len(ubuf[offset:offset+srcAddrBytes])), R10) + // @ fold sl.Bytes(buf[offset:offset+srcAddrBytes], 0, len(buf[offset:offset+srcAddrBytes])) + // @ fold acc(sl.Bytes(ubuf[offset:offset+srcAddrBytes], 0, len(ubuf[offset:offset+srcAddrBytes])), R10) // @ sl.CombineRange_Bytes(buf, offset, offset+srcAddrBytes, writePerm) // @ sl.CombineRange_Bytes(ubuf, offset, offset+srcAddrBytes, R10) @@ -898,7 +928,7 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er // @ requires acc(&s.SrcAddrType, HalfPerm) && s.SrcAddrType.Has3Bits() // @ requires acc(&s.DstAddrType, HalfPerm) && s.DstAddrType.Has3Bits() // @ requires acc(&s.RawSrcAddr) && acc(&s.RawDstAddr) -// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R10) +// @ preserves acc(sl.Bytes(data, 0, len(data)), R41) // @ ensures res == nil ==> s.HeaderMem(data) // @ ensures res != nil ==> res.ErrorMem() // @ ensures res != nil ==> ( @@ -907,19 +937,19 @@ func (s *SCION) SerializeAddrHdr(buf []byte /*@ , ghost ubuf []byte @*/) (err er // @ acc(&s.RawSrcAddr) && acc(&s.RawDstAddr)) // @ decreases func (s *SCION) DecodeAddrHdr(data []byte) (res error) { - // @ ghost l := s.AddrHdrLen(nil, true) + // @ ghost l := s.AddrHdrLenSpecInternal() if len(data) < s.AddrHdrLen( /*@ nil, true @*/ ) { return serrors.New("provided buffer is too small", "expected", s.AddrHdrLen( /*@ nil, true @*/ ), "actual", len(data)) } offset := 0 - // @ unfold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R10) + // @ unfold acc(sl.Bytes(data, 0, len(data)), R41) // @ assert forall i int :: { &data[offset:][i] }{ &data[i] } 0 <= i && i < l ==> &data[offset:][i] == &data[i] s.DstIA = addr.IA(binary.BigEndian.Uint64(data[offset:])) offset += addr.IABytes // @ assert forall i int :: { &data[offset:][i] } 0 <= i && i < l ==> &data[offset:][i] == &data[offset+i] s.SrcIA = addr.IA(binary.BigEndian.Uint64(data[offset:])) - // @ fold acc(sl.AbsSlice_Bytes(data, 0, len(data)), R10) + // @ fold acc(sl.Bytes(data, 0, len(data)), R41) offset += addr.IABytes dstAddrBytes := s.DstAddrType.Length() srcAddrBytes := s.SrcAddrType.Length() @@ -935,13 +965,13 @@ func (s *SCION) DecodeAddrHdr(data []byte) (res error) { // @ requires acc(&s.RawSrcAddr, R20) && acc(&s.RawDstAddr, R20) // @ requires len(s.RawSrcAddr) % 2 == 0 && len(s.RawDstAddr) % 2 == 0 // @ requires acc(&s.SrcIA, R20) && acc(&s.DstIA, R20) -// @ requires acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) -// @ requires acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) -// @ preserves acc(sl.AbsSlice_Bytes(upperLayer, 0, len(upperLayer)), R20) +// @ requires acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) +// @ requires acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) +// @ preserves acc(sl.Bytes(upperLayer, 0, len(upperLayer)), R20) // @ ensures acc(&s.RawSrcAddr, R20) && acc(&s.RawDstAddr, R20) // @ ensures 
acc(&s.SrcIA, R20) && acc(&s.DstIA, R20) -// @ ensures acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) -// @ ensures acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) +// @ ensures acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) +// @ ensures acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ensures s == nil ==> err != nil // @ ensures len(s.RawDstAddr) == 0 ==> err != nil // @ ensures len(s.RawSrcAddr) == 0 ==> err != nil @@ -964,12 +994,12 @@ func (s *SCION) computeChecksum(upperLayer []byte, protocol uint8) (res uint16, // @ requires acc(&s.RawSrcAddr, R20) && acc(&s.RawDstAddr, R20) // @ requires len(s.RawSrcAddr) % 2 == 0 && len(s.RawDstAddr) % 2 == 0 // @ requires acc(&s.SrcIA, R20) && acc(&s.DstIA, R20) -// @ requires acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) -// @ requires acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) +// @ requires acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) +// @ requires acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ensures acc(&s.RawSrcAddr, R20) && acc(&s.RawDstAddr, R20) // @ ensures acc(&s.SrcIA, R20) && acc(&s.DstIA, R20) -// @ ensures acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) -// @ ensures acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) +// @ ensures acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) +// @ ensures acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ensures len(s.RawDstAddr) == 0 ==> err != nil // @ ensures len(s.RawSrcAddr) == 0 ==> err != nil // @ ensures err != nil ==> err.ErrorMem() @@ -999,7 +1029,7 @@ func (s *SCION) pseudoHeaderChecksum(length int, protocol uint8) (res uint32, er } // Address length is guaranteed to be a multiple of 2 by the protocol. 
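The two loops below, and the matching RawDstAddr loops, accumulate the pseudo-header checksum two bytes at a time. A standalone sketch of the full pattern, including the fold that foldChecksum performs at the end (checksum16 is a hypothetical helper, not this package's API):

    package main

    import "fmt"

    func checksum16(b []byte) uint16 {
        var csum uint32
        for i := 0; i+1 < len(b); i += 2 {
            csum += uint32(b[i])<<8 + uint32(b[i+1])
        }
        if len(b)%2 == 1 {
            csum += uint32(b[len(b)-1]) << 8 // odd tail, as in upperLayerChecksum
        }
        // While csum > 0xffff, each step lowers csum by 65535*(csum>>16),
        // so the loop terminates; this is the fact b.FoldChecksumLemma
        // makes available to Gobra for the loop's "decreases csum" clause.
        for csum > 0xffff {
            csum = (csum >> 16) + (csum & 0xffff)
        }
        return ^uint16(csum)
    }

    func main() {
        fmt.Printf("%#04x\n", checksum16([]byte{0xde, 0xad, 0xbe, 0xef, 0x01}))
    }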
// @ ghost var rawSrcAddrLen int = len(s.RawSrcAddr) - // @ invariant acc(&s.RawSrcAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ invariant acc(&s.RawSrcAddr, R20) && acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) // @ invariant len(s.RawSrcAddr) == rawSrcAddrLen // @ invariant len(s.RawSrcAddr) % 2 == 0 // @ invariant i % 2 == 0 @@ -1007,20 +1037,20 @@ func (s *SCION) pseudoHeaderChecksum(length int, protocol uint8) (res uint32, er // @ decreases len(s.RawSrcAddr) - i for i := 0; i < len(s.RawSrcAddr); i += 2 { // @ preserves err == nil - // @ requires acc(&s.RawSrcAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ requires acc(&s.RawSrcAddr, R20) && acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) // @ requires 0 <= i && i < len(s.RawSrcAddr) && i % 2 == 0 && len(s.RawSrcAddr) % 2 == 0 - // @ ensures acc(&s.RawSrcAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ ensures acc(&s.RawSrcAddr, R20) && acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) // @ ensures s.RawSrcAddr === before(s.RawSrcAddr) // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ unfold acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) csum += uint32(s.RawSrcAddr[i]) << 8 csum += uint32(s.RawSrcAddr[i+1]) - // @ fold acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) + // @ fold acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), R20) // @ ) } // @ ghost var rawDstAddrLen int = len(s.RawDstAddr) - // @ invariant acc(&s.RawDstAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ invariant acc(&s.RawDstAddr, R20) && acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ invariant len(s.RawDstAddr) == rawDstAddrLen // @ invariant len(s.RawDstAddr) % 2 == 0 // @ invariant i % 2 == 0 @@ -1028,16 +1058,16 @@ func (s *SCION) pseudoHeaderChecksum(length int, protocol uint8) (res uint32, er // @ decreases len(s.RawDstAddr) - i for i := 0; i < len(s.RawDstAddr); i += 2 { // @ preserves err == nil - // @ requires acc(&s.RawDstAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ requires acc(&s.RawDstAddr, R20) && acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ requires 0 <= i && i < len(s.RawDstAddr) && i % 2 == 0 && len(s.RawDstAddr) % 2 == 0 - // @ ensures acc(&s.RawDstAddr, R20) && acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ ensures acc(&s.RawDstAddr, R20) && acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ensures s.RawDstAddr === before(s.RawDstAddr) // @ decreases // @ outline( - // @ unfold acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ unfold acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) csum += uint32(s.RawDstAddr[i]) << 8 csum += uint32(s.RawDstAddr[i+1]) - // @ fold acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) + // @ fold acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R20) // @ ) } l := uint32(length) @@ -1046,13 +1076,13 @@ func (s *SCION) pseudoHeaderChecksum(length int, protocol uint8) (res uint32, er return csum, nil } -// @ preserves acc(sl.AbsSlice_Bytes(upperLayer, 0, len(upperLayer)), R20) +// @ preserves acc(sl.Bytes(upperLayer, 0, len(upperLayer)), R20) // @ decreases func (s *SCION) upperLayerChecksum(upperLayer []byte, csum uint32) uint32 { // Compute safe boundary to ensure we do not access out of bounds. 
// Odd lengths are handled at the end. safeBoundary := len(upperLayer) - 1 - // @ unfold acc(sl.AbsSlice_Bytes(upperLayer, 0, len(upperLayer)), R20) + // @ unfold acc(sl.Bytes(upperLayer, 0, len(upperLayer)), R20) // @ invariant 0 <= i && i < safeBoundary + 2 // @ invariant i % 2 == 0 // @ invariant forall i int :: { &upperLayer[i] } 0 <= i && i < len(upperLayer) ==> acc(&upperLayer[i], R20) @@ -1064,15 +1094,17 @@ func (s *SCION) upperLayerChecksum(upperLayer []byte, csum uint32) uint32 { if len(upperLayer)%2 == 1 { csum += uint32(upperLayer[safeBoundary]) << 8 } - // @ fold acc(sl.AbsSlice_Bytes(upperLayer, 0, len(upperLayer)), R20) + // @ fold acc(sl.Bytes(upperLayer, 0, len(upperLayer)), R20) return csum } // (VerifiedSCION) The following function terminates but Gobra can't // deduce that because of limited support of bitwise operations. -// @ decreases _ +// @ decreases func (s *SCION) foldChecksum(csum uint32) (res uint16) { + // @ decreases csum for csum > 0xffff { + // @ b.FoldChecksumLemma(csum) csum = (csum >> 16) + (csum & 0xffff) } return ^uint16(csum) diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra index 632535619..37e82f182 100644 --- a/pkg/slayers/scion_spec.gobra +++ b/pkg/slayers/scion_spec.gobra @@ -27,8 +27,11 @@ import ( "github.com/scionproto/scion/pkg/slayers/path/onehop" "github.com/scionproto/scion/pkg/slayers/path/scion" - . "github.com/scionproto/scion/verification/utils/definitions" - "github.com/scionproto/scion/verification/utils/slices" + . "verification/utils/definitions" + sl "verification/utils/slices" + "verification/io" + "encoding/binary" + "verification/utils/seqs" ) pred PathPoolMem(pathPool []path.Path, pathPoolRaw path.Path) { @@ -177,7 +180,7 @@ pred (s *SCION) Mem(ubuf []byte) { // end of path pool // helpful facts for other methods: // - for router::updateScionLayer: - (typeOf(s.Path) == type[*onehop.Path] ==> CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.Len(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen]) <= len(ubuf)) + (typeOf(s.Path) == type[*onehop.Path] ==> CmnHdrLen + s.AddrHdrLenSpecInternal() + s.Path.LenSpec(ubuf[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen]) <= len(ubuf)) } ghost @@ -187,9 +190,9 @@ pure func (s *SCION) ValidPathMetaData(ghost ub []byte) bool { return unfolding acc(s.Mem(ub), _) in let ubPath := s.UBPath(ub) in (typeOf(s.Path) == type[*scion.Raw] ==> - s.Path.(*scion.Raw).ValidCurrIdxs(ubPath)) && + s.Path.(*scion.Raw).GetBase(ubPath).Valid()) && (typeOf(s.Path) == type[*epic.Path] ==> - s.Path.(*epic.Path).ValidCurrIdxs(ubPath)) + s.Path.(*epic.Path).GetBase(ubPath).Valid()) } // TODO: simplify the body of the predicate when let expressions @@ -231,8 +234,8 @@ pred (s *SCION) ChecksumMem() { acc(&s.RawSrcAddr) && acc(&s.RawDstAddr) && len(s.RawSrcAddr) % 2 == 0 && len(s.RawDstAddr) % 2 == 0 && acc(&s.SrcIA) && acc(&s.DstIA) && - slices.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) && - slices.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) + sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) && + sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) } pred (b *BaseLayer) Mem(ghost ub []byte, ghost breakPoint int) { @@ -349,6 +352,254 @@ func (s *SCION) GetPath(ub []byte) path.Path { return unfolding acc(s.Mem(ub), _) in s.Path } +ghost +opaque +pure +requires acc(s.Mem(ub), _) +requires acc(sl.Bytes(ub, 0, length), _) +requires CmnHdrLen <= length +decreases +func (s *SCION) ValidHeaderOffset(ub []byte, length int) bool { + return GetAddressOffsetWithinLength(ub, length) == 
s.PathStartIdx(ub) && + GetLengthWithinLength(ub,length) == s.PathEndIdx(ub) +} + +ghost +requires acc(s.Mem(ub), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R55) +requires acc(sl.Bytes(ub, 0, length), R55) +requires CmnHdrLen <= length && length <= len(ub) +requires s.ValidHeaderOffset(ub, len(ub)) +ensures acc(s.Mem(ub), R56) +ensures acc(sl.Bytes(ub, 0, len(ub)), R55) +ensures acc(sl.Bytes(ub, 0, length), R55) +ensures s.ValidHeaderOffset(ub, length) +decreases +func (s *SCION) ValidHeaderOffsetToSubSliceLemma(ub []byte, length int) { + reveal s.ValidHeaderOffset(ub, len(ub)) + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub, 0, length), R56) + assert reveal s.ValidHeaderOffset(ub, length) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) + fold acc(sl.Bytes(ub, 0, length), R56) +} + +ghost +requires acc(s.Mem(ub), R56) +requires acc(sl.Bytes(ub, 0, len(ub)), R55) +requires acc(sl.Bytes(ub, 0, length), R55) +requires CmnHdrLen <= length && length <= len(ub) +requires s.ValidHeaderOffset(ub, length) +ensures acc(s.Mem(ub), R56) +ensures acc(sl.Bytes(ub, 0, len(ub)), R55) +ensures acc(sl.Bytes(ub, 0, length), R55) +ensures s.ValidHeaderOffset(ub, len(ub)) +decreases +func (s *SCION) ValidHeaderOffsetFromSubSliceLemma(ub []byte, length int) { + reveal s.ValidHeaderOffset(ub, len(ub)) + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub, 0, length), R56) + assert reveal s.ValidHeaderOffset(ub, length) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) + fold acc(sl.Bytes(ub, 0, length), R56) +} + +ghost +opaque +pure +requires acc(s.Mem(ub), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) +decreases +func (s *SCION) EqAbsHeader(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + let low := CmnHdrLen+s.AddrHdrLenSpecInternal() in + let high := s.HdrLen*LineLen in + GetAddressOffset(ub) == low && + GetLength(ub) == int(high) && + // Might be worth introducing EqAbsHeader as an interface method on Path + // to avoid doing these casts, especially when we add support for EPIC. + typeOf(s.Path) == (*scion.Raw) && + unfolding acc(s.Path.Mem(ub[low:high]), _) in + unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in + let _ := Asserting(forall k int :: {&ub[low:high][k]} 0 <= k && k < high ==> + &ub[low:high][k] == &ub[low + k]) in + let _ := Asserting(forall k int :: {&ub[low:high][:scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> + &ub[low:high][:scion.MetaLen][k] == &ub[low:high][k]) in + let metaHdr := scion.DecodedFrom(binary.BigEndian.Uint32(ub[low:high][:scion.MetaLen])) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in + let segs := io.CombineSegLens(seg1, seg2, seg3) in + s.Path.(*scion.Raw).Base.GetBase() == + scion.Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} +} + +// Describes a SCION packet that was successfully decoded by `DecodeFromBytes`. +ghost +opaque +pure +requires acc(s.Mem(ub), _) +decreases +func (s *SCION) ValidScionInitSpec(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + let low := CmnHdrLen+s.AddrHdrLenSpecInternal() in + let high := s.HdrLen*LineLen in + typeOf(s.Path) == (*scion.Raw) && + s.Path.(*scion.Raw).GetBase(ub[low:high]).WeaklyValid() +} + +// Checks if the common path header is valid in the serialized scion packet. 
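+// The predicate is marked opaque, so its body stays hidden from the verifier
+// until a caller explicitly reveals it; this keeps proof contexts small and
+// quantifier instantiation predictable.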
+ghost +opaque +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func ValidPktMetaHdr(raw []byte) bool { + return CmnHdrLen <= len(raw) && + let start := GetAddressOffset(raw) in + let end := start+scion.MetaLen in + 0 <= start && end <= len(raw) && + let rawHdr := raw[start:end] in + let length := GetLength(raw) in + length <= len(raw) && + unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + let _ := Asserting(forall k int :: {&rawHdr[k]} 0 <= k && k < scion.MetaLen ==> &rawHdr[k] == &raw[start + k]) in + let hdr := binary.BigEndian.Uint32(rawHdr) in + let metaHdr := scion.DecodedFrom(hdr) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in + let segs := io.CombineSegLens(seg1, seg2, seg3) in + let base := scion.Base{metaHdr, segs.NumInfoFields(), segs.TotalHops()} in + 0 < metaHdr.SegLen[0] && + base.Valid() && + scion.PktLen(segs, start + scion.MetaLen) <= length +} + +ghost +opaque +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +decreases +pure func IsSupportedPkt(raw []byte) bool { + return CmnHdrLen <= len(raw) && + let pathType := path.Type(GetPathType(raw)) in + let nextHdr := L4ProtocolType(GetNextHdr(raw)) in + pathType == scion.PathType && + nextHdr != L4SCMP +} + +ghost +opaque +decreases +pure func IsSupportedRawPkt(raw seq[byte]) bool { + return CmnHdrLen <= len(raw) && + let pathType := path.Type(raw[8]) in + let nextHdr := L4ProtocolType(raw[4]) in + pathType == scion.PathType && + nextHdr != L4SCMP +} + +ghost +requires CmnHdrLen <= idx && idx <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R55) +preserves acc(sl.Bytes(raw[:idx], 0, idx), R55) +ensures IsSupportedPkt(raw) == IsSupportedPkt(raw[:idx]) +decreases +func IsSupportedPktSubslice(raw []byte, idx int) { + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(raw[:idx], 0, idx), R56) + reveal IsSupportedPkt(raw) + reveal IsSupportedPkt(raw[:idx]) + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[:idx], 0, idx), R56) +} + +ghost +preserves acc(s.Mem(ub), R55) +preserves acc(sl.Bytes(ub, 0, len(ub)), R55) +preserves acc(sl.Bytes(buffer, 0, len(buffer)), R55) +preserves s.EqPathType(ub) +preserves s.EqPathTypeWithBuffer(ub, buffer) +ensures IsSupportedPkt(ub) == IsSupportedPkt(buffer) +decreases +func (s *SCION) IsSupportedPktLemma(ub []byte, buffer []byte) { + reveal s.EqPathType(ub) + reveal s.EqPathTypeWithBuffer(ub, buffer) + reveal IsSupportedPkt(ub) + reveal IsSupportedPkt(buffer) +} + +ghost +requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires CmnHdrLen <= len(ub) +decreases +pure func GetAddressOffset(ub []byte) int { + return GetAddressOffsetWithinLength(ub, len(ub)) +} + +ghost +requires acc(sl.Bytes(ub, 0, length), _) +requires CmnHdrLen <= length +decreases +pure func GetAddressOffsetWithinLength(ub []byte, length int) int { + return unfolding acc(sl.Bytes(ub, 0, length), _) in + let dstAddrLen := AddrType(ub[9] >> 4 & 0x7).Length() in + let srcAddrLen := AddrType(ub[9] & 0x7).Length() in + CmnHdrLen + 2*addr.IABytes + dstAddrLen + srcAddrLen +} + +ghost +requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires CmnHdrLen <= len(ub) +decreases +pure func GetLength(ub []byte) int { + return GetLengthWithinLength(ub, len(ub)) +} + +ghost +requires acc(sl.Bytes(ub, 0, length), _) +requires CmnHdrLen <= length +decreases +pure func GetLengthWithinLength(ub []byte, length int) int { + return unfolding acc(sl.Bytes(ub, 0, length), _) in int(ub[5])*LineLen +} + +ghost +requires acc(sl.Bytes(ub, 0, 
len(ub)), _) +requires CmnHdrLen <= len(ub) +decreases +pure func GetPathType(ub []byte) int { + return unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in int(ub[8]) +} + +ghost +requires acc(sl.Bytes(ub, 0, len(ub)), _) +requires CmnHdrLen <= len(ub) +decreases +pure func GetNextHdr(ub []byte) int { + return unfolding acc(sl.Bytes(ub, 0, len(ub)), _) in int(ub[4]) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires acc(sl.Bytes(ub, 0, len(ub)), _) +decreases +pure func (s *SCION) EqPathType(ub []byte) bool { + return reveal s.EqPathTypeWithBuffer(ub, ub) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires acc(sl.Bytes(buffer, 0, len(buffer)), _) +decreases +pure func (s *SCION) EqPathTypeWithBuffer(ub []byte, buffer []byte) bool { + return unfolding acc(s.Mem(ub), _) in + CmnHdrLen <= len(buffer) && + path.Type(GetPathType(buffer)) == s.PathType && + L4ProtocolType(GetNextHdr(buffer)) == s.NextHdr +} + ghost pure requires acc(s.Mem(ub), _) @@ -519,6 +770,6 @@ decreases pure func (s *SCION) InferSizeOHP(ghost ub []byte) (b bool) { return unfolding acc(s.Mem(ub), _) in let pathSlice := ub[CmnHdrLen+s.AddrHdrLenSpecInternal() : s.HdrLen*LineLen] in - let pathLen := s.Path.Len(pathSlice) in + let pathLen := s.Path.LenSpec(pathSlice) in CmnHdrLen + s.AddrHdrLenSpecInternal() + pathLen <= len(ub) } \ No newline at end of file diff --git a/pkg/slayers/scion_test.gobra b/pkg/slayers/scion_test.gobra index eae1258d6..3678ddaee 100644 --- a/pkg/slayers/scion_test.gobra +++ b/pkg/slayers/scion_test.gobra @@ -39,7 +39,7 @@ requires acc(src.Mem(), _) func testSrcSetterWildcard(s *SCION, src *net.IPAddr) { res := s.SetSrcAddr(src, true) // in the wildcard case we have wildcard access to the address in the SCION struct - assert acc(sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), _) + assert acc(sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)), _) } // These tests show the behavior of SetSrcAddr when an HostSVC is passed @@ -50,7 +50,7 @@ func testSrcSetterSVC(s *SCION, src addr.HostSVC) { res := s.SetSrcAddr(src, false) assert src.Mem() // if the address is HostSVC we have the whole permission anyway in a slice predicate - unfold sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) + unfold sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) assert forall i int :: { &s.RawSrcAddr[i] } 0 <= i && i < len(s.RawSrcAddr) ==> acc(&s.RawSrcAddr[i]) } @@ -60,7 +60,7 @@ requires acc(src.Mem(), _) func testSrcSetterSVCWildcard(s *SCION, src addr.HostSVC) { res := s.SetSrcAddr(src, true) // if the address is HostSVC we have the whole permission anyway in a slice predicate - unfold sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) + unfold sl.Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) assert forall i int :: { &s.RawSrcAddr[i] } 0 <= i && i < len(s.RawSrcAddr) ==> acc(&s.RawSrcAddr[i]) } @@ -82,7 +82,7 @@ requires acc(dst.Mem(), _) func testDstSetterWildcard(s *SCION, dst *net.IPAddr) { res := s.SetDstAddr(dst, true) // in the wildcard case we have wildcard access to the address in the SCION struct - assert acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), _) + assert acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), _) } // These tests show the behavior of SetDstAddr when an HostSVC is passed @@ -93,7 +93,7 @@ func testDstSetterSVC(s *SCION, dst addr.HostSVC) { res := s.SetDstAddr(dst, false) assert dst.Mem() // if the address is HostSVC we have the whole permission anyway in a slice predicate - unfold sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)) + unfold sl.Bytes(s.RawDstAddr, 0, 
diff --git a/pkg/slayers/scmp.go b/pkg/slayers/scmp.go
index 9f434b6c9..ab15de197 100644
--- a/pkg/slayers/scmp.go
+++ b/pkg/slayers/scmp.go
@@ -24,7 +24,7 @@ import (
 	"github.com/scionproto/scion/pkg/private/serrors"
 	// @ . "github.com/scionproto/scion/verification/utils/definitions"
-	// @ "github.com/scionproto/scion/verification/utils/slices"
+	// @ sl "github.com/scionproto/scion/verification/utils/slices"
 )
 
 // MaxSCMPPacketLen the maximum length a SCION packet including SCMP quote can
@@ -130,14 +130,14 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp
 	// @ decreases
 	// @ outline (
 	// @ b.ExchangePred()
-	// @ slices.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
-	// @ unfold slices.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 2)
 	// @ assert forall i int :: { &bytes[i] } 0 <= i && i < 2 ==> &bytes[i] == &underlyingBufRes[i]
-	// @ fold slices.AbsSlice_Bytes(bytes, 0, 2)
+	// @ fold sl.Bytes(bytes, 0, 2)
 	s.TypeCode.SerializeTo(bytes)
-	// @ unfold slices.AbsSlice_Bytes(bytes, 0, 2)
-	// @ fold slices.AbsSlice_Bytes(underlyingBufRes, 0, 2)
-	// @ slices.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
+	// @ unfold sl.Bytes(bytes, 0, 2)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 2)
+	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	// @ )
 
@@ -154,13 +154,13 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp
 	// @ decreases
 	// @ outline (
 	// @ b.ExchangePred()
-	// @ slices.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
-	// @ unfold slices.AbsSlice_Bytes(underlyingBufRes, 0, 4)
+	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 4)
 	// @ assert forall i int :: { &bytes[i] } 0 <= i && i < 4 ==> &bytes[i] == &underlyingBufRes[i]
 	bytes[2] = 0
 	bytes[3] = 0
-	// @ fold slices.AbsSlice_Bytes(underlyingBufRes, 0, 4)
-	// @ slices.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 4)
+	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	// @ )
 	verScionTmp := b.Bytes()
@@ -182,13 +182,13 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp
 	// @ decreases
 	// @ outline (
 	// @ b.ExchangePred()
-	// @ slices.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
-	// @ unfold slices.AbsSlice_Bytes(underlyingBufRes, 0, 4)
+	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 4)
 	// @ assert forall i int :: { &bytes[i] } 0 <= i && i < 4 ==> &bytes[i] == &underlyingBufRes[i]
 	// @ assert forall i int :: { &bytes[2:][i] } 0 <= i && i < 2 ==> &bytes[2:][i] == &bytes[i + 2]
 	binary.BigEndian.PutUint16(bytes[2:], s.Checksum)
-	// @ fold slices.AbsSlice_Bytes(underlyingBufRes, 0, 4)
-	// @ slices.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 4)
+	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	// @ )
 	// @ fold s.Mem(ubufMem)
@@ -197,7 +197,7 @@ func (s *SCMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOp
 
 // DecodeFromBytes decodes the given bytes into this layer.
 // @ requires df != nil
-// @ preserves slices.AbsSlice_Bytes(data, 0, len(data))
+// @ preserves acc(sl.Bytes(data, 0, len(data)), R40)
 // @ requires s.NonInitMem()
 // @ preserves df.Mem()
 // @ ensures res == nil ==> s.Mem(data)
@@ -210,31 +210,31 @@ func (s *SCMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res err
 	}
 	// @ unfold s.NonInitMem()
 	// @ requires len(data) >= 4
-	// @ requires slices.AbsSlice_Bytes(data, 0, len(data))
+	// @ requires acc(sl.Bytes(data, 0, len(data)), R40)
 	// @ preserves acc(&s.TypeCode)
-	// @ ensures slices.AbsSlice_Bytes(data, 2, len(data))
-	// @ ensures slices.AbsSlice_Bytes(data, 0, 2)
+	// @ ensures acc(sl.Bytes(data, 2, len(data)), R40)
+	// @ ensures acc(sl.Bytes(data, 0, 2), R40)
 	// @ decreases
 	// @ outline (
-	// @ slices.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm)
-	// @ unfold slices.AbsSlice_Bytes(data, 0, 2)
+	// @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, R40)
+	// @ unfold acc(sl.Bytes(data, 0, 2), R40)
 	s.TypeCode = CreateSCMPTypeCode(SCMPType(data[0]), SCMPCode(data[1]))
-	// @ fold slices.AbsSlice_Bytes(data, 0, 2)
+	// @ fold acc(sl.Bytes(data, 0, 2), R40)
 	// @ )
 	// @ requires len(data) >= 4
-	// @ requires slices.AbsSlice_Bytes(data, 0, 2)
-	// @ requires slices.AbsSlice_Bytes(data, 2, len(data))
+	// @ requires acc(sl.Bytes(data, 0, 2), R40)
+	// @ requires acc(sl.Bytes(data, 2, len(data)), R40)
 	// @ preserves acc(&s.Checksum)
-	// @ ensures slices.AbsSlice_Bytes(data, 0, len(data))
+	// @ ensures acc(sl.Bytes(data, 0, len(data)), R40)
 	// @ decreases
 	// @ outline (
-	// @ slices.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm)
-	// @ unfold slices.AbsSlice_Bytes(data, 2, 4)
+	// @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, R40)
+	// @ unfold acc(sl.Bytes(data, 2, 4), R40)
 	// @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2 + i] == &data[2:4][i]
 	s.Checksum = binary.BigEndian.Uint16(data[2:4])
-	// @ fold slices.AbsSlice_Bytes(data, 2, 4)
-	// @ slices.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm)
-	// @ slices.CombineAtIndex_Bytes(data, 0, len(data), 4, writePerm)
+	// @ fold acc(sl.Bytes(data, 2, 4), R40)
+	// @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, R40)
+	// @ sl.CombineAtIndex_Bytes(data, 0, len(data), 4, R40)
 	// @ )
 	s.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]}
 	// @ fold s.BaseLayer.Mem(data, 4)
@@ -259,7 +259,7 @@ func (s *SCMP) SetNetworkLayerForChecksum(scn *SCION) {
 }
 
 // @ requires pb != nil
-// @ requires slices.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ preserves pb.Mem()
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
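Note: SCMP.DecodeFromBytes now only demands an R40 fraction of sl.Bytes(data, 0, len(data)) because decoding is read-only, and every split, unfold, fold, and combine in the body is downgraded from writePerm to that same fraction. The pattern in isolation (readField is hypothetical):

// @ requires len(data) >= 2
// @ preserves acc(sl.Bytes(data, 0, len(data)), R40)
// @ decreases
func readField(data []byte) (res uint16) {
	// @ unfold acc(sl.Bytes(data, 0, len(data)), R40)
	// @ assert forall i int :: { &data[:2][i] } 0 <= i && i < 2 ==> &data[:2][i] == &data[i]
	res = binary.BigEndian.Uint16(data[:2])
	// @ fold acc(sl.Bytes(data, 0, len(data)), R40)
	return res
}

Taking only a fraction instead of full permission lets several readers inspect the same buffer concurrently without their contracts conflicting.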
diff --git a/pkg/slayers/scmp_msg.go b/pkg/slayers/scmp_msg.go
index 734c3b4c0..80748dddd 100644
--- a/pkg/slayers/scmp_msg.go
+++ b/pkg/slayers/scmp_msg.go
@@ -64,10 +64,10 @@ func (i *SCMPExternalInterfaceDown) NextLayerType() gopacket.LayerType {
 // DecodeFromBytes decodes the given bytes into this layer.
 // @ requires df != nil
 // @ requires i.NonInitMem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ preserves df.Mem()
 // @ ensures res == nil ==> i.Mem(data)
-// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data)))
+// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data)))
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
 func (i *SCMPExternalInterfaceDown) DecodeFromBytes(data []byte,
@@ -81,16 +81,16 @@ func (i *SCMPExternalInterfaceDown) DecodeFromBytes(data []byte,
 	// @ unfold i.NonInitMem()
 	offset := 0
 	// @ sl.SplitRange_Bytes(data, offset, len(data), R15)
-	// @ unfold acc(sl.AbsSlice_Bytes(data[offset:], 0, len(data[offset:])), R15)
+	// @ unfold acc(sl.Bytes(data[offset:], 0, len(data[offset:])), R15)
 	i.IA = addr.IA(binary.BigEndian.Uint64(data[offset:]))
-	// @ fold acc(sl.AbsSlice_Bytes(data[offset:], 0, len(data[offset:])), R15)
+	// @ fold acc(sl.Bytes(data[offset:], 0, len(data[offset:])), R15)
 	// @ sl.CombineRange_Bytes(data, offset, len(data), R15)
 	offset += addr.IABytes
 	// @ sl.SplitRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15)
 	// @ ghost newSlice := data[offset : offset+scmpRawInterfaceLen]
-	// @ unfold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15)
+	// @ unfold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15)
 	i.IfID = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen])
-	// @ fold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15)
+	// @ fold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15)
 	// @ sl.CombineRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15)
 	offset += scmpRawInterfaceLen
 	// @ sl.SplitByIndex_Bytes(data, 0, len(data), offset, writePerm)
@@ -126,16 +126,16 @@ func (i *SCMPExternalInterfaceDown) SerializeTo(b gopacket.SerializeBuffer, opts
 	// @ b.ExchangePred()
 	// @ assert buf === underlyingBufRes[:addr.IABytes+scmpRawInterfaceLen]
 	// @ sl.SplitRange_Bytes(underlyingBufRes, 0, len(buf), writePerm)
-	// @ assert sl.AbsSlice_Bytes(buf, 0, len(buf))
-	// @ unfold sl.AbsSlice_Bytes(buf, 0, len(buf))
+	// @ assert sl.Bytes(buf, 0, len(buf))
+	// @ unfold sl.Bytes(buf, 0, len(buf))
 	binary.BigEndian.PutUint64(buf[offset:], uint64(i.IA))
-	// @ fold sl.AbsSlice_Bytes(buf, 0, len(buf))
+	// @ fold sl.Bytes(buf, 0, len(buf))
 	offset += addr.IABytes
 	// @ sl.SplitRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm)
 	// @ ghost newSlice := buf[offset:offset+scmpRawInterfaceLen]
-	// @ unfold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice))
+	// @ unfold sl.Bytes(newSlice, 0, len(newSlice))
 	binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.IfID)
-	// @ fold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice))
+	// @ fold sl.Bytes(newSlice, 0, len(newSlice))
 	// @ sl.CombineRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm)
 	// @ sl.CombineRange_Bytes(underlyingBufRes, 0, len(buf), writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
@@ -144,7 +144,7 @@ func (i *SCMPExternalInterfaceDown) SerializeTo(b gopacket.SerializeBuffer, opts
 
 // @ requires pb != nil
 // @ preserves pb.Mem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
 func decodeSCMPExternalInterfaceDown(data []byte, pb gopacket.PacketBuilder) (res error) {
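Note: this decoder shows the range-based variant of the slice bookkeeping: SplitRange_Bytes carves data[offset:offset+n] out of the buffer predicate, the sub-slice is unfolded just long enough for the read, and CombineRange_Bytes restores the original predicate. Reduced to a single field (readUint64At is hypothetical):

// @ requires 0 <= offset && offset+8 <= len(data)
// @ preserves acc(sl.Bytes(data, 0, len(data)), R15)
// @ decreases
func readUint64At(data []byte, offset int) (v uint64) {
	// @ ghost sub := data[offset : offset+8]
	// @ sl.SplitRange_Bytes(data, offset, offset+8, R15)      // carve out data[offset:offset+8]
	// @ unfold acc(sl.Bytes(sub, 0, len(sub)), R15)
	v = binary.BigEndian.Uint64(data[offset : offset+8])
	// @ fold acc(sl.Bytes(sub, 0, len(sub)), R15)
	// @ sl.CombineRange_Bytes(data, offset, offset+8, R15)    // merge it back
	return v
}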
@@ -202,11 +202,11 @@ func (*SCMPInternalConnectivityDown) NextLayerType() gopacket.LayerType {
 
 // DecodeFromBytes decodes the given bytes into this layer.
 // @ requires df != nil
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ requires i.NonInitMem()
 // @ preserves df.Mem()
 // @ ensures res == nil ==> i.Mem(data)
-// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data)))
+// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data)))
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
 func (i *SCMPInternalConnectivityDown) DecodeFromBytes(data []byte,
@@ -221,23 +221,23 @@ func (i *SCMPInternalConnectivityDown) DecodeFromBytes(data []byte,
 	// @ defer fold i.Mem(data)
 	offset := 0
 	// @ sl.SplitRange_Bytes(data, offset, len(data), R15)
-	// @ unfold acc(sl.AbsSlice_Bytes(data[offset:], 0, len(data[offset:])), R15)
+	// @ unfold acc(sl.Bytes(data[offset:], 0, len(data[offset:])), R15)
 	i.IA = addr.IA(binary.BigEndian.Uint64(data[offset:]))
-	// @ fold acc(sl.AbsSlice_Bytes(data[offset:], 0, len(data[offset:])), R15)
+	// @ fold acc(sl.Bytes(data[offset:], 0, len(data[offset:])), R15)
 	// @ sl.CombineRange_Bytes(data, offset, len(data), R15)
 	offset += addr.IABytes
 	// @ sl.SplitRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15)
 	// @ ghost newSlice := data[offset : offset+scmpRawInterfaceLen]
-	// @ unfold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15)
+	// @ unfold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15)
 	i.Ingress = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen])
-	// @ fold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15)
+	// @ fold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15)
 	// @ sl.CombineRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15)
 	offset += scmpRawInterfaceLen
 	// @ sl.SplitRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15)
 	// @ ghost newSlice = data[offset : offset+scmpRawInterfaceLen]
-	// @ unfold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15)
+	// @ unfold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15)
 	i.Egress = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen])
-	// @ fold acc(sl.AbsSlice_Bytes(newSlice, 0, len(newSlice)), R15)
+	// @ fold acc(sl.Bytes(newSlice, 0, len(newSlice)), R15)
 	// @ sl.CombineRange_Bytes(data, offset, offset+scmpRawInterfaceLen, R15)
 	offset += scmpRawInterfaceLen
 	// @ sl.SplitByIndex_Bytes(data, 0, len(data), offset, writePerm)
@@ -271,25 +271,25 @@ func (i *SCMPInternalConnectivityDown) SerializeTo(b gopacket.SerializeBuffer, o
 	// @ defer fold i.Mem(ubufMem)
 	// @ b.ExchangePred()
 	// @ sl.SplitRange_Bytes(underlyingBufRes, 0, len(buf), writePerm)
-	// @ assert sl.AbsSlice_Bytes(buf, 0, len(buf))
+	// @ assert sl.Bytes(buf, 0, len(buf))
 	// @ sl.SplitRange_Bytes(buf, offset, len(buf), writePerm)
-	// @ unfold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:]))
+	// @ unfold sl.Bytes(buf[offset:], 0, len(buf[offset:]))
 	binary.BigEndian.PutUint64(buf[offset:], uint64(i.IA))
-	// @ fold sl.AbsSlice_Bytes(buf[offset:], 0, len(buf[offset:]))
+	// @ fold sl.Bytes(buf[offset:], 0, len(buf[offset:]))
 	// @ sl.CombineRange_Bytes(buf, offset, len(buf), writePerm)
 	offset += addr.IABytes
 	// @ ghost newSlice := buf[offset:offset+scmpRawInterfaceLen]
 	// @ sl.SplitRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice))
+	// @ unfold sl.Bytes(newSlice, 0, len(newSlice))
 	binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.Ingress)
-	// @ fold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice))
+	// @ fold sl.Bytes(newSlice, 0, len(newSlice))
 	// @ sl.CombineRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm)
 	offset += scmpRawInterfaceLen
 	// @ ghost newSlice = buf[offset:offset+scmpRawInterfaceLen]
 	// @ sl.SplitRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice))
+	// @ unfold sl.Bytes(newSlice, 0, len(newSlice))
 	binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.Egress)
-	// @ fold sl.AbsSlice_Bytes(newSlice, 0, len(newSlice))
+	// @ fold sl.Bytes(newSlice, 0, len(newSlice))
 	// @ sl.CombineRange_Bytes(buf, offset, offset+scmpRawInterfaceLen, writePerm)
 	// @ sl.CombineRange_Bytes(underlyingBufRes, 0, len(buf), writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
@@ -298,7 +298,7 @@ func (i *SCMPInternalConnectivityDown) SerializeTo(b gopacket.SerializeBuffer, o
 
 // @ requires pb != nil
 // @ preserves pb.Mem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ ensures err != nil ==> err.ErrorMem()
 // @ decreases
 func decodeSCMPInternalConnectivityDown(data []byte, pb gopacket.PacketBuilder) (err error) {
@@ -344,10 +344,10 @@ func (*SCMPEcho) NextLayerType() gopacket.LayerType {
 
 // DecodeFromBytes decodes the given bytes into this layer.
 // @ requires df != nil
 // @ requires i.NonInitMem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ preserves df.Mem()
 // @ ensures res == nil ==> i.Mem(data)
-// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data)))
+// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data)))
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
 func (i *SCMPEcho) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) {
@@ -362,44 +362,44 @@ func (i *SCMPEcho) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res
 	// @ requires offset == 0
 	// @ preserves acc(&i.Identifier)
 	// @ requires len(data) >= 4
-	// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
-	// @ ensures sl.AbsSlice_Bytes(data, 2, len(data))
-	// @ ensures sl.AbsSlice_Bytes(data, 0, 2)
+	// @ requires sl.Bytes(data, 0, len(data))
+	// @ ensures sl.Bytes(data, 2, len(data))
+	// @ ensures sl.Bytes(data, 0, 2)
 	// @ decreases
 	// @ outline (
 	// @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 0, 2)
+	// @ unfold sl.Bytes(data, 0, 2)
 	i.Identifier = binary.BigEndian.Uint16(data[:2])
-	// @ fold sl.AbsSlice_Bytes(data, 0, 2)
+	// @ fold sl.Bytes(data, 0, 2)
 	// @ )
 	offset += 2
 	// @ requires offset == 2
 	// @ preserves acc(&i.SeqNumber)
 	// @ requires len(data) >= 4
-	// @ requires sl.AbsSlice_Bytes(data, 2, len(data))
-	// @ ensures sl.AbsSlice_Bytes(data, 2, 4)
-	// @ ensures sl.AbsSlice_Bytes(data, 4, len(data))
+	// @ requires sl.Bytes(data, 2, len(data))
+	// @ ensures sl.Bytes(data, 2, 4)
+	// @ ensures sl.Bytes(data, 4, len(data))
 	// @ decreases
 	// @ outline (
 	// @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 2, 4)
+	// @ unfold sl.Bytes(data, 2, 4)
 	// @ assert forall i int :: { &data[offset:offset+2][i] } 0 <= i && i < 2 ==> &data[offset + i] == &data[offset : offset+2][i]
 	i.SeqNumber = binary.BigEndian.Uint16(data[offset : offset+2])
-	// @ fold sl.AbsSlice_Bytes(data, 2, 4)
+	// @ fold sl.Bytes(data, 2, 4)
 	// @ )
 	offset += 2
 	// @ requires offset == 4
 	// @ requires len(data) >= 4
 	// @ requires acc(&i.BaseLayer)
-	// @ requires sl.AbsSlice_Bytes(data, 0, 2)
-	// @ requires sl.AbsSlice_Bytes(data, 2, 4)
-	// @ requires sl.AbsSlice_Bytes(data, 4, len(data))
+	// @ requires sl.Bytes(data, 0, 2)
+	// @ requires sl.Bytes(data, 2, 4)
+	// @ requires sl.Bytes(data, 4, len(data))
 	// @ ensures acc(i.BaseLayer.Mem(data, 4))
 	// @ decreases
 	// @ outline (
 	// @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 0, 4)
-	// @ unfold sl.AbsSlice_Bytes(data, 4, len(data))
+	// @ unfold sl.Bytes(data, 0, 4)
+	// @ unfold sl.Bytes(data, 4, len(data))
 	// @ assert forall i int :: { &data[offset:][i] } 0 <= i && i < len(data) - offset ==> &data[offset:][i] == &data[offset + i]
 	i.BaseLayer = BaseLayer{
 		Contents: data[:offset],
@@ -407,8 +407,8 @@ func (i *SCMPEcho) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res
 	}
 	// @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[offset+l] == &i.Payload[l]
 	// @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> acc(&i.Payload[l])
-	// @ fold sl.AbsSlice_Bytes(i.Contents, 0, len(i.Contents))
-	// @ fold sl.AbsSlice_Bytes(i.Payload, 0, len(i.Payload))
+	// @ fold sl.Bytes(i.Contents, 0, len(i.Contents))
+	// @ fold sl.Bytes(i.Payload, 0, len(i.Payload))
 	// @ fold i.BaseLayer.Mem(data, 4)
 	// @ )
 	return nil
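Note: SCMPEcho.DecodeFromBytes is organized as a chain of outline blocks. Each block is a self-contained proof unit with its own requires/ensures, so the verifier checks it against that local contract in isolation and the rest of the function only sees the contract. Schematic shape of one such block (the receiver x and its field are illustrative, not from the patch):

// @ requires len(data) >= 2
// @ requires sl.Bytes(data, 0, len(data))
// @ preserves acc(&x.field)
// @ ensures sl.Bytes(data, 0, 2)
// @ ensures sl.Bytes(data, 2, len(data))
// @ decreases
// @ outline (
// @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm)
// @ unfold sl.Bytes(data, 0, 2)
x.field = binary.BigEndian.Uint16(data[:2])
// @ fold sl.Bytes(data, 0, 2)
// @ )

Outlining keeps each SMT query small, which is the main lever against verification-time blowup in these decoders. The recurring quantified asserts, such as relating &data[2:4][i] to &data[2+i], are needed because reslicing introduces new slice expressions that the verifier does not automatically identify with the original cells.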
@@ -441,9 +441,9 @@ func (i *SCMPEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.Seriali
 	// @ outline (
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 2)
 	binary.BigEndian.PutUint16(buf[:2], i.Identifier)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 2)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	// @ )
@@ -459,10 +459,10 @@ func (i *SCMPEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.Seriali
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4)
+	// @ unfold sl.Bytes(underlyingBufRes, 2, 4)
 	// @ assert forall i int :: { &buf[offset:offset+2][i] } 0 <= i && i < 2 ==> &buf[offset:offset+2][i] == &buf[offset + i]
 	binary.BigEndian.PutUint16(buf[offset:offset+2], i.SeqNumber)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4)
+	// @ fold sl.Bytes(underlyingBufRes, 2, 4)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
@@ -472,7 +472,7 @@ func (i *SCMPEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.Seriali
 
 // @ requires pb != nil
 // @ preserves pb.Mem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ ensures err != nil ==> err.ErrorMem()
 // @ decreases
 func decodeSCMPEcho(data []byte, pb gopacket.PacketBuilder) (err error) {
@@ -514,10 +514,10 @@ func (*SCMPParameterProblem) NextLayerType() gopacket.LayerType {
 
 // DecodeFromBytes decodes the given bytes into this layer.
 // @ requires df != nil
 // @ requires i.NonInitMem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ preserves df.Mem()
 // @ ensures res == nil ==> i.Mem(data)
-// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data)))
+// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data)))
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
 func (i *SCMPParameterProblem) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) {
@@ -530,33 +530,33 @@ func (i *SCMPParameterProblem) DecodeFromBytes(data []byte, df gopacket.DecodeFe
 	// @ defer fold i.Mem(data)
 	// @ preserves acc(&i.Pointer)
 	// @ requires len(data) >= 4
-	// @ preserves sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ preserves sl.Bytes(data, 0, len(data))
 	// @ decreases
 	// @ outline (
 	// @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm)
 	// @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 2, 4)
+	// @ unfold sl.Bytes(data, 2, 4)
 	// @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2:4][i] == &data[2 + i]
 	i.Pointer = binary.BigEndian.Uint16(data[2:4])
-	// @ fold sl.AbsSlice_Bytes(data, 2, 4)
+	// @ fold sl.Bytes(data, 2, 4)
 	// @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm)
 	// @ sl.CombineAtIndex_Bytes(data, 0, len(data), 4, writePerm)
 	// @ )
 	// @ requires len(data) >= 4
 	// @ requires acc(&i.BaseLayer)
 	// @ ensures i.BaseLayer.Mem(data, 4)
-	// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ requires sl.Bytes(data, 0, len(data))
 	// @ decreases
 	// @ outline (
-	// @ unfold sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ unfold sl.Bytes(data, 0, len(data))
 	// @ assert forall i int :: { &data[4:][i] } 0 <= i && i < len(data) ==> &data[4:][i] == &data[4 + i]
 	i.BaseLayer = BaseLayer{
 		Contents: data[:4],
 		Payload:  data[4:],
 	}
 	// @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[4+l] == &i.Payload[l]
-	// @ fold sl.AbsSlice_Bytes(i.Contents, 0, len(i.Contents))
-	// @ fold sl.AbsSlice_Bytes(i.Payload, 0, len(i.Payload))
+	// @ fold sl.Bytes(i.Contents, 0, len(i.Contents))
+	// @ fold sl.Bytes(i.Payload, 0, len(i.Payload))
 	// @ fold i.BaseLayer.Mem(data, 4)
 	// @ )
 	return nil
@@ -587,9 +587,9 @@ func (i *SCMPParameterProblem) SerializeTo(b gopacket.SerializeBuffer, opts gopa
 	// @ outline (
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 2)
 	binary.BigEndian.PutUint16(buf[0:2], uint16(0)) //Reserved
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 2)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	// @ )
@@ -603,10 +603,10 @@ func (i *SCMPParameterProblem) SerializeTo(b gopacket.SerializeBuffer, opts gopa
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4)
+	// @ unfold sl.Bytes(underlyingBufRes, 2, 4)
 	// @ assert forall i int :: { &buf[2:4][i] } 0 <= i && i < 2 ==> &buf[2:4][i] == &buf[2 + i]
 	binary.BigEndian.PutUint16(buf[2:4], i.Pointer)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4)
+	// @ fold sl.Bytes(underlyingBufRes, 2, 4)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
@@ -616,7 +616,7 @@ func (i *SCMPParameterProblem) SerializeTo(b gopacket.SerializeBuffer, opts gopa
 
 // @ requires pb != nil
 // @ preserves pb.Mem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ ensures err != nil ==> err.ErrorMem()
 // @ decreases
 func decodeSCMPParameterProblem(data []byte, pb gopacket.PacketBuilder) (err error) {
@@ -672,7 +672,7 @@ func (*SCMPTraceroute) NextLayerType() gopacket.LayerType {
 
 // DecodeFromBytes decodes the given bytes into this layer.
 // @ requires df != nil
 // @ requires i.NonInitMem()
-// @ preserves sl.AbsSlice_Bytes(data, 0, len(data))
+// @ preserves acc(sl.Bytes(data, 0, len(data)), R40)
 // @ preserves df.Mem()
 // @ ensures res == nil ==> i.Mem(data)
 // @ ensures res != nil ==> i.NonInitMem()
@@ -690,66 +690,66 @@ func (i *SCMPTraceroute) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback
 	// @ requires offset == 0
 	// @ preserves acc(&i.Identifier)
 	// @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen
-	// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
-	// @ ensures sl.AbsSlice_Bytes(data, 0, 2)
-	// @ ensures sl.AbsSlice_Bytes(data, 2, len(data))
+	// @ requires acc(sl.Bytes(data, 0, len(data)), R40)
+	// @ ensures acc(sl.Bytes(data, 0, 2), R40)
+	// @ ensures acc(sl.Bytes(data, 2, len(data)), R40)
 	// @ decreases
 	// @ outline (
-	// @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 0, 2)
+	// @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, R40)
+	// @ unfold acc(sl.Bytes(data, 0, 2), R40)
 	i.Identifier = binary.BigEndian.Uint16(data[offset : offset+2])
-	// @ fold sl.AbsSlice_Bytes(data, 0, 2)
+	// @ fold acc(sl.Bytes(data, 0, 2), R40)
 	// @ )
 	offset += 2
 	// @ requires offset == 2
 	// @ preserves acc(&i.Sequence)
 	// @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen
-	// @ requires sl.AbsSlice_Bytes(data, 2, len(data))
-	// @ ensures sl.AbsSlice_Bytes(data, 2, 2+2)
-	// @ ensures sl.AbsSlice_Bytes(data, 2+2, len(data))
+	// @ requires acc(sl.Bytes(data, 2, len(data)), R40)
+	// @ ensures acc(sl.Bytes(data, 2, 2+2), R40)
+	// @ ensures acc(sl.Bytes(data, 2+2, len(data)), R40)
 	// @ decreases
 	// @ outline (
-	// @ sl.SplitByIndex_Bytes(data, 2, len(data), 2+2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 2, 2+2)
+	// @ sl.SplitByIndex_Bytes(data, 2, len(data), 2+2, R40)
+	// @ unfold acc(sl.Bytes(data, 2, 2+2), R40)
 	// @ assert forall i int :: { &data[offset:offset+2][i] } 0 <= i && i < 2 ==> &data[offset + i] == &data[offset : offset+2][i]
 	i.Sequence = binary.BigEndian.Uint16(data[offset : offset+2])
-	// @ fold sl.AbsSlice_Bytes(data, 2, 2+2)
+	// @ fold acc(sl.Bytes(data, 2, 2+2), R40)
 	// @ )
 	offset += 2
 	// @ requires offset == 2 + 2
 	// @ preserves acc(&i.IA)
 	// @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen
-	// @ requires sl.AbsSlice_Bytes(data, 2+2, len(data))
-	// @ ensures sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes)
-	// @ ensures sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, len(data))
+	// @ requires acc(sl.Bytes(data, 2+2, len(data)), R40)
+	// @ ensures acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40)
+	// @ ensures acc(sl.Bytes(data, 2+2+addr.IABytes, len(data)), R40)
 	// @ decreases
 	// @ outline (
-	// @ sl.SplitByIndex_Bytes(data, 2+2, len(data), 2+2+addr.IABytes, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes)
+	// @ sl.SplitByIndex_Bytes(data, 2+2, len(data), 2+2+addr.IABytes, R40)
+	// @ unfold acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40)
 	// @ assert forall i int :: { &data[offset:offset+addr.IABytes][i] } 0 <= i && i < addr.IABytes ==> &data[offset + i] == &data[offset : offset+addr.IABytes][i]
 	i.IA = addr.IA(binary.BigEndian.Uint64(data[offset : offset+addr.IABytes]))
-	// @ fold sl.AbsSlice_Bytes(data, 2+2, 2+2+addr.IABytes)
+	// @ fold acc(sl.Bytes(data, 2+2, 2+2+addr.IABytes), R40)
 	// @ )
 	offset += addr.IABytes
 	// @ requires offset == 2 + 2 + addr.IABytes
 	// @ preserves acc(&i.Interface)
 	// @ requires len(data) >= 2 + 2 + addr.IABytes + scmpRawInterfaceLen
-	// @ requires sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, len(data))
-	// @ ensures sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen)
-	// @ ensures sl.AbsSlice_Bytes(data, 2+2+addr.IABytes+scmpRawInterfaceLen, len(data))
+	// @ requires acc(sl.Bytes(data, 2+2+addr.IABytes, len(data)), R40)
+	// @ ensures acc(sl.Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40)
+	// @ ensures acc(sl.Bytes(data, 2+2+addr.IABytes+scmpRawInterfaceLen, len(data)), R40)
 	// @ decreases
 	// @ outline (
-	// @ sl.SplitByIndex_Bytes(data, 2+2+addr.IABytes, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen)
+	// @ sl.SplitByIndex_Bytes(data, 2+2+addr.IABytes, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, R40)
+	// @ unfold acc(sl.Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40)
 	// @ assert forall i int :: { &data[offset:offset+scmpRawInterfaceLen][i] } 0 <= i && i < scmpRawInterfaceLen ==> &data[offset + i] == &data[offset : offset+addr.IABytes][i]
 	i.Interface = binary.BigEndian.Uint64(data[offset : offset+scmpRawInterfaceLen])
-	// @ fold sl.AbsSlice_Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen)
+	// @ fold acc(sl.Bytes(data, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen), R40)
 	// @ )
 	offset += scmpRawInterfaceLen
-	// @ sl.CombineAtIndex_Bytes(data, 0, 2+2, 2, writePerm)
-	// @ sl.CombineAtIndex_Bytes(data, 0, 2+2+addr.IABytes, 2+2, writePerm)
-	// @ sl.CombineAtIndex_Bytes(data, 0, 2+2+addr.IABytes+scmpRawInterfaceLen, 2+2+addr.IABytes, writePerm)
-	// @ sl.CombineAtIndex_Bytes(data, 0, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm)
+	// @ sl.CombineAtIndex_Bytes(data, 0, 2+2, 2, R40)
+	// @ sl.CombineAtIndex_Bytes(data, 0, 2+2+addr.IABytes, 2+2, R40)
+	// @ sl.CombineAtIndex_Bytes(data, 0, 2+2+addr.IABytes+scmpRawInterfaceLen, 2+2+addr.IABytes, R40)
+	// @ sl.CombineAtIndex_Bytes(data, 0, len(data), 2+2+addr.IABytes+scmpRawInterfaceLen, R40)
 	i.BaseLayer = BaseLayer{
 		Contents: data[:offset],
 		Payload:  data[offset:],
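Note: after the four reads at increasing offsets, the predicate over the whole buffer is rebuilt by merging adjacent pieces left to right; each CombineAtIndex_Bytes call consumes the two pieces that meet at the given index, all at the same R40 fraction. The general step, written as a tiny lemma (combineAdjacent is hypothetical; the real lemmas live in verification/utils/slices):

ghost
requires 0 <= start && start < idx && idx < end && end <= len(data)
requires acc(sl.Bytes(data, start, idx), R40) && acc(sl.Bytes(data, idx, end), R40)
ensures acc(sl.Bytes(data, start, end), R40)
decreases
func combineAdjacent(data []byte, start, idx, end int) {
	// merges [start, idx) and [idx, end) into [start, end)
	sl.CombineAtIndex_Bytes(data, start, end, idx, R40)
}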
@@ -786,9 +786,9 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S
 	// @ outline (
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 2)
 	binary.BigEndian.PutUint16(buf[:2], i.Identifier)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 2)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	// @ )
@@ -804,10 +804,10 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 2+2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2, 2+2)
+	// @ unfold sl.Bytes(underlyingBufRes, 2, 2+2)
 	// @ assert forall i int :: { &buf[offset:offset+2][i] } 0 <= i && i < 2 ==> &buf[offset:offset+2][i] == &buf[offset + i]
 	binary.BigEndian.PutUint16(buf[offset:offset+2], i.Sequence)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2, 2+2)
+	// @ fold sl.Bytes(underlyingBufRes, 2, 2+2)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 2+2, writePerm)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
@@ -824,10 +824,10 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2, writePerm)
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 2+2, len(underlyingBufRes), 2+2+addr.IABytes, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes)
+	// @ unfold sl.Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes)
 	// @ assert forall i int :: { &buf[offset:offset+addr.IABytes][i] } 0 <= i && i < addr.IABytes ==> &buf[offset:offset+addr.IABytes][i] == &buf[offset + i]
 	binary.BigEndian.PutUint64(buf[offset:offset+addr.IABytes], uint64(i.IA))
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes)
+	// @ fold sl.Bytes(underlyingBufRes, 2+2, 2+2+addr.IABytes)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2+2, len(underlyingBufRes), 2+2+addr.IABytes, writePerm)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
@@ -844,10 +844,10 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2+addr.IABytes, writePerm)
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 2+2+addr.IABytes, len(underlyingBufRes), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen)
+	// @ unfold sl.Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen)
 	// @ assert forall i int :: { &buf[offset:offset+scmpRawInterfaceLen][i] } 0 <= i && i < scmpRawInterfaceLen ==> &buf[offset:offset+scmpRawInterfaceLen][i] == &buf[offset + i]
 	binary.BigEndian.PutUint64(buf[offset:offset+scmpRawInterfaceLen], i.Interface)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen)
+	// @ fold sl.Bytes(underlyingBufRes, 2+2+addr.IABytes, 2+2+addr.IABytes+scmpRawInterfaceLen)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2+2+addr.IABytes, len(underlyingBufRes), 2+2+addr.IABytes+scmpRawInterfaceLen, writePerm)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2+2+addr.IABytes, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
@@ -857,7 +857,7 @@ func (i *SCMPTraceroute) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.S
 
 // @ requires pb != nil
 // @ preserves pb.Mem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ ensures err != nil ==> err.ErrorMem()
 // @ decreases
 func decodeSCMPTraceroute(data []byte, pb gopacket.PacketBuilder) (err error) {
@@ -901,10 +901,10 @@ func (*SCMPDestinationUnreachable) NextLayerType() gopacket.LayerType {
 
 // DecodeFromBytes decodes the given bytes into this layer.
 // @ requires df != nil
 // @ requires i.NonInitMem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ preserves df.Mem()
 // @ ensures res == nil ==> i.Mem(data)
-// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data)))
+// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data)))
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
 func (i *SCMPDestinationUnreachable) DecodeFromBytes(data []byte,
@@ -918,15 +918,15 @@ func (i *SCMPDestinationUnreachable) DecodeFromBytes(data []byte,
 	// @ unfold i.NonInitMem()
 	// @ defer fold i.Mem(data)
 	// @ defer fold i.BaseLayer.Mem(data, minLength)
-	// @ unfold sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ unfold sl.Bytes(data, 0, len(data))
 	// @ assert forall i int :: { &data[minLength:][i] } 0 <= i && i < len(data) - minLength ==> &data[minLength:][i] == &data[minLength + i]
 	i.BaseLayer = BaseLayer{
 		Contents: data[:minLength],
 		Payload:  data[minLength:],
 	}
 	// @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[minLength:][l] == &i.Payload[l]
-	// @ fold sl.AbsSlice_Bytes(i.Contents, 0, len(i.Contents))
-	// @ fold sl.AbsSlice_Bytes(i.Payload, 0, len(i.Payload))
+	// @ fold sl.Bytes(i.Contents, 0, len(i.Contents))
+	// @ fold sl.Bytes(i.Payload, 0, len(i.Payload))
 	return nil
 }
 
@@ -948,16 +948,16 @@ func (i *SCMPDestinationUnreachable) SerializeTo(b gopacket.SerializeBuffer, opt
 	// @ assert buf === underlyingBufRes[:4]
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 4)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 4)
 	copy(buf, make([]byte, 4) /*@, writePerm@*/)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 4)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 4)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 4, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	return nil
 }
 
 // @ requires pb != nil
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ preserves pb.Mem()
 // @ ensures err != nil ==> err.ErrorMem()
 // @ decreases
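Note: the SerializeTo above zeroes the four header bytes with copy, whose ghost tail argument states how much permission the call may consume on the involved slices. The same construction in isolation (zeroPrefix is hypothetical):

// @ requires len(buf) >= 4
// @ preserves sl.Bytes(buf, 0, len(buf))
// @ decreases
func zeroPrefix(buf []byte) {
	// @ unfold sl.Bytes(buf, 0, len(buf))
	// the unfold yields write access to each cell, which copy consumes
	copy(buf[:4], make([]byte, 4) /*@, writePerm @*/)
	// @ fold sl.Bytes(buf, 0, len(buf))
}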
@@ -1000,11 +1000,11 @@ func (*SCMPPacketTooBig) NextLayerType() gopacket.LayerType {
 
 // DecodeFromBytes decodes the given bytes into this layer.
 // @ requires df != nil
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ requires i.NonInitMem()
 // @ preserves df.Mem()
 // @ ensures res == nil ==> i.Mem(data)
-// @ ensures res != nil ==> (i.NonInitMem() && sl.AbsSlice_Bytes(data, 0, len(data)))
+// @ ensures res != nil ==> (i.NonInitMem() && sl.Bytes(data, 0, len(data)))
 // @ ensures res != nil ==> res.ErrorMem()
 // @ decreases
 func (i *SCMPPacketTooBig) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) {
@@ -1017,33 +1017,33 @@ func (i *SCMPPacketTooBig) DecodeFromBytes(data []byte, df gopacket.DecodeFeedba
 	// @ defer fold i.Mem(data)
 	// @ preserves acc(&i.MTU)
 	// @ requires len(data) >= 4
-	// @ preserves sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ preserves sl.Bytes(data, 0, len(data))
 	// @ decreases
 	// @ outline (
 	// @ sl.SplitByIndex_Bytes(data, 0, len(data), 2, writePerm)
 	// @ sl.SplitByIndex_Bytes(data, 2, len(data), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(data, 2, 4)
+	// @ unfold sl.Bytes(data, 2, 4)
 	// @ assert forall i int :: { &data[2:4][i] } 0 <= i && i < 2 ==> &data[2:4][i] == &data[2 + i]
 	i.MTU = binary.BigEndian.Uint16(data[2:4])
-	// @ fold sl.AbsSlice_Bytes(data, 2, 4)
+	// @ fold sl.Bytes(data, 2, 4)
 	// @ sl.CombineAtIndex_Bytes(data, 0, 4, 2, writePerm)
 	// @ sl.CombineAtIndex_Bytes(data, 0, len(data), 4, writePerm)
 	// @ )
 	// @ requires len(data) >= 4
 	// @ requires acc(&i.BaseLayer)
-	// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ requires sl.Bytes(data, 0, len(data))
 	// @ ensures i.BaseLayer.Mem(data, 4)
 	// @ decreases
 	// @ outline (
-	// @ unfold sl.AbsSlice_Bytes(data, 0, len(data))
+	// @ unfold sl.Bytes(data, 0, len(data))
 	// @ assert forall i int :: { &data[4:][i] } 0 <= i && i < len(data) ==> &data[4:][i] == &data[4 + i]
 	i.BaseLayer = BaseLayer{
 		Contents: data[:4],
 		Payload:  data[4:],
 	}
 	// @ assert forall l int :: { &i.Payload[l] } 0 <= l && l < len(i.Payload) ==> &data[4+l] == &i.Payload[l]
-	// @ fold sl.AbsSlice_Bytes(i.Contents, 0, len(i.Contents))
-	// @ fold sl.AbsSlice_Bytes(i.Payload, 0, len(i.Payload))
+	// @ fold sl.Bytes(i.Contents, 0, len(i.Contents))
+	// @ fold sl.Bytes(i.Payload, 0, len(i.Payload))
 	// @ fold i.BaseLayer.Mem(data, 4)
 	// @ )
 	return nil
@@ -1074,9 +1074,9 @@ func (i *SCMPPacketTooBig) SerializeTo(b gopacket.SerializeBuffer, opts gopacket
 	// @ outline (
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ unfold sl.Bytes(underlyingBufRes, 0, 2)
 	binary.BigEndian.PutUint16(buf[0:2], uint16(0)) //Reserved
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 0, 2)
+	// @ fold sl.Bytes(underlyingBufRes, 0, 2)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
 	// @ )
@@ -1090,10 +1090,10 @@ func (i *SCMPPacketTooBig) SerializeTo(b gopacket.SerializeBuffer, opts gopacket
 	// @ b.ExchangePred()
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ sl.SplitByIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm)
-	// @ unfold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4)
+	// @ unfold sl.Bytes(underlyingBufRes, 2, 4)
 	// @ assert forall i int :: { &buf[2:4][i] } 0 <= i && i < 2 ==> &buf[2:4][i] == &buf[2 + i]
 	binary.BigEndian.PutUint16(buf[2:4], i.MTU)
-	// @ fold sl.AbsSlice_Bytes(underlyingBufRes, 2, 4)
+	// @ fold sl.Bytes(underlyingBufRes, 2, 4)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 2, len(underlyingBufRes), 4, writePerm)
 	// @ sl.CombineAtIndex_Bytes(underlyingBufRes, 0, len(underlyingBufRes), 2, writePerm)
 	// @ b.RestoreMem(underlyingBufRes)
@@ -1103,7 +1103,7 @@ func (i *SCMPPacketTooBig) SerializeTo(b gopacket.SerializeBuffer, opts gopacket
 
 // @ requires pb != nil
 // @ preserves pb.Mem()
-// @ requires sl.AbsSlice_Bytes(data, 0, len(data))
+// @ requires sl.Bytes(data, 0, len(data))
 // @ ensures err != nil ==> err.ErrorMem()
 // @ decreases
 func decodeSCMPPacketTooBig(data []byte, pb gopacket.PacketBuilder) (err error) {
diff --git a/pkg/slayers/scmp_typecode.go b/pkg/slayers/scmp_typecode.go
index f0fe0fb17..b7d45666a 100644
--- a/pkg/slayers/scmp_typecode.go
+++ b/pkg/slayers/scmp_typecode.go
@@ -20,7 +20,7 @@ package slayers
 import (
 	"encoding/binary"
 	"fmt"
 
-	//@ "github.com/scionproto/scion/verification/utils/slices"
+	//@ sl "github.com/scionproto/scion/verification/utils/slices"
 	//@ . "github.com/scionproto/scion/verification/utils/definitions"
 )
@@ -133,11 +133,11 @@ func (a SCMPTypeCode) String() string {
 
 // SerializeTo writes the SCMPTypeCode value to the buffer.
 // @ requires len(bytes) >= 2
-// @ preserves slices.AbsSlice_Bytes(bytes, 0, 2)
+// @ preserves sl.Bytes(bytes, 0, 2)
 // @ decreases
 func (a SCMPTypeCode) SerializeTo(bytes []byte) {
-	//@ unfold slices.AbsSlice_Bytes(bytes, 0, 2)
-	//@ defer fold slices.AbsSlice_Bytes(bytes, 0, 2)
+	//@ unfold sl.Bytes(bytes, 0, 2)
+	//@ defer fold sl.Bytes(bytes, 0, 2)
 	binary.BigEndian.PutUint16(bytes, uint16(a))
 }
diff --git a/private/topology/linktype.go b/private/topology/linktype.go
index 4948c9c93..d8b47579f 100644
--- a/private/topology/linktype.go
+++ b/private/topology/linktype.go
@@ -22,7 +22,7 @@ import (
 
 	"github.com/scionproto/scion/pkg/private/serrors"
 	//@ . "github.com/scionproto/scion/verification/utils/definitions"
-	//@ "github.com/scionproto/scion/verification/utils/slices"
+	//@ sl "github.com/scionproto/scion/verification/utils/slices"
 )
 
 // LinkType describes inter-AS links.
@@ -44,7 +44,7 @@ const (
 	Peer LinkType = 4
 )
 
-//@ decreases
+// @ decreases
 func (l LinkType) String() string {
 	if l == Unset {
 		return "unset"
@@ -53,57 +53,57 @@ func (l LinkType) String() string {
 	if err != nil {
 		return err.Error()
 	}
-	//@ unfold slices.AbsSlice_Bytes(s, 0, len(s))
+	//@ unfold sl.Bytes(s, 0, len(s))
 	return string(s)
 }
 
 // LinkTypeFromString returns the numerical link type associated with a string description. If the
 // string is not recognized, an Unset link type is returned. The matching is case-insensitive.
-//@ decreases
+// @ decreases
 func LinkTypeFromString(s string) (res LinkType) {
 	var l /*@@@*/ LinkType
 	tmp := []byte(s)
-	//@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp))
+	//@ fold sl.Bytes(tmp, 0, len(tmp))
 	if err := l.UnmarshalText(tmp); err != nil {
 		return Unset
 	}
 	return l
 }
 
-//@ ensures (l == Core || l == Parent || l == Child || l == Peer) == (err == nil)
-//@ ensures err == nil ==> slices.AbsSlice_Bytes(res, 0, len(res))
-//@ ensures err != nil ==> err.ErrorMem()
-//@ decreases
+// @ ensures (l == Core || l == Parent || l == Child || l == Peer) == (err == nil)
+// @ ensures err == nil ==> sl.Bytes(res, 0, len(res))
+// @ ensures err != nil ==> err.ErrorMem()
+// @ decreases
 func (l LinkType) MarshalText() (res []byte, err error) {
 	switch l {
 	case Core:
 		tmp := []byte("core")
-		//@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp))
+		//@ fold sl.Bytes(tmp, 0, len(tmp))
 		return tmp, nil
 	case Parent:
 		tmp := []byte("parent")
-		//@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp))
+		//@ fold sl.Bytes(tmp, 0, len(tmp))
 		return tmp, nil
 	case Child:
 		tmp := []byte("child")
-		//@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp))
+		//@ fold sl.Bytes(tmp, 0, len(tmp))
 		return tmp, nil
 	case Peer:
 		tmp := []byte("peer")
-		//@ fold slices.AbsSlice_Bytes(tmp, 0, len(tmp))
+		//@ fold sl.Bytes(tmp, 0, len(tmp))
 		return tmp, nil
 	default:
 		return nil, serrors.New("invalid link type")
 	}
 }
 
-//@ preserves acc(l)
-//@ preserves acc(slices.AbsSlice_Bytes(data, 0, len(data)), R15)
-//@ ensures err != nil ==> err.ErrorMem()
-//@ decreases
+// @ preserves acc(l)
+// @ preserves acc(sl.Bytes(data, 0, len(data)), R15)
+// @ ensures err != nil ==> err.ErrorMem()
+// @ decreases
 func (l *LinkType) UnmarshalText(data []byte) (err error) {
-	//@ unfold acc(slices.AbsSlice_Bytes(data, 0, len(data)), R15)
-	//@ ghost defer fold acc(slices.AbsSlice_Bytes(data, 0, len(data)), R15)
+	//@ unfold acc(sl.Bytes(data, 0, len(data)), R15)
+	//@ ghost defer fold acc(sl.Bytes(data, 0, len(data)), R15)
 	switch strings.ToLower(string(data)) {
 	case "core":
 		*l = Core
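Note: MarshalText can fold sl.Bytes over each freshly allocated tmp because a new []byte conversion yields write permission to every element; folding packages those permissions into the predicate promised to the caller. Minimal form of the idiom (marshalCore is hypothetical):

// @ ensures sl.Bytes(res, 0, len(res))
// @ decreases
func marshalCore() (res []byte) {
	tmp := []byte("core")
	// the fresh slice gives acc(&tmp[i]) for every index, which fold consumes
	//@ fold sl.Bytes(tmp, 0, len(tmp))
	return tmp
}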
diff --git a/private/underlay/conn/conn.go b/private/underlay/conn/conn.go
index 06d706d2a..ad9611602 100644
--- a/private/underlay/conn/conn.go
+++ b/private/underlay/conn/conn.go
@@ -33,7 +33,7 @@ import (
 	"github.com/scionproto/scion/pkg/private/serrors"
 	"github.com/scionproto/scion/private/underlay/sockctrl"
 	//@ . "github.com/scionproto/scion/verification/utils/definitions"
-	//@ "github.com/scionproto/scion/verification/utils/slices"
+	//@ sl "github.com/scionproto/scion/verification/utils/slices"
 )
 
 // Messages is a list of ipX.Messages. It is necessary to hide the type alias
@@ -45,7 +45,7 @@ type Conn interface {
 	//@ pred Mem()
 	// (VerifiedSCION) Reads a message to b. Returns the number of read bytes.
 	//@ requires acc(Mem(), _)
-	//@ preserves slices.AbsSlice_Bytes(b, 0, len(b))
+	//@ preserves sl.Bytes(b, 0, len(b))
 	//@ ensures err == nil ==> 0 <= n && n <= len(b)
 	//@ ensures err == nil ==> acc(addr.Mem(), _)
 	//@ ensures err != nil ==> err.ErrorMem()
@@ -56,13 +56,13 @@ type Conn interface {
 	//@ ensures err != nil ==> err.ErrorMem()
 	ReadBatch(m Messages) (n int, err error)
 	//@ requires acc(Mem(), _)
-	//@ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R10)
+	//@ preserves acc(sl.Bytes(b, 0, len(b)), R10)
 	//@ ensures err == nil ==> 0 <= n && n <= len(b)
 	//@ ensures err != nil ==> err.ErrorMem()
 	Write(b []byte) (n int, err error)
 	//@ requires acc(u.Mem(), _)
 	//@ requires acc(Mem(), _)
-	//@ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R10)
+	//@ preserves acc(sl.Bytes(b, 0, len(b)), R10)
 	//@ ensures err == nil ==> 0 <= n && n <= len(b)
 	//@ ensures err != nil ==> err.ErrorMem()
 	WriteTo(b []byte, u *net.UDPAddr) (n int, err error)
@@ -125,13 +125,11 @@ func New(listen, remote *net.UDPAddr, cfg *Config) (res Conn, e error) {
 	if listen == nil && remote == nil {
 		panic("either listen or remote must be set")
 	}
-	/*@
-	assert remote != nil ==> a == remote
-	assert remote == nil ==> a == listen
-	unfold acc(a.Mem(), R15)
-	unfold acc(slices.AbsSlice_Bytes(a.IP, 0, len(a.IP)), R15)
-	assert forall i int :: { &a.IP[i] } 0 <= i && i < len(a.IP) ==> acc(&a.IP[i], R15)
-	@*/
+	// @ assert remote != nil ==> a == remote
+	// @ assert remote == nil ==> a == listen
+	// @ unfold acc(a.Mem(), R15)
+	// @ unfold acc(sl.Bytes(a.IP, 0, len(a.IP)), R15)
+	// @ assert forall i int :: { &a.IP[i] } 0 <= i && i < len(a.IP) ==> acc(&a.IP[i], R15)
 	if a.IP.To4( /*@ false @*/ ) != nil {
 		return newConnUDPIPv4(listen, remote, cfg)
 	}
@@ -395,7 +393,7 @@ func (cc *connUDPBase) initConnUDP(network string, laddr, raddr *net.UDPAddr, cf
 }
 
 // @ preserves acc(c.Mem(), _)
-// @ preserves slices.AbsSlice_Bytes(b, 0, len(b))
+// @ preserves sl.Bytes(b, 0, len(b))
 // @ preserves unfolding acc(c.Mem(), _) in c.conn == underlyingConn
 // @ ensures err == nil ==> 0 <= n && n <= len(b)
 // @ ensures err == nil ==> acc(addr.Mem(), _)
@@ -406,7 +404,7 @@ func (c *connUDPBase) ReadFrom(b []byte /*@, ghost underlyingConn *net.UDPConn @
 }
 
 // @ preserves acc(c.Mem(), _)
-// @ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+// @ preserves acc(sl.Bytes(b, 0, len(b)), R15)
 // @ preserves unfolding acc(c.Mem(), _) in c.conn == underlyingConn
 // @ ensures err == nil ==> 0 <= n && n <= len(b)
 // @ ensures err != nil ==> err.ErrorMem()
@@ -418,7 +416,7 @@ func (c *connUDPBase) Write(b []byte /*@, ghost underlyingConn *net.UDPConn @*/)
 // @ requires acc(dst.Mem(), _)
 // @ preserves acc(c.Mem(), _)
 // @ preserves unfolding acc(c.Mem(), _) in c.conn == underlyingConn
-// @ preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+// @ preserves acc(sl.Bytes(b, 0, len(b)), R15)
 // @ ensures err == nil ==> 0 <= n && n <= len(b)
 // @ ensures err != nil ==> err.ErrorMem()
 func (c *connUDPBase) WriteTo(b []byte, dst *net.UDPAddr /*@, ghost underlyingConn *net.UDPConn @*/) (n int, err error) {
@@ -475,8 +473,8 @@ func NewReadMessages(n int) (res Messages) {
 	for i := range m /*@ with i0 @*/ {
 		// Allocate a single-element, to avoid allocations when setting the buffer.
 		m[i].Buffers = make([][]byte, 1)
-		//@ fold slices.AbsSlice_Bytes(m[i].Buffers[0], 0, len(m[i].Buffers[0]))
-		//@ fold slices.AbsSlice_Bytes(m[i].OOB, 0, len(m[i].OOB))
+		//@ fold sl.Bytes(m[i].Buffers[0], 0, len(m[i].Buffers[0]))
+		//@ fold sl.Bytes(m[i].OOB, 0, len(m[i].OOB))
 		//@ fold m[i].Mem()
 	}
 	return m
diff --git a/private/underlay/conn/conn_spec.gobra b/private/underlay/conn/conn_spec.gobra
index 298e98721..92fcef78f 100644
--- a/private/underlay/conn/conn_spec.gobra
+++ b/private/underlay/conn/conn_spec.gobra
@@ -20,7 +20,7 @@ import (
 	"net"
 
 	. "github.com/scionproto/scion/verification/utils/definitions"
-	"github.com/scionproto/scion/verification/utils/slices"
+	sl "github.com/scionproto/scion/verification/utils/slices"
 
 	"golang.org/x/net/ipv4"
 	"golang.org/x/net/ipv6"
@@ -66,7 +66,7 @@ pred (c *Config) Mem() {
 *connUDPIPv4 implements Conn
 
 requires acc(c.Mem(), _)
-preserves slices.AbsSlice_Bytes(b, 0, len(b))
+preserves sl.Bytes(b, 0, len(b))
 ensures err == nil ==> 0 <= n && n <= len(b)
 ensures err == nil ==> acc(addr.Mem(), _)
 ensures err != nil ==> err.ErrorMem()
@@ -83,7 +83,7 @@ func (c *connUDPIPv4) ReadFrom(b []byte) (n int, addr *net.UDPAddr, err error) {
 }
 
 preserves acc(c.Mem(), _)
-preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+preserves acc(sl.Bytes(b, 0, len(b)), R15)
 ensures err == nil ==> 0 <= n && n <= len(b)
 ensures err != nil ==> err.ErrorMem()
 func (c *connUDPIPv4) Write(b []byte) (n int, err error) {
@@ -101,7 +101,7 @@ func (c *connUDPIPv4) Write(b []byte) (n int, err error) {
 
 requires acc(dst.Mem(), _)
 preserves acc(c.Mem(), _)
-preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+preserves acc(sl.Bytes(b, 0, len(b)), R15)
 ensures err == nil ==> 0 <= n && n <= len(b)
 ensures err != nil ==> err.ErrorMem()
 func (c *connUDPIPv4) WriteTo(b []byte, dst *net.UDPAddr) (n int, err error) {
@@ -151,7 +151,7 @@ func (c *connUDPIPv4) Close() (err error) {
 *connUDPIPv6 implements Conn
 
 preserves acc(c.Mem(), _)
-preserves slices.AbsSlice_Bytes(b, 0, len(b))
+preserves sl.Bytes(b, 0, len(b))
 ensures err == nil ==> 0 <= n && n <= len(b)
 ensures err == nil ==> acc(addr.Mem(), _)
 ensures err != nil ==> err.ErrorMem()
@@ -169,7 +169,7 @@ func (c *connUDPIPv6) ReadFrom(b []byte) (n int, addr *net.UDPAddr, err error) {
 }
 
 preserves acc(c.Mem(), _)
-preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+preserves acc(sl.Bytes(b, 0, len(b)), R15)
 ensures err == nil ==> 0 <= n && n <= len(b)
 ensures err != nil ==> err.ErrorMem()
 func (c *connUDPIPv6) Write(b []byte) (n int, err error) {
@@ -187,7 +187,7 @@ func (c *connUDPIPv6) Write(b []byte) (n int, err error) {
 
 requires acc(dst.Mem(), _)
 preserves acc(c.Mem(), _)
-preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+preserves acc(sl.Bytes(b, 0, len(b)), R15)
 ensures err == nil ==> 0 <= n && n <= len(b)
 ensures err != nil ==> err.ErrorMem()
 func (c *connUDPIPv6) WriteTo(b []byte, dst *net.UDPAddr) (n int, err error) {
diff --git a/router/bfd_spec.gobra b/router/bfd_spec.gobra
index 367fe90d4..84f2e4aba 100644
--- a/router/bfd_spec.gobra
+++ b/router/bfd_spec.gobra
@@ -32,6 +32,6 @@ pred (b *bfdSend) Mem() {
 	acc(b.scn) &&
 	acc(b.ohp) &&
 	b.mac.Mem() &&
-	sl.AbsSlice_Bytes(b.macBuffer, 0, path.MACBufferSize) &&
+	sl.Bytes(b.macBuffer, 0, path.MACBufferSize) &&
 	b.buffer.Mem()
 }
\ No newline at end of file
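Note: (b *bfdSend) Mem() is typical of how these specs bundle a struct's resources: field permissions, nested Mem() predicates, and the sl.Bytes predicate of an owned buffer are conjoined into one abstraction that callers transfer as a unit via fold/unfold. Shape of the idiom (the sender type is hypothetical):

type sender struct {
	buffer []byte
}

pred (s *sender) Mem() {
	acc(&s.buffer) &&
	sl.Bytes(s.buffer, 0, len(s.buffer))
}

A client holding s.Mem() unfolds it to reach the buffer and folds it back before returning, exactly as NewReadMessages does above when it folds m[i].Mem() from the freshly allocated slices.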
. "github.com/scionproto/scion/verification/utils/definitions" // @ fl "github.com/scionproto/scion/verification/utils/floats" + // @ gsync "github.com/scionproto/scion/verification/utils/ghost_sync" // @ sl "github.com/scionproto/scion/verification/utils/slices" // @ "github.com/scionproto/scion/verification/utils/seqs" // @ socketspec "golang.org/x/net/internal/socket/" @@ -102,10 +103,12 @@ type bfdSession interface { // @ requires acc(Mem(), _) // @ requires msg.Mem(ub) // (VerifiedSCION) an implementation must copy the fields it needs from msg - // @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) + // @ preserves sl.Bytes(ub, 0, len(ub)) // @ ensures msg.NonInitMem() + // @ decreases 0 if sync.IgnoreBlockingForTermination() ReceiveMessage(msg *layers.BFD /*@ , ghost ub []byte @*/) // @ requires acc(Mem(), _) + // @ decreases 0 if sync.IgnoreBlockingForTermination() IsUp() bool } @@ -131,20 +134,17 @@ type BatchConn interface { // contracts for IO-spec // @ requires Prophecy(prophecyM) // @ requires io.token(place) && MultiReadBio(place, prophecyM) - // @ preserves dp.Valid() // @ ensures err != nil ==> prophecyM == 0 // @ ensures err == nil ==> prophecyM == n // @ ensures io.token(old(MultiReadBioNext(place, prophecyM))) - // @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, ifsToIO_ifs(ingressID))) + // @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, path.ifsToIO_ifs(ingressID))) // @ ensures err == nil ==> - // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> - // @ unfolding acc(msgs[i].Mem(), _) in absIO_val(dp, msgs[i].Buffers[0], ingressID) == - // @ old(MultiReadBioIO_val(place, n)[i]) - // TODO (Markus): uint16 or option[io.IO_ifs] for ingress - ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place, ghost dp io.DataPlaneSpec @*/) (n int, err error) + // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> + // @ MsgToAbsVal(&msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i]) + ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place @*/) (n int, err error) // @ requires acc(addr.Mem(), _) // @ requires acc(Mem(), _) - // @ preserves acc(sl.AbsSlice_Bytes(b, 0, len(b)), R10) + // @ preserves acc(sl.Bytes(b, 0, len(b)), R10) // @ ensures err == nil ==> 0 <= n && n <= len(b) // @ ensures err != nil ==> err.ErrorMem() WriteTo(b []byte, addr *net.UDPAddr) (n int, err error) @@ -152,10 +152,18 @@ type BatchConn interface { // (VerifiedSCION) opted for less reusable spec for WriteBatch for // performance reasons. // @ requires len(msgs) == 1 - // @ preserves acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // @ requires acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // preconditions for IO-spec: + // @ requires MsgToAbsVal(&msgs[0], egressID) == ioAbsPkts + // @ requires io.token(place) && io.CBioIO_bio3s_send(place, ioAbsPkts) + // @ ensures acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() // @ ensures err == nil ==> 0 <= n && n <= len(msgs) // @ ensures err != nil ==> err.ErrorMem() - WriteBatch(msgs underlayconn.Messages, flags int) (n int, err error) + // postconditions for IO-spec: + // (VerifiedSCION) the permission to the protocol must always be returned, + // otherwise the router cannot continue after failing to send a packet. 
+	// @ ensures io.token(old(io.dp3s_iospec_bio3s_send_T(place, ioAbsPkts)))
+	WriteBatch(msgs underlayconn.Messages, flags int /*@, ghost egressID uint16, ghost place io.Place, ghost ioAbsPkts io.IO_val @*/) (n int, err error)
 	// @ requires Mem()
 	// @ ensures err != nil ==> err.ErrorMem()
 	// @ decreases
@@ -170,10 +178,9 @@ type BatchConn interface {
 // Currently, only the following features are supported:
 //   - initializing connections; MUST be done prior to calling Run
 type DataPlane struct {
-	// (VerifiedSCION) this is morally ghost
-	// It is stored in the dataplane in order to retain
-	// knowledge that macFactory will not fail
-	// @ key *[]byte
+	// (VerifiedSCION) This is stored in the dataplane in order to retain
+	// knowledge that macFactory will not fail.
+	// @ ghost key *[]byte
 	external          map[uint16]BatchConn
 	linkTypes         map[uint16]topology.LinkType
 	neighborIAs       map[uint16]addr.IA
@@ -231,10 +238,11 @@ func (e scmpError) Error() string {
 // @ requires d.LocalIA().IsZero()
 // @ requires !ia.IsZero()
 // @ preserves d.mtx.LockP()
-// @ preserves d.mtx.LockInv() == MutexInvariant!;
+// @ preserves d.mtx.LockInv() == MutexInvariant!
 // @ ensures acc(d.Mem(), OutMutexPerm)
 // @ ensures !d.IsRunning()
 // @ ensures e == nil
+// @ decreases 0 if sync.IgnoreBlockingForTermination()
 func (d *DataPlane) SetIA(ia addr.IA) (e error) {
 	d.mtx.Lock()
 	defer d.mtx.Unlock()
@@ -266,12 +274,13 @@ func (d *DataPlane) SetIA(ia addr.IA) (e error) {
 // @ requires !d.IsRunning()
 // @ requires !d.KeyIsSet()
 // @ requires len(key) > 0
-// @ requires sl.AbsSlice_Bytes(key, 0, len(key))
+// @ requires sl.Bytes(key, 0, len(key))
 // @ preserves d.mtx.LockP()
-// @ preserves d.mtx.LockInv() == MutexInvariant!;
+// @ preserves d.mtx.LockInv() == MutexInvariant!
 // @ ensures acc(d.Mem(), OutMutexPerm)
 // @ ensures !d.IsRunning()
 // @ ensures res == nil ==> d.KeyIsSet()
+// @ decreases 0 if sync.IgnoreBlockingForTermination()
 func (d *DataPlane) SetKey(key []byte) (res error) {
 	// @ share key
 	d.mtx.Lock()
@@ -302,9 +311,9 @@ func (d *DataPlane) SetKey(key []byte) (res error) {
 	}
 	// @ d.key = &key
 	verScionTemp :=
-		// @ requires acc(&key, _) && acc(sl.AbsSlice_Bytes(key, 0, len(key)), _)
+		// @ requires acc(&key, _) && acc(sl.Bytes(key, 0, len(key)), _)
 		// @ requires scrypto.ValidKeyForHash(key)
-		// @ ensures acc(&key, _) && acc(sl.AbsSlice_Bytes(key, 0, len(key)), _)
+		// @ ensures acc(&key, _) && acc(sl.Bytes(key, 0, len(key)), _)
 		// @ ensures h != nil && h.Mem()
 		// @ decreases
 		func /*@ f @*/ () (h hash.Hash) {
@@ -328,9 +337,10 @@ func (d *DataPlane) SetKey(key []byte) (res error) {
 // @ requires conn != nil && conn.Mem()
 // @ requires ip.Mem()
 // @ preserves d.mtx.LockP()
-// @ preserves d.mtx.LockInv() == MutexInvariant!;
+// @ preserves d.mtx.LockInv() == MutexInvariant!
 // @ ensures acc(d.Mem(), OutMutexPerm)
 // @ ensures !d.IsRunning()
+// @ decreases 0 if sync.IgnoreBlockingForTermination()
 func (d *DataPlane) AddInternalInterface(conn BatchConn, ip net.IP) error {
 	d.mtx.Lock()
 	defer d.mtx.Unlock()
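Note: the clause decreases 0 if sync.IgnoreBlockingForTermination(), added to every lock-acquiring method here, is a conditional termination measure: termination is only claimed under the assumption that Lock and Unlock do not block forever. The annotation shape on a setter (setFlag is hypothetical):

// @ preserves d.mtx.LockP()
// @ preserves d.mtx.LockInv() == MutexInvariant!
// @ decreases 0 if sync.IgnoreBlockingForTermination()
func (d *DataPlane) setFlag() {
	d.mtx.Lock()
	defer d.mtx.Unlock()
	// the body itself must be terminating for the measure to hold
}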
+// @ preserves d.mtx.LockInv() == MutexInvariant! +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddExternalInterface(ifID uint16, conn BatchConn) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -410,7 +421,8 @@ func (d *DataPlane) AddExternalInterface(ifID uint16, conn BatchConn) error { // @ preserves acc(d.Mem(), OutMutexPerm) // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddNeighborIA(ifID uint16, remote addr.IA) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -448,6 +460,7 @@ func (d *DataPlane) AddNeighborIA(ifID uint16, remote addr.IA) error { // (VerifiedSCION) unlike all other setter methods, this does not lock d.mtx. // This was reported in https://github.com/scionproto/scion/issues/4282. // @ preserves MutexInvariant!() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddLinkType(ifID uint16, linkTo topology.LinkType) error { // @ unfold acc(d.Mem(), OutMutexPerm) if _, existsB := d.linkTypes[ifID]; existsB { @@ -504,20 +517,21 @@ func (d *DataPlane) AddExternalInterfaceBFD(ifID uint16, conn BatchConn, // returns InterfaceUp if the relevant bfdsession state is up, or if there is no BFD // session. Otherwise, it returns InterfaceDown. // @ preserves acc(d.Mem(), R5) +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) getInterfaceState(interfaceID uint16) control.InterfaceState { // @ unfold acc(d.Mem(), R5) // @ defer fold acc(d.Mem(), R5) bfdSessions := d.bfdSessions // @ ghost if bfdSessions != nil { - // @ unfold acc(accBfdSession(d.bfdSessions), R20) - // @ defer fold acc(accBfdSession(d.bfdSessions), R20) + // @ unfold acc(accBfdSession(d.bfdSessions), R20) + // @ defer fold acc(accBfdSession(d.bfdSessions), R20) // @ } - // (VerifiedSCION) had to rewrite this, as Gobra does not correctly - // implement short-circuiting. if bfdSession, ok := bfdSessions[interfaceID]; ok { // @ assert interfaceID in domain(d.bfdSessions) // @ assert bfdSession in range(d.bfdSessions) // @ assert bfdSession != nil + // (VerifiedSCION) This check used to be conjoined with 'ok' in the condition + // of the if stmt above. We broke it down to perform intermediate asserts. if !bfdSession.IsUp() { return control.InterfaceDown } @@ -563,7 +577,8 @@ func (d *DataPlane) addBFDController(ifID uint16, s *bfdSend, cfg control.BFD, // @ preserves acc(d.Mem(), OutMutexPerm) // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!; +// @ preserves d.mtx.LockInv() == MutexInvariant! +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddSvc(svc addr.HostSVC, a *net.UDPAddr) error { d.mtx.Lock() // @ unfold MutexInvariant!() @@ -616,6 +631,7 @@ func (d *DataPlane) AddSvc(svc addr.HostSVC, a *net.UDPAddr) error { // @ requires a != nil && acc(a.Mem(), R10) // @ preserves acc(d.Mem(), OutMutexPerm/2) // @ preserves d.mtx.LockP() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) DelSvc(svc addr.HostSVC, a *net.UDPAddr) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -645,7 +661,8 @@ func (d *DataPlane) DelSvc(svc addr.HostSVC, a *net.UDPAddr) error { // @ preserves acc(d.Mem(), OutMutexPerm) // @ preserves !d.IsRunning() // @ preserves d.mtx.LockP() -// @ preserves d.mtx.LockInv() == MutexInvariant!;
+// @ preserves d.mtx.LockInv() == MutexInvariant! +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (d *DataPlane) AddNextHop(ifID uint16, a *net.UDPAddr) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -726,12 +743,13 @@ func (d *DataPlane) AddNextHopBFD(ifID uint16, src, dst *net.UDPAddr, cfg contro // @ requires d.PreWellConfigured() // (VerifiedSCION) here, the spec still uses a private field. // @ requires d.mtx.LockP() -// @ requires d.mtx.LockInv() == MutexInvariant!; +// @ requires d.mtx.LockInv() == MutexInvariant! // @ requires ctx != nil && ctx.Mem() // contracts for IO-spec // @ requires dp.Valid() // @ requires d.DpAgreesWithSpec(dp) // @ requires io.token(place) && dp.dp3s_iospec_ordered(state, place) +// @ #backend[moreJoins()] func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost state io.IO_dp3s_state_local, ghost dp io.DataPlaneSpec @*/) error { // @ share d, ctx d.mtx.Lock() @@ -760,11 +778,13 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ decreases // @ outline ( // @ reveal d.PreWellConfigured() + // @ reveal d.getDomExternal() // @ reveal d.DpAgreesWithSpec(dp) // @ unfold d.Mem() d.running = true // @ fold MutexInvariant!() // @ fold d.Mem() + // @ reveal d.getDomExternal() // @ reveal d.PreWellConfigured() // @ reveal d.DpAgreesWithSpec(dp) // @ ) @@ -778,27 +798,22 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // dPtr as a helper parameter. It always receives the value &d. // @ requires acc(dPtr, _) // @ requires let d := *dPtr in - // @ acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) - // @ requires let d := *dPtr in - // @ acc(d.Mem(), _) && d.WellConfigured() - // @ requires let d := *dPtr in d.getValSvc() != nil - // @ requires let d := *dPtr in d.getValForwardingMetrics() != nil - // @ requires let d := *dPtr in (0 in d.getDomForwardingMetrics()) - // @ requires let d := *dPtr in (ingressID in d.getDomForwardingMetrics()) - // @ requires let d := *dPtr in d.macFactory != nil + // @ acc(d.Mem(), _) && + // @ d.WellConfigured() && + // @ d.getValSvc() != nil && + // @ d.getValForwardingMetrics() != nil && + // @ (0 in d.getDomForwardingMetrics()) && + // @ (ingressID in d.getDomForwardingMetrics()) && + // @ d.getMacFactory() != nil // @ requires rd != nil && acc(rd.Mem(), _) // contracts for IO-spec // @ requires dp.Valid() // @ requires let d := *dPtr in - // @ acc(d.Mem(), _) && d.DpAgreesWithSpec(dp) - // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; - func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { - // @ ghost ioIngressID := ifsToIO_ifs(ingressID) + // @ d.DpAgreesWithSpec(dp) + // @ requires acc(ioLock.LockP(), _) + // @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + // @ #backend[moreJoins()] + func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { d := *dPtr msgs := conn.NewReadMessages(inputBatchCnt) // @ requires forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> @@ -820,12 +835,12 @@
func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // to be able to perform this unfold. // @ unfold msgs[i0].Mem() msg := msgs[i0] - // @ ensures sl.AbsSlice_Bytes(tmp, 0, len(tmp)) + // @ ensures sl.Bytes(tmp, 0, len(tmp)) // @ decreases // @ outline( tmp := make([]byte, bufSize) // @ assert forall i int :: { &tmp[i] } 0 <= i && i < len(tmp) ==> acc(&tmp[i]) - // @ fold sl.AbsSlice_Bytes(tmp, 0, len(tmp)) + // @ fold sl.Bytes(tmp, 0, len(tmp)) // @ ) // @ assert msgs[i0] === msg msg.Buffers[0] = tmp @@ -838,7 +853,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ outline ( writeMsgs := make(underlayconn.Messages, 1) writeMsgs[0].Buffers = make([][]byte, 1) - // @ fold sl.AbsSlice_Bytes(writeMsgs[0].OOB, 0, len(writeMsgs[0].OOB)) + // @ fold sl.Bytes(writeMsgs[0].OOB, 0, len(writeMsgs[0].OOB)) // @ sl.NilAcc_Bytes() // @ fold writeMsgInv(writeMsgs) // @ ) @@ -846,17 +861,14 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta processor := newPacketProcessor(d, ingressID) var scmpErr /*@@@*/ scmpError + // @ d.getRunningMem() + // @ invariant acc(&scmpErr) // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> // @ msgs[i].Mem() // @ invariant writeMsgInv(writeMsgs) // @ invariant acc(dPtr, _) && *dPtr === d - // @ invariant acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) + // @ invariant acc(&d.running, _) // necessary for loop condition // @ invariant acc(d.Mem(), _) && d.WellConfigured() // @ invariant d.getValSvc() != nil // @ invariant d.getValForwardingMetrics() != nil @@ -864,8 +876,12 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant ingressID in d.getDomForwardingMetrics() // @ invariant acc(rd.Mem(), _) // @ invariant processor.sInit() && processor.sInitD() === d - // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + // @ invariant processor.getIngressID() == ingressID + // @ invariant acc(ioLock.LockP(), _) + // @ invariant ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() for d.running { + // @ ghost ioIngressID := path.ifsToIO_ifs(ingressID) // Multi recv event // @ ghost ioLock.Lock() // @ unfold SharedInv!< dp, ioSharedArg !>() @@ -873,14 +889,24 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ ghost numberOfReceivedPacketsProphecy := AllocProphecy() // @ ExtractMultiReadBio(dp, t, numberOfReceivedPacketsProphecy, s) // @ MultiUpdateElemWitness(t, numberOfReceivedPacketsProphecy, ioIngressID, s, ioSharedArg) - // @ ghost ioValSeq := MultiReadBioIO_val(t,numberOfReceivedPacketsProphecy) + // @ ghost ioValSeq := MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy) // @ ghost sN := MultiReadBioUpd(t, numberOfReceivedPacketsProphecy, s) // @ ghost tN := MultiReadBioNext(t, numberOfReceivedPacketsProphecy) // @ assert dp.dp3s_iospec_ordered(sN, tN) - pkts, err := rd.ReadBatch(msgs /*@, ingressID, numberOfReceivedPacketsProphecy, t , dp @*/) + // @ BeforeReadBatch: + pkts, err := rd.ReadBatch(msgs /*@, ingressID, numberOfReceivedPacketsProphecy, t @*/) + // @ assert 
old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)) == ioValSeq + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ ioValSeq[i] == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i]) + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> MsgToAbsVal(&msgs[i], ingressID) == ioValSeq[i] // @ ghost *ioSharedArg.State = sN // @ ghost *ioSharedArg.Place = tN + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ MsgToAbsVal(&msgs[i], ingressID) == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i]) // @ MultiElemWitnessConv(ioSharedArg.IBufY, ioIngressID, ioValSeq) // @ fold SharedInv!< dp, ioSharedArg !>() // @ ioLock.Unlock() @@ -902,6 +928,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ !msgs[i].HasWildcardPermAddr() // @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) + // @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ MsgToAbsVal(&msgs[i], ingressID) == ioValSeq[i] // (VerifiedSCION) using regular for loop instead of range loop to avoid unnecessary // complications with permissions @@ -909,12 +937,6 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> msgs[i].Mem() // @ invariant writeMsgInv(writeMsgs) // @ invariant acc(dPtr, _) && *dPtr === d - // @ invariant acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) // @ invariant acc(d.Mem(), _) && d.WellConfigured() // @ invariant d.getValSvc() != nil // @ invariant d.getValForwardingMetrics() != nil @@ -930,11 +952,23 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) // @ invariant processor.sInit() && processor.sInitD() === d + // @ invariant processor.getIngressID() == ingressID + // contracts for IO-spec + // @ invariant pkts <= len(ioValSeq) + // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() + // @ invariant ioIngressID == path.ifsToIO_ifs(ingressID) + // @ invariant acc(ioLock.LockP(), _) + // @ invariant ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + // @ invariant forall i int :: { &msgs[i] } i0 <= i && i < pkts ==> + // @ MsgToAbsVal(&msgs[i], ingressID) == ioValSeq[i] + // @ invariant MultiElemWitnessWithIndex(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) + // @ decreases pkts - i0 for i0 := 0; i0 < pkts; i0++ { // @ assert &msgs[:pkts][i0] == &msgs[i0] // @ preserves 0 <= i0 && i0 < pkts && pkts <= len(msgs) // @ preserves acc(msgs[i0].Mem(), R1) // @ ensures p === msgs[:pkts][i0].GetMessage() + // @ decreases // @ outline( // @ unfold acc(msgs[i0].Mem(), R1) p := msgs[:pkts][i0] @@ -951,7 +985,6 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ prometheus.CounterMemImpliesNonNil(inputCounters.InputBytesTotal) inputCounters.InputPacketsTotal.Inc() // @ assert msgs[i0].GetN() == p.N - // (VerifiedSCION) 
Gobra still does not fully support floats // @ fl.CastPreservesOrder64(0, p.N) inputCounters.InputBytesTotal.Add(float64(p.N)) @@ -961,19 +994,31 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ assert p.Buffers === m.Buffers // @ assert acc(&p.Buffers[0]) // @ assert p.N <= len(p.Buffers[0]) - // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, writePerm) + // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) tmpBuf := p.Buffers[0][:p.N] - // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, p.N) - // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)) - result, err /*@ , addrAliasesPkt @*/ := processor.processPkt(tmpBuf, srcAddr) + // @ ghost absPktTmpBuf := absIO_val(tmpBuf, ingressID) + // @ ghost absPktBuf0 := absIO_val(msgs[i0].Buffers[0], ingressID) + // @ assert msgs[i0] === p + // @ absIO_valWidenLemma(p.Buffers[0], ingressID, p.N) + // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf === absPktBuf0 + // @ MultiElemWitnessStep(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) + // @ assert ioValSeq[i0].isIO_val_Pkt2 ==> + // @ ElemWitness(ioSharedArg.IBufY, ioIngressID, ioValSeq[i0].IO_val_Pkt2_2) + // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf == ioValSeq[i0] + // @ assert path.ifsToIO_ifs(processor.getIngressID()) == ioIngressID + // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) + // @ assert sl.Bytes(tmpBuf, 0, p.N) + // @ assert sl.Bytes(tmpBuf, 0, len(tmpBuf)) + result, err /*@ , addrAliasesPkt, newAbsPkt @*/ := processor.processPkt(tmpBuf, srcAddr /*@, ioLock, ioSharedArg, dp @*/) // @ fold scmpErr.Mem() + switch { case err == nil: // @ unfold scmpErr.Mem() case errors.As(err, &scmpErr): // @ unfold d.validResult(result, addrAliasesPkt) // @ ghost if addrAliasesPkt && result.OutAddr != nil { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ unfold scmpErr.Mem() if !scmpErr.TypeCode.InfoMsg() { @@ -987,11 +1032,11 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta default: // @ unfold d.validResult(result, addrAliasesPkt) // @ ghost if addrAliasesPkt { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ sl.CombineRange_Bytes(p.Buffers[0], 0, p.N, writePerm) // @ assert acc(m) - // @ assert sl.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) + // @ assert sl.Bytes(m.OOB, 0, len(m.OOB)) // @ assert (m.Addr != nil ==> acc(m.Addr.Mem(), _)) // @ assert 0 <= m.N // @ msgs[:pkts][i0].IsActive = false @@ -1006,7 +1051,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta if result.OutConn == nil { // e.g. BFD case no message is forwarded // @ unfold d.validResult(result, addrAliasesPkt) // @ ghost if addrAliasesPkt { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ sl.CombineRange_Bytes(p.Buffers[0], 0, p.N, writePerm) // @ msgs[:pkts][i0].IsActive = false @@ -1014,6 +1059,13 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta continue } + // (VerifiedSCION) we currently have this assumption because we cannot think of a sound way to capture + // the behaviour of errors.As(...) in our specifications. 
Nonetheless, we checked extensively that, when + // processPkt does not return an error or returns an scmpError (and thus errors.As(err, &scmpErr) succeeds), + // result.OutPkt is always non-nil. For the other kinds of errors, the result is nil, but that branch is killed + // before this point. + // @ assume result.OutPkt != nil + // Write to OutConn; drop the packet if this would block. // Use WriteBatch because it's the only available function that // supports MSG_DONTWAIT. @@ -1027,11 +1079,31 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta writeMsgs[0].Addr = result.OutAddr } // @ sl.NilAcc_Bytes() + // @ assert absIO_val(result.OutPkt, result.EgressID) == + // @ absIO_val(writeMsgs[0].Buffers[0], result.EgressID) + // @ assert result.OutPkt != nil ==> newAbsPkt == + // @ absIO_val(writeMsgs[0].Buffers[0], result.EgressID) // @ fold acc(writeMsgs[0].Mem(), R50) - _, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT) + + // @ ghost ioLock.Lock() + // @ unfold SharedInv!< dp, ioSharedArg !>() + // @ ghost t, s := *ioSharedArg.Place, *ioSharedArg.State + // @ ghost if(newAbsPkt.isIO_val_Pkt2) { + // @ ApplyElemWitness(s.obuf, ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) + // @ assert newAbsPkt.IO_val_Pkt2_2 in AsSet(s.obuf[newAbsPkt.IO_val_Pkt2_1]) + // @ assert dp.dp3s_iospec_bio3s_send_guard(s, t, newAbsPkt) + // @ } else { assert newAbsPkt.isIO_val_Unsupported } + // @ unfold dp.dp3s_iospec_ordered(s, t) + // @ unfold dp.dp3s_iospec_bio3s_send(s, t) + // @ io.TriggerBodyIoSend(newAbsPkt) + // @ ghost tN := io.dp3s_iospec_bio3s_send_T(t, newAbsPkt) + _, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT /*@, result.EgressID, t, newAbsPkt @*/) + // @ ghost *ioSharedArg.Place = tN + // @ fold SharedInv!< dp, ioSharedArg !>() + // @ ghost ioLock.Unlock() // @ unfold acc(writeMsgs[0].Mem(), R50) // @ ghost if addrAliasesPkt && result.OutAddr != nil { - // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) + // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.Bytes(tmpBuf, 0, len(tmpBuf)), R15) // @ } // @ sl.CombineRange_Bytes(p.Buffers[0], 0, p.N, writePerm) // @ msgs[:pkts][i0].IsActive = false @@ -1125,44 +1197,33 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant acc(d.Mem(), _) && d.WellConfigured() // @ invariant externals != nil ==> acc(externals, R4) // @ invariant externals != nil ==> acc(accBatchConn(externals), R4) - // (VerifiedSCION) can we drop a few of these perms? 
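The assume introduced above for result.OutPkt compensates for errors.As, whose observable effect is easy to demonstrate but hard to specify: it walks the wrapped-error chain and, on success, copies the matching error into its target. A self-contained illustration (scmpErrDemo is a stand-in, not the router's scmpError):

package main

import (
	"errors"
	"fmt"
)

// scmpErrDemo stands in for the router's scmpError type.
type scmpErrDemo struct{ code int }

func (e scmpErrDemo) Error() string { return fmt.Sprintf("scmp error %d", e.code) }

func main() {
	err := fmt.Errorf("processing failed: %w", scmpErrDemo{code: 5})
	var target scmpErrDemo
	// errors.As unwraps err step by step; on a match it assigns the
	// wrapped value to target and returns true. That assignment is the
	// side effect that a contract cannot easily capture, hence the assume.
	if errors.As(err, &target) {
		fmt.Println("matched code:", target.code) // prints: matched code: 5
	}
}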
- // @ invariant acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) // @ invariant acc(d.Mem(), _) && d.WellConfigured() // @ invariant d.getValSvc() != nil // @ invariant d.getValForwardingMetrics() != nil // @ invariant 0 in d.getDomForwardingMetrics() - // @ invariant d.macFactory != nil + // @ invariant d.getMacFactory() != nil // @ invariant dp.Valid() // @ invariant d.DpAgreesWithSpec(dp) - // @ invariant acc(ioLockRun.LockP(), _) && ioLockRun.LockInv() == SharedInv!< dp, ioSharedArgRun !>; + // @ invariant acc(ioLockRun.LockP(), _) + // @ invariant ioLockRun.LockInv() == SharedInv!< dp, ioSharedArgRun !> // @ decreases len(externals) - len(visited) for ifID, v := range externals /*@ with visited @*/ { cl := // @ requires acc(&read, _) && read implements rc // @ requires acc(&d, _) - // @ requires acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) // @ requires acc(d.Mem(), _) && d.WellConfigured() // @ requires d.getValSvc() != nil // @ requires d.getValForwardingMetrics() != nil // @ requires 0 in d.getDomForwardingMetrics() // @ requires i in d.getDomForwardingMetrics() - // @ requires d.macFactory != nil + // @ requires d.getMacFactory() != nil // @ requires c != nil && acc(c.Mem(), _) // contracts for IO-spec // @ requires dp.Valid() // @ requires d.DpAgreesWithSpec(dp) - // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; - func /*@ closure2 @*/ (i uint16, c BatchConn /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { + // @ requires acc(ioLock.LockP(), _) + // @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + func /*@ closure2 @*/ (i uint16, c BatchConn /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { defer log.HandlePanic() read(i, c, &d /*@, ioLock, ioSharedArg, dp @*/) //@ as rc } @@ -1176,26 +1237,22 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta cl := // @ requires acc(&read, _) && read implements rc // @ requires acc(&d, _) - // @ requires acc(&d.external, _) && acc(&d.linkTypes, _) && - // @ acc(&d.neighborIAs, _) && acc(&d.internal, _) && - // @ acc(&d.internalIP, _) && acc(&d.internalNextHops, _) && - // @ acc(&d.svc, _) && acc(&d.macFactory, _) && acc(&d.bfdSessions, _) && - // @ acc(&d.localIA, _) && acc(&d.running, _) && acc(&d.Metrics, _) && - // @ acc(&d.forwardingMetrics, _) && acc(&d.key, _) // @ requires acc(d.Mem(), _) && d.WellConfigured() // @ requires d.getValSvc() != nil // @ requires d.getValForwardingMetrics() != nil // @ requires 0 in d.getDomForwardingMetrics() - // @ requires d.macFactory != nil + // @ requires d.getMacFactory() != nil // @ requires c != nil && acc(c.Mem(), _) // contracts for IO-spec // @ requires dp.Valid() // @ requires d.DpAgreesWithSpec(dp) - // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, 
ioSharedArg !>; - func /*@ closure3 @*/ (c BatchConn /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { + // @ requires acc(ioLock.LockP(), _) + // @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + func /*@ closure3 @*/ (c BatchConn /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { defer log.HandlePanic() read(0, c, &d /*@, ioLock, ioSharedArg, dp @*/) //@ as rc } + // @ d.getInternalMem() go cl(d.internal /*@, ioLockRun, ioSharedArgRun, dp @*/) //@ as closure3 d.mtx.Unlock() @@ -1229,6 +1286,7 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ decreases func (d *DataPlane) initMetrics( /*@ ghost dp io.DataPlaneSpec @*/ ) { // @ assert reveal d.PreWellConfigured() + // @ reveal d.getDomExternal() // @ assert reveal d.DpAgreesWithSpec(dp) // @ assert unfolding acc(d.Mem(), _) in // @ d.dpSpecWellConfiguredLocalIA(dp) && @@ -1301,6 +1359,7 @@ func (d *DataPlane) initMetrics( /*@ ghost dp io.DataPlaneSpec @*/ ) { // @ assert d.dpSpecWellConfiguredNeighborIAs(dp) // @ assert d.dpSpecWellConfiguredLinkTypes(dp) // @ fold d.Mem() + // @ reveal d.getDomExternal() // @ reveal d.WellConfigured() // @ assert reveal d.DpAgreesWithSpec(dp) } @@ -1312,9 +1371,8 @@ type processResult struct { OutPkt []byte } -// @ requires acc(&d.macFactory, _) && d.macFactory != nil -// @ requires acc(d.Mem(), _) -// @ ensures res.sInit() && res.sInitD() == d +// @ requires acc(d.Mem(), _) && d.getMacFactory() != nil +// @ ensures res.sInit() && res.sInitD() == d && res.getIngressID() == ingressID // @ decreases func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcessor) { var verScionTmp gopacket.SerializeBuffer @@ -1330,7 +1388,7 @@ func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcess epicInput: make([]byte, libepic.MACBufferSize), }, } - // @ fold sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) + // @ fold sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ fold slayers.PathPoolMem(p.scionLayer.pathPool, p.scionLayer.pathPoolRaw) p.scionLayer.RecyclePaths() // @ fold p.scionLayer.NonInitMem() @@ -1343,6 +1401,7 @@ func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcess // @ preserves p.sInit() // @ ensures p.sInitD() == old(p.sInitD()) +// @ ensures p.getIngressID() == old(p.getIngressID()) // @ ensures p.sInitRawPkt() == nil // @ ensures p.sInitPath() == nil // @ ensures p.sInitHopField() == path.HopField{} @@ -1368,34 +1427,51 @@ func (p *scionPacketProcessor) reset() (err error) { } // @ requires p.sInit() -// @ requires sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)) +// @ requires sl.Bytes(rawPkt, 0, len(rawPkt)) // @ requires acc(srcAddr.Mem(), _) // @ requires let d := p.sInitD() in // @ acc(d.Mem(), _) && // @ d.WellConfigured() && // @ d.getValSvc() != nil && -// @ d.getValForwardingMetrics() != nil +// @ d.getValForwardingMetrics() != nil && +// @ d.DpAgreesWithSpec(dp) // @ ensures p.sInit() // @ ensures acc(p.sInitD().Mem(), _) // @ ensures p.sInitD() == old(p.sInitD()) +// @ ensures p.getIngressID() == old(p.getIngressID()) // @ ensures p.sInitD().validResult(respr, addrAliasesPkt) -// @ ensures acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), 1 - R15) +// @ ensures acc(sl.Bytes(rawPkt, 0, len(rawPkt)), 1 - R15) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && -// @ (acc(respr.OutAddr.Mem(), R15) --* 
acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(rawPkt, 0, len(rawPkt)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(rawPkt, 0, len(rawPkt)), R15) // @ ensures respr.OutPkt !== rawPkt && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +// @ requires let absPkt := absIO_val(rawPkt, p.getIngressID()) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ ensures newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures reserr != nil && respr.OutPkt != nil ==> newAbsPkt.isIO_val_Unsupported +// @ decreases 0 if sync.IgnoreBlockingForTermination() +// @ #backend[moreJoins(1)] func (p *scionPacketProcessor) processPkt(rawPkt []byte, - srcAddr *net.UDPAddr) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { + srcAddr *net.UDPAddr /*@, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { if err := p.reset(); err != nil { // @ fold p.sInitD().validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, io.IO_val_Unit{} @*/ } // @ assert p.sInitD().getValForwardingMetrics() != nil // @ unfold p.sInit() + // @ assert !p.segmentChange // @ ghost d := p.d p.rawPkt = rawPkt p.srcAddr = srcAddr @@ -1409,56 +1485,55 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, if err != nil { // @ fold p.sInit() // @ fold p.sInitD().validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ - } - /*@ - ghost var ub []byte - ghost var ubScionLayer []byte = p.rawPkt - ghost var ubHbhLayer []byte - ghost var ubE2eLayer []byte - - ghost llStart := 0 - ghost llEnd := 0 - ghost mustCombineRanges := lastLayerIdx != -1 && !offsets[lastLayerIdx].isNil - ghost var o offsetPair - ghost if lastLayerIdx == -1 { - ub = p.rawPkt - } else { - if offsets[lastLayerIdx].isNil { - ub = nil - sl.NilAcc_Bytes() - } else { - o = offsets[lastLayerIdx] - ub = p.rawPkt[o.start:o.end] - llStart = o.start - llEnd = o.end - sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, writePerm) - } - } - hasHbhLayer := processed[0] - oHbh := offsets[0] - ubHbhLayer = hasHbhLayer && !oHbh.isNil ? p.rawPkt[oHbh.start:oHbh.end] : ([]byte)(nil) - hasE2eLayer := processed[1] - oE2e := offsets[1] - ubE2eLayer = hasE2eLayer && !oE2e.isNil ? 
p.rawPkt[oE2e.start:oE2e.end] : ([]byte)(nil) - assert processed[0] ==> p.hbhLayer.Mem(ubHbhLayer) - assert processed[1] ==> p.e2eLayer.Mem(ubE2eLayer) - @*/ - // @ assert sl.AbsSlice_Bytes(ub, 0, len(ub)) + return processResult{}, err /*@, false, io.IO_val_Unit{} @*/ + } + // @ ghost var ub []byte + // @ ghost var ubScionLayer []byte = p.rawPkt + // @ ghost var ubHbhLayer []byte + // @ ghost var ubE2eLayer []byte + + // @ ghost llStart := 0 + // @ ghost llEnd := 0 + // @ ghost mustCombineRanges := lastLayerIdx != -1 && !offsets[lastLayerIdx].isNil + // @ ghost var o offsetPair + // @ ghost if lastLayerIdx == -1 { + // @ ub = p.rawPkt + // @ } else { + // @ if offsets[lastLayerIdx].isNil { + // @ ub = nil + // @ sl.NilAcc_Bytes() + // @ } else { + // @ o = offsets[lastLayerIdx] + // @ ub = p.rawPkt[o.start:o.end] + // @ llStart = o.start + // @ llEnd = o.end + // @ sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) + // @ } + // @ } + // @ hasHbhLayer := processed[0] + // @ oHbh := offsets[0] + // @ ubHbhLayer = hasHbhLayer && !oHbh.isNil ? p.rawPkt[oHbh.start:oHbh.end] : ([]byte)(nil) + // @ hasE2eLayer := processed[1] + // @ oE2e := offsets[1] + // @ ubE2eLayer = hasE2eLayer && !oE2e.isNil ? p.rawPkt[oE2e.start:oE2e.end] : ([]byte)(nil) + // @ assert processed[0] ==> p.hbhLayer.Mem(ubHbhLayer) + // @ assert processed[1] ==> p.e2eLayer.Mem(ubE2eLayer) + // @ assert acc(sl.Bytes(ub, 0, len(ub)), HalfPerm) pld /*@ , start, end @*/ := p.lastLayer.LayerPayload( /*@ ub @*/ ) - // @ sl.SplitRange_Bytes(ub, start, end, writePerm) + // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) // @ sl.NilAcc_Bytes() - pathType := /*@ unfolding p.scionLayer.Mem(rawPkt) in @*/ p.scionLayer.PathType switch pathType { case empty.PathType: + // @ ghost sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) + // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) } if p.lastLayer.NextLayerType( /*@ ub @*/ ) == layers.LayerTypeBFD { // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer sl.CombineRange_Bytes(ub, start, end, writePerm) - return processResult{}, p.processIntraBFD(pld) /*@, false @*/ + return processResult{}, p.processIntraBFD(pld) /*@, false, io.IO_val_Unit{} @*/ } // @ establishMemUnsupportedPathTypeNextHeader() // @ defer fold p.sInit() @@ -1466,9 +1541,11 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) // @ ghost defer sl.CombineRange_Bytes(ub, start, end, writePerm) return processResult{}, serrors.WithCtx(unsupportedPathTypeNextHeader, - "type", pathType, "header", nextHdr(p.lastLayer /*@, ub @*/)) /*@, false @*/ + "type", pathType, "header", nextHdr(p.lastLayer /*@, ub @*/)) /*@, false, io.IO_val_Unit{} @*/ case onehop.PathType: if p.lastLayer.NextLayerType( /*@ ub @*/ ) == layers.LayerTypeBFD { + // @ ghost sl.SplitRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) + // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) } // @ ghost defer sl.CombineRange_Bytes(ub, start, end, writePerm) // @ unfold acc(p.scionLayer.Mem(p.rawPkt), R10) @@ -1479,61 +1556,61 @@ 
func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) - return processResult{}, malformedPath /*@, false @*/ + return processResult{}, malformedPath /*@, false, io.IO_val_Unit{} @*/ } // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) - return processResult{}, p.processInterBFD(ohp, pld) /*@, false @*/ + return processResult{}, p.processInterBFD(ohp, pld) /*@, false, io.IO_val_Unit{} @*/ } - // @ sl.CombineRange_Bytes(ub, start, end, writePerm) - // (VerifiedSCION) Nested if because short-circuiting && is not working - // @ ghost if lastLayerIdx >= 0 { - // @ if !offsets[lastLayerIdx].isNil { - // @ o := offsets[lastLayerIdx] - // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) - // @ } + // @ sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil { + // @ o := offsets[lastLayerIdx] + // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) // @ } - // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) + // @ assert sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) // @ unfold acc(p.d.Mem(), _) - v1, v2 /*@, aliasesPkt @*/ := p.processOHP() + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) + // @ assert !(reveal slayers.IsSupportedPkt(p.rawPkt)) + v1, v2 /*@, aliasesPkt, newAbsPkt @*/ := p.processOHP() // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() - return v1, v2 /*@, aliasesPkt @*/ + return v1, v2 /*@, aliasesPkt, newAbsPkt @*/ case scion.PathType: - // @ sl.CombineRange_Bytes(ub, start, end, writePerm) - // @ ghost if lastLayerIdx >= 0 { - // @ ghost if !offsets[lastLayerIdx].isNil { - // @ o := offsets[lastLayerIdx] - // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) - // @ } + // @ sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil { + // @ o := offsets[lastLayerIdx] + // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) // @ } - // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) - v1, v2 /*@ , addrAliasesPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd @*/ ) + // @ assert sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) + v1, v2 /*@ , addrAliasesPkt, newAbsPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd, ioLock, ioSharedArg, dp @*/ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, v2 == nil, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() - return v1, v2 /*@, addrAliasesPkt @*/ + return v1, v2 /*@, addrAliasesPkt, newAbsPkt @*/ case epic.PathType: - // @ sl.CombineRange_Bytes(ub, start, end, writePerm) + // @ sl.CombineRange_Bytes(ub, start, end, HalfPerm) // @ ghost if lastLayerIdx >= 0 { // @ ghost if !offsets[lastLayerIdx].isNil { // @ o := offsets[lastLayerIdx] - // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) + // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) // @ } // @ } - // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) - v1, v2 /*@ , addrAliasesPkt @*/ := p.processEPIC( /*@ p.rawPkt, 
ub == nil, llStart, llEnd @*/ ) + // @ unfold acc(p.d.Mem(), _) + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) + // @ assert !(reveal slayers.IsSupportedPkt(p.rawPkt)) + // @ assert sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) + v1, v2 /*@ , addrAliasesPkt, newAbsPkt @*/ := p.processEPIC( /*@ p.rawPkt, ub == nil, llStart, llEnd, ioLock, ioSharedArg, dp @*/ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, v2 == nil, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() - return v1, v2 /*@, addrAliasesPkt @*/ + return v1, v2 /*@, addrAliasesPkt, newAbsPkt @*/ default: - // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) } + // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, HalfPerm) } // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) - // @ sl.CombineRange_Bytes(ub, start, end, writePerm) + // @ sl.CombineRange_Bytes(ub, start, end, HalfPerm) // @ fold p.d.validResult(processResult{}, false) // @ fold p.sInit() // @ establishMemUnsupportedPathType() - return processResult{}, serrors.WithCtx(unsupportedPathType, "type", pathType) /*@, false @*/ + return processResult{}, serrors.WithCtx(unsupportedPathType, "type", pathType) /*@, false, io.IO_val_Unit{} @*/ } } @@ -1541,11 +1618,12 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ requires acc(&p.ingressID, R20) // @ requires acc(p.d.Mem(), _) // @ requires p.bfdLayer.NonInitMem() -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ preserves sl.Bytes(data, 0, len(data)) // @ ensures acc(&p.d, R20) // @ ensures acc(&p.ingressID, R20) // @ ensures p.bfdLayer.NonInitMem() // @ ensures err != nil ==> err.ErrorMem() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) processInterBFD(oh *onehop.Path, data []byte) (err error) { // @ unfold acc(p.d.Mem(), _) // @ ghost if p.d.bfdSessions != nil { unfold acc(accBfdSession(p.d.bfdSessions), _) } @@ -1575,12 +1653,13 @@ func (p *scionPacketProcessor) processInterBFD(oh *onehop.Path, data []byte) (er // @ requires acc(&p.srcAddr, R20) && acc(p.srcAddr.Mem(), _) // @ requires p.bfdLayer.NonInitMem() // @ requires acc(p.d.Mem(), _) -// @ requires sl.AbsSlice_Bytes(data, 0, len(data)) +// @ requires sl.Bytes(data, 0, len(data)) // @ ensures acc(&p.d, R20) // @ ensures acc(&p.srcAddr, R20) // @ ensures p.bfdLayer.NonInitMem() -// @ ensures sl.AbsSlice_Bytes(data, 0, len(data)) +// @ ensures sl.Bytes(data, 0, len(data)) // @ ensures res != nil ==> res.ErrorMem() +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ unfold acc(p.d.Mem(), _) // @ ghost if p.d.bfdSessions != nil { unfold acc(accBfdSession(p.d.bfdSessions), _) } @@ -1610,6 +1689,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ invariant m != nil ==> acc(m, R20) // @ invariant m != nil ==> forall a *net.UDPAddr :: { a in range(m) } a in range(m) ==> acc(a.Mem(), _) // @ invariant acc(&p.srcAddr, R20) && acc(p.srcAddr.Mem(), _) + // @ decreases len(p.d.internalNextHops) - len(keys) for k, v := range p.d.internalNextHops /*@ with keys @*/ { // @ assert acc(&p.d.internalNextHops, _) // @ assert forall a *net.UDPAddr :: { a in range(m) } a in range(m) ==> acc(a.Mem(), _) @@ -1646,7 +1726,8 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) 
(res error) { // @ requires acc(&p.rawPkt, R1) && ub === p.rawPkt // @ requires acc(&p.path) // @ requires p.scionLayer.Mem(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -1654,29 +1735,48 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ acc(p.lastLayer.Mem(nil), R10) // @ preserves (p.lastLayer !== &p.scionLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R10) -// @ preserves acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R20) // @ preserves acc(&p.infoField) // @ preserves acc(&p.hopField) -// @ preserves acc(&p.segmentChange) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) +// @ ensures acc(&p.segmentChange) +// @ ensures acc(&p.ingressID, R20) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path) // @ ensures acc(&p.rawPkt, R1) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() -// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), 1 - R15) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), 1 - R15) // @ ensures p.d.validResult(respr, addrAliasesPkt) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(ub, 0, len(ub)), R15) // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() -func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { +// contracts for IO-spec +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> +// @ p.scionLayer.EqAbsHeader(ub) && p.scionLayer.ValidScionInitSpec(ub) +// @ requires p.scionLayer.EqPathType(ub) +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +// @ requires let absPkt := absIO_val(p.rawPkt, p.ingressID) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) +// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ newAbsPkt.isIO_val_Unsupported +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ decreases 0 if sync.IgnoreBlockingForTermination() +func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock 
gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { var ok bool // @ unfold acc(p.scionLayer.Mem(ub), R20) @@ -1687,9 +1787,9 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ p.scionLayer.DowngradePerm(ub) // @ establishMemMalformedPath() // @ fold p.d.validResult(processResult{}, false) - return processResult{}, malformedPath /*@ , false @*/ + return processResult{}, malformedPath /*@ , false, io.IO_val_Unit{} @*/ } - return p.process( /*@ ub, llIsNil, startLL, endLL @*/ ) + return p.process( /*@ ub, llIsNil, startLL, endLL , ioLock, ioSharedArg, dp @*/ ) } // @ requires 0 <= startLL && startLL <= endLL && endLL <= len(ub) @@ -1700,7 +1800,7 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ requires acc(&p.rawPkt, R1) && ub === p.rawPkt // @ requires acc(&p.path) // @ requires p.scionLayer.Mem(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -1711,26 +1811,39 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ preserves acc(&p.ingressID, R20) // @ preserves acc(&p.infoField) // @ preserves acc(&p.hopField) -// @ preserves acc(&p.segmentChange) +// @ preserves acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path) // @ ensures acc(&p.rawPkt, R1) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() -// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), 1 - R15) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), 1 - R15) // @ ensures p.d.validResult(respr, addrAliasesPkt) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(ub, 0, len(ub)), R15) // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() -func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { +// contracts for IO-spec +// @ requires p.scionLayer.EqPathType(p.rawPkt) +// @ requires !slayers.IsSupportedPkt(p.rawPkt) +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) && +// @ newAbsPkt.isIO_val_Unsupported +// @ decreases 0 if 
sync.IgnoreBlockingForTermination() +func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { + // @ TODO() // @ unfold acc(p.scionLayer.Mem(ub), R10) epicPath, ok := p.scionLayer.Path.(*epic.Path) if !ok { @@ -1738,7 +1851,7 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b // @ p.scionLayer.DowngradePerm(ub) // @ establishMemMalformedPath() // @ fold p.d.validResult(respr, false) - return processResult{}, malformedPath /*@ , false @*/ + return processResult{}, malformedPath /*@ , false, io.IO_val_Unit{} @*/ } // @ ghost startP := p.scionLayer.PathStartIdx(ub) @@ -1752,7 +1865,7 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b // @ p.scionLayer.DowngradePerm(ub) // @ establishMemMalformedPath() // @ fold p.d.validResult(respr, false) - return processResult{}, malformedPath /*@ , false @*/ + return processResult{}, malformedPath /*@ , false, io.IO_val_Unit{} @*/ } isPenultimate := p.path.IsPenultimateHop( /*@ ubPath[epic.MetadataLen:] @*/ ) @@ -1760,9 +1873,9 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b // @ fold acc(epicPath.Mem(ubPath), R10) // @ fold acc(p.scionLayer.Mem(ub), R10) - result, err /*@ , addrAliases @*/ := p.process( /*@ ub, llIsNil, startLL, endLL @*/ ) + result, err /*@ , addrAliases, newAbsPkt @*/ := p.process( /*@ ub, llIsNil, startLL, endLL, ioLock, ioSharedArg, dp @*/ ) if err != nil { - return result, err /*@ , addrAliases @*/ + return result, err /*@ , addrAliases, newAbsPkt @*/ } // @ TODO() @@ -1771,7 +1884,7 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b if err != nil { // @ p.scionLayer.DowngradePerm(ub) // @ fold p.d.validResult(respr, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, io.IO_val_Unit{} @*/ } timestamp := time.Unix(int64(firstInfo.Timestamp), 0) @@ -1780,7 +1893,7 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b // @ p.scionLayer.DowngradePerm(ub) // @ fold p.d.validResult(respr, false) // TODO(mawyss): Send back SCMP packet - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, io.IO_val_Unit{} @*/ } HVF := epicPath.PHVF @@ -1792,11 +1905,11 @@ func (p *scionPacketProcessor) processEPIC( /*@ ghost ub []byte, ghost llIsNil b if err != nil { // @ p.scionLayer.DowngradePerm(ub) // TODO(mawyss): Send back SCMP packet - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, io.IO_val_Unit{} @*/ } } - return result, nil /*@ , false @*/ + return result, nil /*@ , false, io.IO_val_Unit{} @*/ } // scionPacketProcessor processes packets. 
It contains pre-allocated per-packet @@ -1853,12 +1966,14 @@ type macBuffersT struct { // @ trusted // @ requires false +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported func (p *scionPacketProcessor) packSCMP( typ slayers.SCMPType, code slayers.SCMPCode, scmpP gopacket.SerializableLayer, cause error, -) (processResult, error) { +) (respr processResult, reserr error) { // check invoking packet was an SCMP error: if p.lastLayer.NextLayerType() == slayers.LayerTypeSCMP { @@ -1881,46 +1996,51 @@ func (p *scionPacketProcessor) packSCMP( // @ requires acc(&p.path, R20) // @ requires p.path === p.scionLayer.GetScionPath(ub) // @ requires acc(&p.hopField) && acc(&p.infoField) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) +// @ requires acc(sl.Bytes(ub, 0, len(ub)), R1) +// Preconditions for IO: +// @ requires p.scionLayer.EqAbsHeader(ub) +// @ requires p.scionLayer.ValidScionInitSpec(ub) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), R1) // @ ensures acc(&p.d, R50) // @ ensures acc(p.scionLayer.Mem(ub), R6) // @ ensures acc(&p.path, R20) // @ ensures p.path === p.scionLayer.GetScionPath(ub) // @ ensures acc(&p.hopField) && acc(&p.infoField) // @ ensures respr === processResult{} -// @ ensures reserr == nil ==> ( -// @ let ubPath := p.scionLayer.UBPath(ub) in -// @ let ubScionPath := p.scionLayer.UBScionPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ unfolding acc(p.scionLayer.Path.Mem(ubPath), R50) in -// @ p.path.GetCurrHF(ubScionPath) < p.path.GetNumHops(ubScionPath)) +// @ ensures reserr == nil ==> p.scionLayer.ValidPathMetaData(ub) // @ ensures acc(p.scionLayer.Mem(ub), R6) // @ ensures p.d.validResult(respr, false) -// @ ensures reserr == nil ==> ( -// @ let ubPath := p.scionLayer.UBPath(ub) in -// @ let ubScionPath := p.scionLayer.UBScionPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ unfolding acc(p.scionLayer.Path.Mem(ubPath), R50) in -// @ p.path.GetCurrINF(ubScionPath) < p.path.GetNumINF(ubScionPath)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// Postconditions for IO: +// @ ensures reserr == nil ==> +// @ slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) +// @ ensures old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) +// @ ensures respr.OutPkt == nil // @ decreases func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { var err error // @ unfold acc(p.scionLayer.Mem(ub), R6) // @ defer fold acc(p.scionLayer.Mem(ub), R6) - // @ ghost startP := p.scionLayer.PathStartIdx(ub) - // @ ghost endP := p.scionLayer.PathEndIdx(ub) - // @ ghost ubPath := ub[startP:endP] - + // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R7) // @ defer fold acc(p.scionLayer.Path.Mem(ubPath), R7) // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) - // @ ghost ubScionPath := ub[startScionP:endScionP] - - // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, R1) - // @ ghost defer sl.CombineRange_Bytes(ub, startScionP, endScionP, R1) - p.hopField, err = p.path.GetCurrentHopField( /*@ ubScionPath @*/ ) + // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, R2) + // @ ghost 
defer sl.CombineRange_Bytes(ub, startScionP, endScionP, R2) + // (VerifiedSCION) Due to an incompleteness (https://github.com/viperproject/gobra/issues/770), + // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. + var tmpHopField path.HopField + tmpHopField, err = p.path.GetCurrentHopField( /*@ ubScionPath @*/ ) + p.hopField = tmpHopField + // @ path.AbsMacArrayCongruence(p.hopField.Mac, tmpHopField.Mac) + // @ assert p.hopField.ToIO_HF() == tmpHopField.ToIO_HF() + // @ assert err == nil ==> reveal p.path.CorrectlyDecodedHf(ubScionPath, tmpHopField) + // @ assert err == nil ==> reveal p.path.CorrectlyDecodedHf(ubScionPath, p.hopField) // @ fold p.d.validResult(processResult{}, false) if err != nil { // TODO(lukedirtwalker) parameter problem invalid path? @@ -1931,6 +2051,36 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // TODO(lukedirtwalker) parameter problem invalid path? return processResult{}, err } + // Segments without the Peering flag must consist of at least two HFs: + // https://github.com/scionproto/scion/issues/4524 + // (VerifiedSCION) The version verified here is prior to the support of peering + // links, so we do not check the Peering flag here. + hasSingletonSegment := + // @ unfolding acc(p.path.Mem(ubScionPath), _) in + // @ unfolding acc(p.path.Base.Mem(), _) in + p.path.PathMeta.SegLen[0] == 1 || + p.path.PathMeta.SegLen[1] == 1 || + p.path.PathMeta.SegLen[2] == 1 + if hasSingletonSegment { + // @ establishMemMalformedPath() + return processResult{}, malformedPath + } + if !p.path.CurrINFMatchesCurrHF( /*@ ubScionPath @*/ ) { + // @ establishMemMalformedPath() + return processResult{}, malformedPath + } + // @ p.EstablishEqAbsHeader(ub, startScionP, endScionP) + // @ p.path.EstablishValidPktMetaHdr(ubScionPath) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ absPktFutureLemma(ub) + // @ p.path.DecodingLemma(ubScionPath, p.infoField, p.hopField) + // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubScionPath), + // @ p.infoField.ToAbsInfoField()) + // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubScionPath), + // @ p.hopField.ToIO_HF()) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) + // @ assert old(reveal slayers.IsSupportedPkt(ub)) == reveal slayers.IsSupportedPkt(ub) return processResult{}, nil } @@ -1939,8 +2089,11 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr error) { expiration := util.SecsToTime(p.infoField.Timestamp). 
@@ -1950,8 +2103,8 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ fold p.d.validResult(respr, false) return processResult{}, nil } - // @ TODO() - // TODO: adapt; note that packSCMP always returns an empty addr and conn and + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP + // (VerifiedSCION): adapt; note that packSCMP always returns an empty addr and conn and // when the err is nil, it returns the bytes of p.buffer. This should be a magic wand // that is consumed after sending the reply. For now, we are making this simplifying // assumption, but in the future, we should elaborate the proof for this to not be @@ -1970,19 +2123,31 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr ) } -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) -// @ preserves acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R21) +// @ requires acc(&p.hopField, R20) +// @ requires acc(&p.infoField, R20) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) +// @ ensures acc(&p.ingressID, R21) // @ ensures p.d.validResult(respr, false) -// @ ensures respr.OutPkt != nil ==> reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ ensures respr.OutPkt != nil ==> +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // @ ensures reserr == nil && p.infoField.ConsDir ==> ( // @ p.ingressID == 0 || p.hopField.ConsIngress == p.ingressID) // @ ensures reserr == nil && !p.infoField.ConsDir ==> ( // @ p.ingressID == 0 || p.hopField.ConsEgress == p.ingressID) +// contracts for IO-spec +// @ requires oldPkt.PathNotFullyTraversed() +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ ensures reserr == nil ==> +// @ AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr error) { +func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2 @*/ ) (respr processResult, reserr error) { pktIngressID := p.hopField.ConsIngress errCode := slayers.SCMPCodeUnknownHopFieldIngress if !p.infoField.ConsDir { @@ -1990,7 +2155,7 @@ func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr errCode = slayers.SCMPCodeUnknownHopFieldEgress } if p.ingressID != 0 && p.ingressID != pktIngressID { - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") return p.packSCMP( slayers.SCMPTypeParameterProblem, errCode, @@ -1999,35 +2164,53 @@ func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr "pkt_ingress", pktIngressID, "router_ingress", p.ingressID), ) } + // @ reveal p.EqAbsHopField(oldPkt) + // @ reveal p.EqAbsInfoField(oldPkt) + // @ assert reveal AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) // @ fold p.d.validResult(respr, false) return processResult{}, nil } // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ requires acc(p.scionLayer.Mem(ubScionL), R19) +// @ requires acc(p.scionLayer.Mem(ub), R19) // @ requires acc(&p.path, R20) -// @ requires p.path === p.scionLayer.GetScionPath(ubScionL) -// @ preserves acc(&p.ingressID, R20) -// @ ensures 
acc(p.scionLayer.Mem(ubScionL), R19) +// @ requires p.path === p.scionLayer.GetScionPath(ub) +// @ preserves acc(&p.ingressID, R21) +// @ ensures acc(p.scionLayer.Mem(ub), R19) // @ ensures acc(&p.path, R20) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), R20) +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) +// @ ensures reserr == nil ==> p.DstIsLocalIngressID(ub) +// @ ensures reserr == nil ==> p.LastHopLen(ub) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) { - // @ ghost ubPath := p.scionLayer.UBPath(ubScionL) - // @ ghost ubScionPath := p.scionLayer.UBScionPath(ubScionL) - // @ unfold acc(p.scionLayer.Mem(ubScionL), R20) - // @ defer fold acc(p.scionLayer.Mem(ubScionL), R20) - // @ unfold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20) - // @ defer fold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20) - // @ p.d.getLocalIA() +func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { + // @ ghost ubPath := p.scionLayer.UBPath(ub) + // @ unfold acc(p.scionLayer.Mem(ub), R20) + // @ defer fold acc(p.scionLayer.Mem(ub), R20) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R20) // @ defer fold acc(p.scionLayer.Path.Mem(ubPath), R20) // @ } + // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) + // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) + // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, R50) + // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) + // @ ghost defer sl.CombineRange_Bytes(ub, startScionP, endScionP, R50) + // @ unfold acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R20) + // @ defer fold acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R20) + // @ p.d.getLocalIA() srcIsLocal := (p.scionLayer.SrcIA == p.d.localIA) dstIsLocal := (p.scionLayer.DstIA == p.d.localIA) if p.ingressID == 0 { @@ -2036,25 +2219,39 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) // Note: SCMP error messages triggered by the sibling router may use paths that // don't start with the first hop. 
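For orientation before the checks that follow: the outbound and inbound IA validations reduce to a single predicate. The sketch below is illustrative only and not part of the router; `outbound` stands for `p.ingressID == 0`, and `firstHop`/`lastHop` stand in for `p.path.IsFirstHop`/`p.path.IsLastHop`.

	// validSrcDstIA restates validateSrcDstIA's checks: an outbound packet at
	// its first hop must originate in the local IA and must not be addressed
	// to it; an inbound packet must not claim the local IA as its source, and
	// it is at its last hop exactly when the destination is the local IA.
	func validSrcDstIA(outbound, firstHop, lastHop, srcIsLocal, dstIsLocal bool) bool {
		if outbound {
			return (!firstHop || srcIsLocal) && !dstIsLocal
		}
		return !srcIsLocal && (lastHop == dstIsLocal)
	}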
if p.path.IsFirstHop( /*@ ubScionPath @*/ ) && !srcIsLocal { - // @ TODO() // depends on packSCMP + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidSrcIA() } if dstIsLocal { - // @ TODO() // depends on packSCMP + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidDstIA() } } else { // Inbound if srcIsLocal { - // @ TODO() // depends on packSCMP + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidSrcIA() } if p.path.IsLastHop( /*@ ubScionPath @*/ ) != dstIsLocal { - // @ TODO() // depends on packSCMP + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.invalidDstIA() } + // @ ghost if(p.path.IsLastHopSpec(ubScionPath)) { + // @ p.path.LastHopLemma(ubScionPath) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ } } // @ fold p.d.validResult(processResult{}, false) + + // @ assert (unfolding acc(p.scionLayer.Mem(ub), R55) in + // @ (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in + // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 + // @ assert (unfolding acc(p.scionLayer.Mem(ub), R55) in + // @ (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in + // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubScionPath) + // @ assert reveal p.DstIsLocalIngressID(ub) + // @ assert reveal p.LastHopLen(ub) return processResult{}, nil } @@ -2090,24 +2287,15 @@ func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { // @ requires acc(&p.path, R15) // @ requires acc(p.scionLayer.Mem(ub), R4) // @ requires p.path === p.scionLayer.GetScionPath(ub) -// @ requires acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R21) // @ requires acc(&p.infoField, R4) && acc(&p.hopField, R4) -// @ requires let ubPath := p.scionLayer.UBPath(ub) in -// @ let ubScionPath := p.scionLayer.UBScionPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ unfolding acc(p.scionLayer.Path.Mem(ubPath), R50) in -// @ p.path.GetCurrHF(ubScionPath) <= p.path.GetNumHops(ubScionPath) -// @ requires let ubPath := p.scionLayer.UBPath(ub) in -// @ let ubScionPath := p.scionLayer.UBScionPath(ub) in -// @ unfolding acc(p.scionLayer.Mem(ub), R10) in -// @ unfolding acc(p.scionLayer.Path.Mem(ubPath), R50) in -// @ p.path.GetCurrINF(ubScionPath) <= p.path.GetNumINF(ubScionPath) +// @ requires p.scionLayer.ValidPathMetaData(ub) // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) // @ requires acc(&p.srcAddr, R20) && acc(p.srcAddr.Mem(), _) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R4) +// @ preserves acc(sl.Bytes(ub, 0, len(ub)), R4) // @ ensures acc(&p.path, R15) // @ ensures acc(p.scionLayer.Mem(ub), R4) -// @ ensures acc(&p.ingressID, R20) +// @ ensures acc(&p.ingressID, R21) // @ ensures acc(&p.infoField, R4) && acc(&p.hopField, R4) // @ ensures acc(&p.d, R20) // @ ensures acc(&p.srcAddr, R20) @@ -2144,7 +2332,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ // @ ghost if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } expectedSrc, ok := p.d.internalNextHops[pktIngressID] // @ ghost if ok { - // @ assert expectedSrc in range(p.d.internalNextHops) + // @ assert expectedSrc in 
range(p.d.internalNextHops) // @ unfold acc(expectedSrc.Mem(), _) // @ } // @ unfold acc(p.srcAddr.Mem(), _) @@ -2159,30 +2347,60 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ } // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves acc(&p.ingressID, R20) -// @ preserves acc(&p.segmentChange, R20) -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) -// @ ensures acc(&p.d, R20) +// @ requires acc(&p.segmentChange, R20) +// @ requires acc(&p.ingressID, R21) +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField, R20) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) +// @ ensures acc(&p.ingressID, R21) +// @ ensures acc(&p.segmentChange, R20) +// @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures reserr == nil ==> respr === processResult{} -// @ ensures reserr != nil ==> sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ ensures reserr != nil ==> sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires p.d.WellConfigured() +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires oldPkt.PathNotFullyTraversed() +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ requires p.segmentChange ==> +// @ oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 +// @ requires !p.segmentChange ==> +// @ AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ requires p.segmentChange ==> +// @ AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ ensures reserr == nil ==> p.NoBouncingPkt(oldPkt) +// @ ensures reserr == nil && !p.segmentChange ==> +// @ AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) +// @ ensures reserr == nil && p.segmentChange ==> +// @ oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 +// @ ensures reserr == nil && p.segmentChange ==> +// @ p.ingressID != 0 && AbsValidateEgressIDConstraintXover(oldPkt, dp) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr error) { - pktEgressID := p.egressInterface() +func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { + pktEgressID := p.egressInterface( /*@ oldPkt @*/ ) + // @ reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(pktEgressID)) // @ p.d.getInternalNextHops() // @ if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } _, ih := p.d.internalNextHops[pktEgressID] // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } _, eh := p.d.external[pktEgressID] - if !ih && !eh { + // egress interface must be a known interface + // packet coming from internal interface, must go to an external interface + // packet coming from external interface can go to either internal or external interface + if !ih && !eh || (p.ingressID == 0) && !eh { errCode := slayers.SCMPCodeUnknownHopFieldEgress if !p.infoField.ConsDir { errCode = slayers.SCMPCodeUnknownHopFieldIngress } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP( 
slayers.SCMPTypeParameterProblem, errCode, @@ -2190,27 +2408,34 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e cannotRoute, ) } - + // @ p.d.getDomExternalLemma() + // @ p.EstablishNoBouncingPkt(oldPkt, pktEgressID) // @ p.d.getLinkTypesMem() ingress, egress := p.d.linkTypes[p.ingressID], p.d.linkTypes[pktEgressID] + // @ p.d.LinkTypesLemma(dp) if !p.segmentChange { // Check that the interface pair is valid within a single segment. // No check required if the packet is received from an internal interface. + // @ assert reveal AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) switch { case p.ingressID == 0: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Core && egress == topology.Core: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Parent: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Parent && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil default: // malicious - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidPath, // XXX(matzf) new code InvalidHop? @@ -2219,20 +2444,24 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e "egress_id", pktEgressID, "egress_type", egress)) } } + // @ assert reveal AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) // Check that the interface pair is valid on a segment switch. // Having a segment change received from the internal interface is never valid. 
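The two switch statements around this point encode a fixed transition table over link types. As a standalone sketch (illustrative only; it omits the `p.ingressID == 0` case, which the real within-segment check accepts unconditionally):

	// validLinkTransition mirrors the switches in validateEgressID: within one
	// segment only core-core, child-parent and parent-child pairs are valid;
	// across a segment change (Xover) only core-child, child-core and
	// child-child pairs are valid. Everything else triggers an SCMP error.
	func validLinkTransition(segmentChange bool, ingress, egress topology.LinkType) bool {
		if !segmentChange {
			return ingress == topology.Core && egress == topology.Core ||
				ingress == topology.Child && egress == topology.Parent ||
				ingress == topology.Parent && egress == topology.Child
		}
		return ingress == topology.Core && egress == topology.Child ||
			ingress == topology.Child && egress == topology.Core ||
			ingress == topology.Child && egress == topology.Child
	}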
switch { case ingress == topology.Core && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Core: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil default: - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidSegmentChange, @@ -2242,22 +2471,43 @@ } } -// @ preserves acc(&p.infoField) +// @ requires acc(&p.infoField) // @ requires acc(&p.path, R20) // @ requires acc(p.scionLayer.Mem(ub), R19) // @ requires p.path === p.scionLayer.GetScionPath(ub) -// @ preserves acc(&p.ingressID, R20) -// @ preserves acc(&p.hopField, R20) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.hopField, R20) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.ingressID, R21) +// preconditions for IO: +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires absPkt(ub).PathNotFullyTraversed() +// @ requires acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55) +// @ requires p.LastHopLen(ub) +// @ requires p.EqAbsHopField(absPkt(ub)) +// @ requires p.EqAbsInfoField(absPkt(ub)) +// @ ensures acc(&p.ingressID, R21) +// @ ensures acc(&p.hopField, R20) +// @ ensures sl.Bytes(ub, 0, len(ub)) +// @ ensures acc(&p.infoField) // @ ensures acc(&p.path, R20) // @ ensures acc(p.scionLayer.Mem(ub), R19) // @ ensures err != nil ==> err.ErrorMem() +// postconditions for IO: +// @ ensures acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55) +// @ ensures err == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures err == nil ==> absPkt(ub).PathNotFullyTraversed() +// @ ensures err == nil ==> +// @ absPkt(ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(ub)), path.ifsToIO_ifs(p.ingressID)) +// @ ensures err == nil ==> p.LastHopLen(ub) +// @ ensures err == nil ==> p.EqAbsHopField(absPkt(ub)) +// @ ensures err == nil ==> p.EqAbsInfoField(absPkt(ub)) +// @ ensures err == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) // @ decreases func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte @*/ ) (err error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) - // @ ghost startScion := p.scionLayer.PathScionStartIdx(ub) - // @ ghost endScion := p.scionLayer.PathScionEndIdx(ub) + // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) + // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) // @ unfold acc(p.scionLayer.Mem(ub), R20) // @ defer fold acc(p.scionLayer.Mem(ub), R20) @@ -2269,16 +2519,43 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte // means this comes from this AS itself, so nothing has to be done. // TODO(lukedirtwalker): For packets destined to peer links this shouldn't // be updated.
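	// For reference: UpdateSegID folds the current hop field's MAC into the
	// segment identifier. Assuming SCION's standard update rule (XOR with the
	// first two bytes of the MAC, big endian), the call below is equivalent to
	//
	//	p.infoField.SegID ^= binary.BigEndian.Uint16(p.hopField.Mac[:2])
	//
	// XOR is an involution, so applying the update twice with the same hop
	// field MAC restores the previous SegID.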
+ // @ reveal p.EqAbsInfoField(absPkt(ub)) + // @ reveal p.EqAbsHopField(absPkt(ub)) if !p.infoField.ConsDir && p.ingressID != 0 { - p.infoField.UpdateSegID(p.hopField.Mac) + p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/) + // @ reveal p.LastHopLen(ub) + // @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == + // @ old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) // (VerifiedSCION) the following property is guaranteed by the type system, but Gobra cannot infer it yet // @ assume 0 <= p.path.GetCurrINF(ubScionPath) - // @ sl.SplitRange_Bytes(ub, startScion, endScion, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, startScion, endScion, writePerm) - if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubScionPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubScionPath @*/); err != nil { + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubScionPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubScionPath, @*/); err != nil { + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) return serrors.WrapStr("update info field", err) } - } + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ absPktFutureLemma(ub) + // @ assert absPkt(ub).CurrSeg.UInfo == + // @ old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.LastHopLen(ub) + } + // @ assert absPkt(ub) == reveal AbsUpdateNonConsDirIngressSegID(old(absPkt(ub)), path.ifsToIO_ifs(p.ingressID)) return nil } @@ -2320,28 +2597,37 @@ func (p *scionPacketProcessor) currentHopPointer( /*@ ghost ubScionL []byte @*/ scion.MetaLen + path.InfoLen*p.path.NumINF + path.HopLen*int(p.path.PathMeta.CurrHF)) } +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField, R20) // @ preserves acc(&p.mac, R20) && p.mac != nil && p.mac.Mem() -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) // @ preserves acc(&p.macBuffers.scionInput, R20) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) // @ 
ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures len(p.cachedMac) == path.MACBufferSize -// @ ensures sl.AbsSlice_Bytes(p.cachedMac, 0, len(p.cachedMac)) +// @ ensures sl.Bytes(p.cachedMac, 0, len(p.cachedMac)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires oldPkt.PathNotFullyTraversed() +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ ensures reserr == nil ==> AbsVerifyCurrentMACConstraint(oldPkt, dp) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr error) { +func (p *scionPacketProcessor) verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { fullMac := path.FullMAC(p.mac, p.infoField, p.hopField, p.macBuffers.scionInput) - // @ fold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R20) - // @ defer unfold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R20) - // @ sl.SplitRange_Bytes(fullMac, 0, path.MacLen, R20) - // @ ghost defer sl.CombineRange_Bytes(fullMac, 0, path.MacLen, R20) + // @ fold acc(sl.Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) + // @ defer unfold acc(sl.Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) + // @ sl.SplitRange_Bytes(fullMac, 0, path.MacLen, R21) + // @ ghost defer sl.CombineRange_Bytes(fullMac, 0, path.MacLen, R21) if subtle.ConstantTimeCompare(p.hopField.Mac[:path.MacLen], fullMac[:path.MacLen]) == 0 { - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidHopFieldMAC, @@ -2357,7 +2643,13 @@ func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr e // Add the full MAC to the SCION packet processor, // such that EPIC does not need to recalculate it. 
p.cachedMac = fullMac - + // @ reveal p.EqAbsInfoField(oldPkt) + // @ reveal p.EqAbsHopField(oldPkt) + // (VerifiedSCION) Assumptions for Cryptography: + // @ absInf := p.infoField.ToAbsInfoField() + // @ absHF := p.hopField.ToIO_HF() + // @ AssumeForIO(dp.hf_valid(absInf.ConsDir, absInf.AInfo, absInf.UInfo, absHF)) + // @ reveal AbsVerifyCurrentMACConstraint(oldPkt, dp) // @ fold p.d.validResult(processResult{}, false) return processResult{}, nil } @@ -2365,20 +2657,24 @@ func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr e // @ requires acc(&p.d, R15) // @ requires acc(p.d.Mem(), _) // @ requires p.d.getValSvc() != nil -// @ requires acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) +// @ requires acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15) // @ preserves acc(p.scionLayer.Mem(ubScionL), R10) // @ ensures acc(&p.d, R15) // @ ensures p.d.validResult(respr, addrAliasesUb) -// @ ensures !addrAliasesUb ==> acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) +// @ ensures !addrAliasesUb ==> acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15) // @ ensures !addrAliasesUb && resaddr != nil ==> acc(resaddr.Mem(), _) // @ ensures addrAliasesUb ==> resaddr != nil // @ ensures addrAliasesUb ==> acc(resaddr.Mem(), R15) -// @ ensures addrAliasesUb ==> (acc(resaddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15)) +// @ ensures addrAliasesUb ==> (acc(resaddr.Mem(), R15) --* acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15)) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> !addrAliasesUb // @ ensures reserr != nil ==> reserr.ErrorMem() -func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) { +// contracts for IO-spec +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported +// @ decreases 0 if sync.IgnoreBlockingForTermination() +func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , ghost addrAliasesUb bool @*/) { // (VerifiedSCION) the parameter used to be p.scionLayer, // instead of &p.scionLayer. 
a, err /*@ , addrAliases @*/ := p.d.resolveLocalDst(&p.scionLayer /*@, ubScionL @*/) @@ -2386,9 +2682,9 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( switch { case errors.Is(err, noSVCBackend): // @ ghost if addrAliases { - // @ apply acc(a.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) + // @ apply acc(a.Mem(), R15) --* acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15) // @ } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") r, err := p.packSCMP( slayers.SCMPTypeDestinationUnreachable, slayers.SCMPCodeNoRoute, @@ -2400,17 +2696,31 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( } } -// @ requires acc(&p.path, R20) -// @ requires p.scionLayer.Mem(ub) -// @ requires p.path === p.scionLayer.GetScionPath(ub) -// @ preserves acc(&p.infoField) -// @ preserves acc(&p.hopField, R20) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) -// @ ensures acc(&p.path, R20) -// @ ensures reserr == nil ==> p.scionLayer.Mem(ub) -// @ ensures reserr == nil ==> p.path == p.scionLayer.GetScionPath(ub) -// @ ensures reserr != nil ==> p.scionLayer.NonInitMem() -// @ ensures reserr != nil ==> reserr.ErrorMem() +// @ requires acc(&p.path, R20) +// @ requires p.scionLayer.Mem(ub) +// @ requires p.path === p.scionLayer.GetScionPath(ub) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.infoField) +// @ requires acc(&p.hopField, R20) +// @ requires !p.GetIsXoverSpec(ub) +// Preconditions for IO: +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires absPkt(ub).PathNotFullyTraversed() +// @ requires p.EqAbsHopField(absPkt(ub)) +// @ requires p.EqAbsInfoField(absPkt(ub)) +// @ ensures acc(&p.infoField) +// @ ensures acc(&p.hopField, R20) +// @ ensures sl.Bytes(ub, 0, len(ub)) +// @ ensures acc(&p.path, R20) +// @ ensures reserr == nil ==> p.scionLayer.Mem(ub) +// @ ensures reserr == nil ==> p.path == p.scionLayer.GetScionPath(ub) +// @ ensures reserr != nil ==> p.scionLayer.NonInitMem() +// @ ensures reserr != nil ==> reserr.ErrorMem() +// Postconditions for IO: +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) >= 0 +// @ ensures reserr == nil ==> absPkt(ub) == AbsProcessEgress(old(absPkt(ub))) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) // @ decreases func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) @@ -2421,19 +2731,36 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) - // @ unfold p.scionLayer.Mem(ub) + // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // @ unfold p.scionLayer.Path.Mem(ubPath) + // @ unfold acc(p.scionLayer.Path.Mem(ubPath), 1-R55) + // @ } + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) + // @ reveal p.EqAbsInfoField(absPkt(ub)) + // @ reveal 
p.EqAbsHopField(absPkt(ub)) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ reveal p.scionLayer.ValidHeaderOffset(ub, startScionP) + // @ unfold acc(p.scionLayer.Mem(ub), R55) + // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { + // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R55) // @ } - // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) // we are the egress router and if we go in construction direction we // need to update the SegID. if p.infoField.ConsDir { - p.infoField.UpdateSegID(p.hopField.Mac) + p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/) + // @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == + // @ old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) // @ assume 0 <= p.path.GetCurrINF(ubScionPath) if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubScionPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubScionPath @*/); err != nil { // TODO parameter problem invalid path + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) // @ p.path.DowngradePerm(ubScionPath) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { // @ fold p.scionLayer.Path.NonInitMem() @@ -2445,6 +2772,9 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr } } if err := p.path.IncPath( /*@ ubScionPath @*/ ); err != nil { + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { // @ fold p.scionLayer.Path.NonInitMem() // @ } @@ -2455,44 +2785,112 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr return serrors.WrapStr("incrementing path", err) } // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // @ fold p.scionLayer.Path.Mem(ubPath) + // @ fold acc(p.scionLayer.Path.Mem(ubPath), R55) // @ } - // @ fold p.scionLayer.Mem(ub) + // @ fold acc(p.scionLayer.Mem(ub), R55) + // @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startScionP) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ absPktFutureLemma(ub) + // @ assert absPkt(ub) == reveal AbsProcessEgress(old(absPkt(ub))) + // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { + // @ fold acc(p.scionLayer.Path.Mem(ubPath), 1-R55) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return nil } -// @ requires acc(&p.path, R20) -// @ requires p.scionLayer.Mem(ub) -// @ requires p.path == p.scionLayer.GetScionPath(ub) -// @ preserves acc(&p.segmentChange) && acc(&p.hopField) && acc(&p.infoField) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) -// @ ensures acc(&p.path, R20) -// @ ensures reserr == nil ==> ( -// @ p.scionLayer.Mem(ub) && -// @ 
p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) && -// @ p.scionLayer.GetScionPath(ub) === old(p.scionLayer.GetScionPath(ub)) && -// @ p.path == p.scionLayer.GetScionPath(ub)) -// @ ensures reserr != nil ==> p.scionLayer.NonInitMem() -// @ ensures p.segmentChange -// @ ensures respr === processResult{} -// @ ensures reserr != nil ==> reserr.ErrorMem() +// @ requires acc(&p.path, R20) +// @ requires p.scionLayer.Mem(ub) +// @ requires p.path == p.scionLayer.GetScionPath(ub) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.segmentChange) +// @ requires acc(&p.hopField) +// @ requires acc(&p.infoField) +// Preconditions for IO: +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires p.GetIsXoverSpec(ub) +// @ requires let ubPath := p.scionLayer.UBPath(ub) in +// @ let ubScionPath := p.scionLayer.UBScionPath(ub) in +// @ (unfolding acc(p.scionLayer.Mem(ub), _) in +// @ typeOf(p.scionLayer.Path) == *epic.Path ? +// @ (unfolding acc(p.scionLayer.Path.Mem(ubPath), _) in +// @ p.path.GetBase(ubScionPath)) == currBase : +// @ p.path.GetBase(ubScionPath) == currBase) +// @ requires currBase.Valid() +// @ ensures acc(&p.segmentChange) +// @ ensures acc(&p.hopField) +// @ ensures acc(&p.infoField) +// @ ensures sl.Bytes(ub, 0, len(ub)) +// @ ensures acc(&p.path, R20) +// @ ensures reserr == nil ==> p.scionLayer.Mem(ub) +// @ ensures reserr == nil ==> p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) +// @ ensures reserr == nil ==> p.scionLayer.GetScionPath(ub) == old(p.scionLayer.GetScionPath(ub)) +// @ ensures reserr != nil ==> p.scionLayer.NonInitMem() +// @ ensures p.segmentChange +// @ ensures respr === processResult{} +// @ ensures reserr != nil ==> reserr.ErrorMem() +// Postconditions for IO: +// @ ensures reserr == nil ==> len(old(absPkt(ub)).CurrSeg.Future) == 1 +// @ ensures reserr == nil ==> old(absPkt(ub)).LeftSeg != none[io.IO_seg2] +// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).Future) > 0 +// @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).History) == 0 +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) +// @ ensures reserr == nil ==> absPkt(ub) == AbsDoXover(old(absPkt(ub))) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) +// @ ensures reserr == nil ==> +// @ let ubPath := p.scionLayer.UBPath(ub) in +// @ let ubScionPath := p.scionLayer.UBScionPath(ub) in +// @ (unfolding acc(p.scionLayer.Mem(ub), _) in +// @ p.path == p.scionLayer.GetScionPath(ub) && +// @ (typeOf(p.scionLayer.Path) == *epic.Path ? 
+// @ (unfolding acc(p.scionLayer.Path.Mem(ubPath), _) in +// @ p.path.GetBase(ubScionPath)) == currBase.IncPathSpec() : +// @ p.path.GetBase(ubScionPath) == currBase.IncPathSpec()) && +// @ currBase.IncPathSpec().Valid()) // @ decreases -func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost currBase scion.Base @*/ ) (respr processResult, reserr error) { p.segmentChange = true - // @ ghost startP := p.scionLayer.PathStartIdx(ub) - // @ ghost endP := p.scionLayer.PathEndIdx(ub) + // @ ghost startP := p.scionLayer.PathStartIdx(ub) + // @ ghost endP := p.scionLayer.PathEndIdx(ub) + // @ ghost ubPath := ub[startP:endP] // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) - // @ unfold p.scionLayer.Mem(ub) - // @ ghost ubPath := ub[startP:endP] + + // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // @ unfold p.scionLayer.Path.Mem(ubPath) + // @ unfold acc(p.scionLayer.Path.Mem(ubPath), 1-R55) // @ } // @ ghost ubScionPath := ub[startScionP:endScionP] - // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) + + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) + // @ p.path.XoverLemma(ubScionPath) + // @ reveal p.EqAbsInfoField(absPkt(ub)) + // @ reveal p.EqAbsHopField(absPkt(ub)) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ reveal p.scionLayer.ValidHeaderOffset(ub, startScionP) + // @ unfold acc(p.scionLayer.Mem(ub), R55) + // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { + // @ unfold acc(p.scionLayer.Path.Mem(ubPath), R55) + // @ } if err := p.path.IncPath( /*@ ubScionPath @*/ ); err != nil { // TODO parameter problem invalid path + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { // @ fold p.scionLayer.Path.NonInitMem() // @ } @@ -2501,29 +2899,62 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process // @ fold p.scionLayer.NonInitMem() return processResult{}, serrors.WrapStr("incrementing path", err) } + // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { + // @ fold acc(p.scionLayer.Path.Mem(ubPath), R55) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R55) + // @ assert reveal p.scionLayer.ValidHeaderOffset(ub, startScionP) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ assert len(get(old(absPkt(ub)).LeftSeg).Future) > 0 + // @ assert len(get(old(absPkt(ub)).LeftSeg).History) == 0 + // @ assert slayers.ValidPktMetaHdr(ub) && 
p.scionLayer.EqAbsHeader(ub) + // @ assert absPkt(ub) == reveal AbsDoXover(old(absPkt(ub))) var err error - if p.hopField, err = p.path.GetCurrentHopField( /*@ ubScionPath @*/ ); err != nil { + // (VerifiedSCION) Due to an incompleteness (https://github.com/viperproject/gobra/issues/770), + // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. + var tmpHopField path.HopField + if tmpHopField, err = p.path.GetCurrentHopField( /*@ ubScionPath @*/ ); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // @ fold p.scionLayer.Path.Mem(ubPath) + // @ fold acc(p.scionLayer.Path.Mem(ubPath), 1-R55) // @ } - // @ fold p.scionLayer.Mem(ub) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } + p.hopField = tmpHopField + // @ path.AbsMacArrayCongruence(p.hopField.Mac, tmpHopField.Mac) + // @ assert p.hopField.ToIO_HF() == tmpHopField.ToIO_HF() + // @ assert reveal p.path.CorrectlyDecodedHf(ubScionPath, tmpHopField) + // @ assert reveal p.path.CorrectlyDecodedHf(ubScionPath, p.hopField) if p.infoField, err = p.path.GetCurrentInfoField( /*@ ubScionPath @*/ ); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // @ fold p.scionLayer.Path.Mem(ubPath) + // @ fold acc(p.scionLayer.Path.Mem(ubPath), 1-R55) // @ } - // @ fold p.scionLayer.Mem(ub) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { - // @ fold p.scionLayer.Path.Mem(ubPath) + // @ fold acc(p.scionLayer.Path.Mem(ubPath), 1-R55) // @ } - // @ fold p.scionLayer.Mem(ub) + // @ ghost sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ absPktFutureLemma(ub) + // @ p.path.DecodingLemma(ubScionPath, p.infoField, p.hopField) + // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubScionPath), p.infoField.ToAbsInfoField()) + // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubScionPath), p.hopField.ToIO_HF()) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return processResult{}, nil } @@ -2532,7 +2963,7 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr process // @ requires acc(&p.infoField, R5) && acc(&p.hopField, R5) // @ requires p.path.GetCurrINF(ubPath) <= p.path.GetNumINF(ubPath) // @ requires p.path.GetCurrHF(ubPath) <= p.path.GetNumHops(ubPath) -// @ preserves acc(sl.AbsSlice_Bytes(ubPath, 0, len(ubPath)), R5) +// @ preserves acc(sl.Bytes(ubPath, 0, len(ubPath)), R5) // @ ensures acc(&p.path, R20) // @ ensures acc(p.path.Mem(ubPath), R5) // @ ensures acc(&p.infoField, R5) && acc(&p.hopField, R5) @@ -2557,27 +2988,49 @@ func (p *scionPacketProcessor) ingressInterface( /*@ ghost ubPath []byte @*/ ) u return hop.ConsEgress } -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) +// @ requires acc(&p.infoField, R21) +// @ requires acc(&p.hopField, R21) +// @ ensures acc(&p.infoField, R21) +// @ ensures acc(&p.hopField, R21) +// contracts for IO-spec +// @ requires oldPkt.PathNotFullyTraversed() +// @ requires p.EqAbsInfoField(oldPkt) +// @ requires 
p.EqAbsHopField(oldPkt) +// @ ensures p.EqAbsInfoField(oldPkt) +// @ ensures p.EqAbsHopField(oldPkt) +// @ ensures AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(egress)) // @ decreases -func (p *scionPacketProcessor) egressInterface() uint16 { +func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/ ) (egress uint16) { + // @ reveal p.EqAbsInfoField(oldPkt) + // @ reveal p.EqAbsHopField(oldPkt) if p.infoField.ConsDir { + // @ assert reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(p.hopField.ConsEgress)) return p.hopField.ConsEgress } + // @ assert reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(p.hopField.ConsIngress)) return p.hopField.ConsIngress } // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) -// @ preserves acc(&p.ingressID, R20) +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField, R20) +// @ preserves acc(&p.ingressID, R21) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) // @ ensures acc(&p.d, R20) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() -func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr error) { - egressID := p.egressInterface() +// contracts for IO-spec +// @ requires oldPkt.PathNotFullyTraversed() +// @ requires p.EqAbsInfoField(oldPkt) +// @ requires p.EqAbsHopField(oldPkt) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported +// @ decreases 0 if sync.IgnoreBlockingForTermination() +func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/ ) (respr processResult, reserr error) { + egressID := p.egressInterface( /*@ oldPkt @*/ ) // @ p.d.getBfdSessionsMem() // @ ghost if p.d.bfdSessions != nil { unfold acc(accBfdSession(p.d.bfdSessions), _) } if v, ok := p.d.bfdSessions[egressID]; ok { @@ -2598,7 +3051,7 @@ func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr e Egress: uint64(egressID), } } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP(typ, 0, scmpP, serrors.New("bfd session down")) } } @@ -2611,25 +3064,46 @@ func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr e // @ requires acc(p.scionLayer.Mem(ub), R10) // @ requires p.path === p.scionLayer.GetScionPath(ub) // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.ingressID, R21) +// @ requires acc(&p.hopField) // @ preserves acc(&p.lastLayer, R19) // @ preserves p.lastLayer != nil // @ preserves (&p.scionLayer !== p.lastLayer && llIsNil) ==> // @ acc(p.lastLayer.Mem(nil), R15) // @ preserves (&p.scionLayer !== p.lastLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R15) -// @ preserves acc(&p.ingressID, R20) // @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField) +// @ ensures acc(&p.hopField) +// @ ensures acc(&p.ingressID, R21) +// @ ensures sl.Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) // @ ensures acc(p.scionLayer.Mem(ub), R10) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @
ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires p.DstIsLocalIngressID(ub) +// @ requires p.LastHopLen(ub) +// @ requires absPkt(ub).PathNotFullyTraversed() +// @ requires p.EqAbsHopField(absPkt(ub)) +// @ ensures reserr == nil ==> p.DstIsLocalIngressID(ub) +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> p.LastHopLen(ub) +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) +// @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub)) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) { + // @ reveal p.EqAbsHopField(absPkt(ub)) + // @ assert let fut := absPkt(ub).CurrSeg.Future in + // @ fut == seq[io.IO_HF]{p.hopField.ToIO_HF()} ++ fut[1:] // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) @@ -2655,27 +3129,45 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh // @ } // (VerifiedSCION) the following is guaranteed by the type system, but Gobra cannot prove it yet // @ assume 0 <= p.path.GetCurrHF(ubScionPath) - // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, writePerm) + // @ reveal p.LastHopLen(ub) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubScionPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubScionPath @*/); err != nil { + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) // @ sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("update hop field", err) } - // @ sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) - /*@ - ghost var ubLL []byte - ghost if &p.scionLayer === p.lastLayer { - ubLL = ub - } else if llIsNil { - ubLL = nil - sl.NilAcc_Bytes() - } else { - ubLL = ub[startLL:endLL] - sl.SplitRange_Bytes(ub, startLL, endLL, writePerm) - ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, writePerm) - } - @*/ - return p.handleSCMPTraceRouteRequest(p.ingressID /*@ , ubLL @*/) + // @ sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @
sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ absPktFutureLemma(ub) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.LastHopLen(ub) + // @ assert p.scionLayer.EqAbsHeader(ub) + // @ sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + + // @ ghost var ubLL []byte + // @ ghost if &p.scionLayer === p.lastLayer { + // @ ubLL = ub + // @ } else if llIsNil { + // @ ubLL = nil + // @ sl.NilAcc_Bytes() + // @ } else { + // @ ubLL = ub[startLL:endLL] + // @ sl.SplitRange_Bytes(ub, startLL, endLL, R1) + // @ ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) + // @ } + return p.handleSCMPTraceRouteRequest(p.ingressID /*@, ubLL @*/) } // @ preserves acc(&p.infoField, R20) @@ -2690,39 +3182,58 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) { // @ requires 0 <= startLL && startLL <= endLL && endLL <= len(ub) // @ requires acc(&p.path, R20) -// @ requires acc(p.scionLayer.Mem(ub), R14) +// @ requires acc(p.scionLayer.Mem(ub), R13) // @ requires p.path === p.scionLayer.GetScionPath(ub) // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField) // @ preserves acc(&p.lastLayer, R19) // @ preserves p.lastLayer != nil // @ preserves (&p.scionLayer !== p.lastLayer && llIsNil) ==> // @ acc(p.lastLayer.Mem(nil), R15) // @ preserves (&p.scionLayer !== p.lastLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R15) -// @ preserves acc(&p.ingressID, R20) -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField) +// @ preserves acc(&p.ingressID, R21) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField) +// @ ensures sl.Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) -// @ ensures acc(p.scionLayer.Mem(ub), R14) +// @ ensures acc(p.scionLayer.Mem(ub), R13) // @ ensures acc(&p.d, R20) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires absPkt(ub).PathNotFullyTraversed() +// @ requires p.EqAbsHopField(absPkt(ub)) +// @ requires p.EqAbsInfoField(absPkt(ub)) +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) +// @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub)) +// @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) { + // @ reveal p.EqAbsHopField(absPkt(ub)) + // @ assert let fut := absPkt(ub).CurrSeg.Future in + // @ fut ==
seq[io.IO_HF]{p.hopField.ToIO_HF()} ++ fut[1:] // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) - // @ ghost startScion := p.scionLayer.PathScionStartIdx(ub) - // @ ghost endScion := p.scionLayer.PathScionEndIdx(ub) + // @ ghost startScionP := p.scionLayer.PathScionStartIdx(ub) + // @ ghost endScionP := p.scionLayer.PathScionEndIdx(ub) alert := p.egressRouterAlertFlag() if !*alert { // @ fold p.d.validResult(processResult{}, false) return processResult{}, nil } - egressID := p.egressInterface() + egressID := p.egressInterface( /*@ absPkt(ub) @*/ ) // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if _, ok := p.d.external[egressID]; !ok { @@ -2739,30 +3250,46 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // (VerifiedSCION) the following is guaranteed by the type system, // but Gobra cannot prove it yet // @ assume 0 <= p.path.GetCurrHF(ubScionPath) - // @ sl.SplitRange_Bytes(ub, startScion, endScion, writePerm) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ sl.SplitByIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ sl.Reslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ p.AbsPktToSubSliceAbsPkt(ub, startScionP, endScionP) + // @ p.scionLayer.ValidHeaderOffsetToSubSliceLemma(ub, startScionP) + // @ sl.SplitRange_Bytes(ub, startScionP, endScionP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubScionPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubScionPath @*/); err != nil { - // @ sl.CombineRange_Bytes(ub, startScion, endScion, writePerm) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ sl.CombineRange_Bytes(ub, startScionP, endScionP, writePerm) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("update hop field", err) } - // @ sl.CombineRange_Bytes(ub, startScion, endScion, writePerm) - /*@ - ghost var ubLL []byte - ghost if &p.scionLayer === p.lastLayer { - ubLL = ub - } else if llIsNil { - ubLL = nil - sl.NilAcc_Bytes() - } else { - ubLL = ub[startLL:endLL] - sl.SplitRange_Bytes(ub, startLL, endLL, writePerm) - ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, writePerm) - } - @*/ - return p.handleSCMPTraceRouteRequest(egressID /*@ , ubLL @*/) + // @ sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) + // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) + // @ sl.CombineAtIndex_Bytes(ub, 0, startScionP, slayers.CmnHdrLen, R54) + // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startScionP) + // @ p.SubSliceAbsPktToAbsPkt(ub, startScionP, endScionP) + // @ absPktFutureLemma(ub) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) + // @ sl.CombineRange_Bytes(ub, startScionP, endScionP, HalfPerm) + + // @ ghost var ubLL []byte + // @ ghost if &p.scionLayer === p.lastLayer { + // @ ubLL = ub + // @ } else if llIsNil { + // @ ubLL = nil + // @ sl.NilAcc_Bytes() + // @ } else { + // @ ubLL = ub[startLL:endLL] + // @ sl.SplitRange_Bytes(ub, startLL, endLL, R1) + // @ ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) + // @ } + return p.handleSCMPTraceRouteRequest(egressID /*@, ubLL@*/) } -// @ preserves 
acc(&p.infoField, R20) +// @ preserves acc(&p.infoField, R21) // @ ensures res == &p.hopField.IngressRouterAlert || res == &p.hopField.EgressRouterAlert // @ decreases func (p *scionPacketProcessor) egressRouterAlertFlag() (res *bool) { @@ -2774,15 +3301,18 @@ func (p *scionPacketProcessor) egressRouterAlertFlag() (res *bool) { // @ requires acc(&p.lastLayer, R20) // @ requires p.lastLayer != nil && acc(p.lastLayer.Mem(ubLastLayer), R15) -// @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves sl.AbsSlice_Bytes(ubLastLayer, 0, len(ubLastLayer)) +// @ requires acc(&p.d, R21) && acc(p.d.Mem(), _) +// @ preserves acc(sl.Bytes(ubLastLayer, 0, len(ubLastLayer)), R1) // @ ensures acc(&p.lastLayer, R20) // @ ensures acc(p.lastLayer.Mem(ubLastLayer), R15) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R21) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> -// @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( interfaceID uint16 /*@ , ghost ubLastLayer []byte @*/) (respr processResult, reserr error) { @@ -2795,8 +3325,8 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( scionPld /*@ , start, end @*/ := p.lastLayer.LayerPayload( /*@ ubLastLayer @*/ ) // @ assert scionPld === ubLastLayer[start:end] || scionPld == nil // @ if scionPld == nil { sl.NilAcc_Bytes() } else { - // @ sl.SplitRange_Bytes(ubLastLayer, start, end, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ubLastLayer, start, end, writePerm) + // @ sl.SplitRange_Bytes(ubLastLayer, start, end, R1) + // @ ghost defer sl.CombineRange_Bytes(ubLastLayer, start, end, R1) // @ } // @ gopacket.AssertInvariantNilDecodeFeedback() var scmpH /*@@@*/ slayers.SCMP @@ -2816,8 +3346,8 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( // @ fold scmpP.NonInitMem() // @ unfold scmpH.Mem(scionPld) // @ unfold scmpH.BaseLayer.Mem(scionPld, 4) - // @ sl.SplitRange_Bytes(scionPld, 4, len(scionPld), writePerm) - // @ ghost defer sl.CombineRange_Bytes(scionPld, 4, len(scionPld), writePerm) + // @ sl.SplitRange_Bytes(scionPld, 4, len(scionPld), R1) + // @ ghost defer sl.CombineRange_Bytes(scionPld, 4, len(scionPld), R1) if err := scmpP.DecodeFromBytes(scmpH.Payload, gopacket.NilDecodeFeedback); err != nil { log.Debug("Parsing SCMPTraceroute", "err", err) // @ fold p.d.validResult(processResult{}, false) @@ -2832,16 +3362,21 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( IA: p.d.localIA, Interface: uint64(interfaceID), } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP(slayers.SCMPTypeTracerouteReply, 0, &scmpP, nil) } // @ preserves acc(p.scionLayer.Mem(ubScionL), R20) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) -// @ ensures respr.OutPkt != nil ==> reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) -// @ ensures reserr == nil ==> int(p.scionLayer.GetPayloadLen(ubScionL)) == len(p.scionLayer.GetPayload(ubScionL)) +// @ ensures respr.OutPkt != nil ==> +// @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ ensures reserr == nil ==> 
+// @ int(p.scionLayer.GetPayloadLen(ubScionL)) == len(p.scionLayer.GetPayload(ubScionL)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) { // @ unfold acc(p.scionLayer.Mem(ubScionL), R20) @@ -2850,7 +3385,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ fold p.d.validResult(processResult{}, false) return processResult{}, nil } - // @ TODO() + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidPacketSize, @@ -2869,7 +3404,9 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ requires acc(&p.path, R10) // @ requires p.scionLayer.Mem(ub) // @ requires p.path == p.scionLayer.GetScionPath(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) +// @ requires acc(&p.ingressID, R20) +// @ requires acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -2877,85 +3414,123 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ acc(p.lastLayer.Mem(nil), R10) // @ preserves (p.lastLayer !== &p.scionLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R10) -// @ preserves acc(&p.ingressID, R20) // @ preserves acc(&p.infoField) // @ preserves acc(&p.hopField) -// @ preserves acc(&p.segmentChange) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) +// @ ensures acc(&p.segmentChange) +// @ ensures acc(&p.ingressID, R20) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path, R10) // @ ensures acc(&p.rawPkt, R1) -// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), 1 - R15) +// @ ensures acc(sl.Bytes(ub, 0, len(ub)), 1 - R15) // @ ensures p.d.validResult(respr, addrAliasesPkt) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(ub, 0, len(ub)), R15) // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr == nil ==> p.path == p.scionLayer.GetScionPath(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures reserr != nil ==> reserr.ErrorMem() -func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool @*/) { +// contracts for IO-spec +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires p.scionLayer.EqAbsHeader(ub) && 
p.scionLayer.EqPathType(ub) && p.scionLayer.ValidScionInitSpec(ub) +// @ requires acc(ioLock.LockP(), _) +// @ requires ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +// @ requires let absPkt := absIO_val(ub, p.ingressID) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) +// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(respr.OutPkt, respr.EgressID) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ newAbsPkt.isIO_val_Unsupported +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ decreases 0 if sync.IgnoreBlockingForTermination() +// @ #backend[stateConsolidationMode(6)] +func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock gpointer[gsync.GhostMutex], ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { if r, err := p.parsePath( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ - } + return r, err /*@, false, absReturnErr(r) @*/ + } + // @ ghost var oldPkt io.IO_pkt2 + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ absIO_valLemma(ub, p.ingressID) + // @ oldPkt = absIO_val(ub, p.ingressID).IO_val_Pkt2_2 + // @ } else { + // @ absPktFutureLemma(ub) + // @ oldPkt = absPkt(ub) + // @ } + // @ nextPkt := oldPkt if r, err := p.validateHopExpiry(); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - if r, err := p.validateIngressID(); err != nil { + if r, err := p.validateIngressID( /*@ nextPkt @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } + // @ assert AbsValidateIngressIDConstraint(nextPkt, path.ifsToIO_ifs(p.ingressID)) if r, err := p.validatePktLen( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } if r, err := p.validateTransitUnderlaySrc( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } if r, err := p.validateSrcDstIA( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } if err := p.updateNonConsDirIngressSegID( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, absReturnErr(processResult{}) @*/ } - if r, err := p.verifyCurrentMAC(); err != nil { + // @ assert absPkt(ub) == AbsUpdateNonConsDirIngressSegID(oldPkt, path.ifsToIO_ifs(p.ingressID)) + // @ nextPkt = absPkt(ub) + // @ AbsValidateIngressIDLemma(oldPkt, nextPkt, path.ifsToIO_ifs(p.ingressID)) + if r, err := p.verifyCurrentMAC( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } + // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) if r, err := p.handleIngressRouterAlert( /*@ ub, llIsNil, startLL, endLL @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return 
r, err /*@, false, absReturnErr(r) @*/ } - + // @ assert nextPkt == absPkt(ub) // Inbound: pkts destined to the local IA. // @ p.d.getLocalIA() if /*@ unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in @*/ p.scionLayer.DstIA /*@ ) @*/ == p.d.localIA { + // @ assert p.DstIsLocalIngressID(ub) + // @ assert unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in p.scionLayer.DstIA) == p.d.localIA + // @ p.LocalDstLemma(ub) + // @ assert p.ingressID != 0 + // @ assert len(nextPkt.CurrSeg.Future) == 1 a, r, err /*@, aliasesUb @*/ := p.resolveInbound( /*@ ub @*/ ) if err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, aliasesUb @*/ + return r, err /*@, aliasesUb, absReturnErr(r) @*/ } // @ p.d.getInternal() // @ unfold p.d.validResult(r, aliasesUb) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, aliasesUb) // @ assert ub === p.rawPkt - return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, aliasesUb @*/ + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ } + // @ newAbsPkt = reveal absIO_val(p.rawPkt, 0) + return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, aliasesUb, newAbsPkt @*/ } - // Outbound: pkts leaving the local IA. // BRTransit: pkts leaving from the same BR different interface. - // @ unfold acc(p.scionLayer.Mem(ub), R3) // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost ubScionPath := p.scionLayer.UBScionPath(ub) @@ -2967,77 +3542,112 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ ghost isXover := false if p.path.IsXover( /*@ ubScionPath @*/ ) { // @ isXover = true - // @ ghost if typeOf(p.scionLayer.Path) == *epic.Path { + // @ ghost currBase := p.path.GetBase(ubScionPath) + // @ ghost if isEpic { // @ fold acc(p.scionLayer.Path.Mem(ubPath), R5) // @ } // @ fold acc(p.scionLayer.Mem(ub), R3) - if r, err := p.doXover( /*@ ub @*/ ); err != nil { - // @ fold p.d.validResult(r, false) - return r, err /*@, false @*/ + if r, err := p.doXover( /*@ ub, currBase @*/ ); err != nil { + // @ fold p.d.validResult(processResult{}, false) + return r, err /*@, false, absReturnErr(r) @*/ } + // @ assert absPkt(ub) == AbsDoXover(nextPkt) + // @ AbsValidateIngressIDXoverLemma(nextPkt, AbsDoXover(nextPkt), path.ifsToIO_ifs(p.ingressID)) + // @ nextPkt = absPkt(ub) if r, err := p.validateHopExpiry(); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, serrors.WithCtx(err, "info", "after xover") /*@, false @*/ + return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(r) @*/ } // verify the new block - if r, err := p.verifyCurrentMAC(); err != nil { + if r, err := p.verifyCurrentMAC( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, serrors.WithCtx(err, "info", "after xover") /*@, false @*/ + return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(r) @*/ } + // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) + // @ unfold acc(p.scionLayer.Mem(ub), R3) } // @ ghost if isEpic && !isXover { // @ fold acc(p.scionLayer.Path.Mem(ubPath), R5) // @ } + // @ assert p.path.GetBase(ubScionPath).Valid() + // @ p.path.GetBase(ubScionPath).NotIsXoverAfterIncPath() // @ fold acc(p.scionLayer.Mem(ub), R3) - if r, err := 
p.validateEgressID(); err != nil { + // @ assert p.segmentChange ==> nextPkt.RightSeg != none[io.IO_seg2] + if r, err := p.validateEgressID( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } + // @ assert !p.segmentChange ==> AbsValidateEgressIDConstraint(nextPkt, (p.ingressID != 0), dp) + // @ assert p.segmentChange ==> p.ingressID != 0 && AbsValidateEgressIDConstraintXover(nextPkt, dp) // handle egress router alert before we check if it's up because we want to // send the reply anyway, so that trace route can pinpoint the exact link // that failed. if r, err := p.handleEgressRouterAlert( /*@ ub, llIsNil, startLL, endLL @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - if r, err := p.validateEgressUp(); err != nil { + // @ assert nextPkt == absPkt(ub) + if r, err := p.validateEgressUp( /*@ nextPkt @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(r) @*/ } - egressID := p.egressInterface() + // @ assert nextPkt == absPkt(ub) + egressID := p.egressInterface( /*@ nextPkt @*/ ) + // @ assert AbsEgressInterfaceConstraint(nextPkt, path.ifsToIO_ifs(egressID)) // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if c, ok := p.d.external[egressID]; ok { + // @ p.d.getDomExternalLemma() + // @ p.d.EgressIDNotZeroLemma(egressID, dp) if err := p.processEgress( /*@ ub @*/ ); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, absReturnErr(processResult{}) @*/ } - // @ p.d.InDomainExternalInForwardingMetrics2(egressID) + // @ p.d.InDomainExternalInForwardingMetrics(egressID) + // @ assert absPkt(ub) == AbsProcessEgress(nextPkt) + // @ nextPkt = absPkt(ub) + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ ghost if(!p.segmentChange) { + // @ ExternalEnterOrExitEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ } else { + // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ } + // @ } + // @ newAbsPkt = reveal absIO_val(p.rawPkt, egressID) // @ fold p.d.validResult(processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, false) - return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil /*@, false @*/ + return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } - + // @ p.d.getDomExternalLemma() + // @ p.IngressIDNotZeroLemma(nextPkt, egressID) // ASTransit: pkts leaving from another AS BR. 
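Before the ASTransit branch below, it may help to see the tail of process() as a whole: once the egress interface is known, the packet either leaves on one of this router's own links, is handed to a sibling border router over the internal network, or triggers an SCMP error. A minimal sketch without the ghost events and annotations (identifiers are the ones from the surrounding hunks and are not redeclared here; the ghost return values are omitted):

    // Three-way egress dispatch at the end of process().
    if c, ok := p.d.external[egressID]; ok {
        // Outbound or BRTransit: egress is one of our own interfaces.
        return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil
    }
    if a, ok := p.d.internalNextHops[egressID]; ok {
        // ASTransit: forward over the internal network to the sibling
        // border router that owns the egress interface.
        return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil
    }
    // Neither: answer with an SCMP "unknown hop field egress/ingress" error.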
// @ p.d.getInternalNextHops() // @ ghost if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } if a, ok := p.d.internalNextHops[egressID]; ok { // @ p.d.getInternal() + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ if(!p.segmentChange) { + // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ } else { + // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ } + // @ } + // @ newAbsPkt = reveal absIO_val(p.rawPkt, 0) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, false) - return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, false @*/ + return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } errCode := slayers.SCMPCodeUnknownHopFieldEgress if !p.infoField.ConsDir { errCode = slayers.SCMPCodeUnknownHopFieldIngress } - // @ TODO() // @ p.scionLayer.DowngradePerm(ub) + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP tmp, err := p.packSCMP( slayers.SCMPTypeParameterProblem, errCode, &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, cannotRoute, ) - return tmp, err /*@, false @*/ + return tmp, err /*@, false, absReturnErr(tmp) @*/ } // @ requires acc(&p.rawPkt, R15) @@ -3045,52 +3655,57 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ requires acc(&p.ingressID, R15) // @ requires acc(&p.d, R15) && acc(p.d.Mem(), _) && p.d.WellConfigured() // @ requires p.d.getValSvc() != nil -// @ requires sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) +// @ requires sl.Bytes(p.rawPkt, 0, len(p.rawPkt)) // @ preserves acc(&p.mac, R10) // @ preserves p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) -// @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) +// @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.buffer, R10) && p.buffer != nil && p.buffer.Mem() // @ ensures acc(&p.rawPkt, R15) // @ ensures p.scionLayer.Mem(p.rawPkt) // @ ensures acc(&p.ingressID, R15) // @ ensures acc(&p.d, R15) // @ ensures p.d.validResult(respr, addrAliasesPkt) -// @ ensures acc(sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)), 1 - R15) +// @ ensures acc(sl.Bytes(p.rawPkt, 0, len(p.rawPkt)), 1 - R15) // @ ensures addrAliasesPkt ==> ( // @ respr.OutAddr != nil && // @ let rawPkt := p.rawPkt in -// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), R15))) -// @ ensures !addrAliasesPkt ==> acc(sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)), R15) +// @ (acc(respr.OutAddr.Mem(), R15) --* acc(sl.Bytes(rawPkt, 0, len(rawPkt)), R15))) +// @ ensures !addrAliasesPkt ==> acc(sl.Bytes(p.rawPkt, 0, len(p.rawPkt)), R15) // @ ensures respr.OutPkt !== p.rawPkt && respr.OutPkt != nil ==> -// @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) +// @ sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() -func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { +// contracts for IO-spec +// @ requires p.scionLayer.EqPathType(p.rawPkt) +// @ requires !slayers.IsSupportedPkt(p.rawPkt) +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == 
absIO_val(respr.OutPkt, respr.EgressID) && +// @ newAbsPkt.isIO_val_Unsupported +// @ decreases 0 if sync.IgnoreBlockingForTermination() +func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /*@ , ghost addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { // @ ghost ubScionL := p.rawPkt // @ p.scionLayer.ExtractAcc(ubScionL) s := p.scionLayer // @ ghost ubPath := p.scionLayer.UBPath(ubScionL) - // @ unfold acc(p.scionLayer.Mem(ubScionL), 1-R15) - // @ apply acc(&p.scionLayer, R16) --* acc(p.scionLayer.Mem(ubScionL), R15) // @ unfold acc(p.scionLayer.Mem(ubScionL), R15) + // @ defer fold acc(p.scionLayer.Mem(ubScionL), R15) + // @ apply acc(&p.scionLayer, R16) --* acc(p.scionLayer.Mem(ubScionL), R15) // @ assert s.Path === p.scionLayer.Path - // @ assert s.Path.Mem(ubPath) ohp, ok := s.Path.(*onehop.Path) if !ok { // TODO parameter problem -> invalid path // @ establishMemMalformedPath() - // @ fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) - return processResult{}, malformedPath /*@ , false @*/ + return processResult{}, malformedPath /*@ , false, absReturnErr(processResult{}) @*/ } if /*@ unfolding acc(s.Path.Mem(ubPath), R50) in @*/ !ohp.Info.ConsDir { // TODO parameter problem -> invalid path // @ establishMemMalformedPath() - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr( "OneHop path in reverse construction direction is not allowed", - malformedPath, "srcIA", s.SrcIA, "dstIA", s.DstIA) /*@ , false @*/ + malformedPath, "srcIA", s.SrcIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(processResult{}) @*/ } // OHP leaving our IA @@ -3099,116 +3714,102 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / if !p.d.localIA.Equal(s.SrcIA) { // @ establishCannotRoute() // TODO parameter problem -> invalid path - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/), - "localIA", p.d.localIA, "srcIA", s.SrcIA) /*@ , false @*/ + "localIA", p.d.localIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(processResult{}) @*/ } // @ p.d.getNeighborIAs() neighborIA, ok := p.d.neighborIAs[ /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/] if !ok { // @ establishCannotRoute() // TODO parameter problem invalid interface - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, - "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false @*/ + "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false, absReturnErr(processResult{}) @*/ } if !neighborIA.Equal(s.DstIA) { // @ establishCannotRoute() - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) 
@*/), - "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false @*/ + "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(processResult{}) @*/ } - // @ unfold s.Path.Mem(ubPath) - // @ unfold ohp.FirstHop.Mem() - // @ preserves acc(&ohp.Info, R15) && acc(&ohp.FirstHop, R15) - // @ preserves acc(&p.macBuffers.scionInput, R15) - // @ preserves acc(&p.mac, R15) && p.mac != nil && p.mac.Mem() - // @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) + // @ unfold acc(ohp.Mem(ubPath), R50) + // @ defer fold acc(ohp.Mem(ubPath), R50) + // @ unfold acc(ohp.FirstHop.Mem(), R54) + // @ defer fold acc(ohp.FirstHop.Mem(), R54) + // @ preserves acc(&ohp.Info, R55) && acc(&ohp.FirstHop, R55) + // @ preserves acc(&p.macBuffers.scionInput, R55) + // @ preserves acc(&p.mac, R55) && p.mac != nil && p.mac.Mem() + // @ preserves sl.Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ decreases // @ outline ( mac /*@@@*/ := path.MAC(p.mac, ohp.Info, ohp.FirstHop, p.macBuffers.scionInput) // (VerifiedSCION) introduced separate copy to avoid exposing quantified permissions outside the scope of this outline block. macCopy := mac - // @ fold acc(sl.AbsSlice_Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R20) - // @ fold acc(sl.AbsSlice_Bytes(mac[:], 0, len(mac)), R20) + // @ fold acc(sl.Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R56) + // @ fold acc(sl.Bytes(mac[:], 0, len(mac)), R56) compRes := subtle.ConstantTimeCompare(ohp.FirstHop.Mac[:], mac[:]) == 0 - // @ unfold acc(sl.AbsSlice_Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R20) + // @ unfold acc(sl.Bytes(ohp.FirstHop.Mac[:], 0, len(ohp.FirstHop.Mac[:])), R56) // @ ) if compRes { - // @ defer fold p.scionLayer.Mem(ubScionL) - // @ defer fold s.Path.Mem(ubPath) - // @ defer fold ohp.FirstHop.Mem() // TODO parameter problem -> invalid MAC // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.New("MAC", "expected", fmt.Sprintf("%x", macCopy), - "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false @*/ + "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false, absReturnErr(processResult{}) @*/ } - ohp.Info.UpdateSegID(ohp.FirstHop.Mac) - // @ fold ohp.FirstHop.Mem() - // @ fold s.Path.Mem(ubPath) - // @ fold p.scionLayer.Mem(ubScionL) + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) + // @ unfold acc(p.scionLayer.Mem(ubScionL), 1-R15) + // @ unfold acc(s.Path.Mem(ubPath), 1-R50) + ohp.Info.UpdateSegID(ohp.FirstHop.Mac /*@, ohp.FirstHop.ToIO_HF() @*/) + // @ fold acc(s.Path.Mem(ubPath), 1-R50) + // @ fold acc(p.scionLayer.Mem(ubScionL), 1-R15) + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) // (VerifiedSCION) the second parameter was changed from 's' to 'p.scionLayer' due to the // changes made to 'updateSCIONLayer'. if err := updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ } - // @ unfold p.scionLayer.Mem(ubScionL) - // @ defer fold p.scionLayer.Mem(ubScionL) - // @ unfold s.Path.Mem(ubPath) - // @ defer fold s.Path.Mem(ubPath) - // @ unfold ohp.FirstHop.Mem() - // @ defer fold ohp.FirstHop.Mem() // OHP should always be directed to the correct BR. 
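Note that the MAC check in the outline block above is deliberately constant-time: subtle.ConstantTimeCompare returns 1 only on equality, so compRes == true signals a mismatch. A self-contained sketch of the same check (SCION hop-field MACs are 6 bytes, but the comparison works for any equal-length slices):

    package main

    import (
        "crypto/subtle"
        "fmt"
    )

    // macMatches compares a received hop-field MAC against the recomputed one
    // without early exit, so timing does not leak the position of a mismatch.
    func macMatches(received, computed []byte) bool {
        return subtle.ConstantTimeCompare(received, computed) == 1
    }

    func main() {
        fmt.Println(macMatches([]byte{1, 2, 3, 4, 5, 6}, []byte{1, 2, 3, 4, 5, 6})) // true
        fmt.Println(macMatches([]byte{1, 2, 3, 4, 5, 6}, []byte{1, 2, 3, 4, 5, 7})) // false
    }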
// @ p.d.getExternalMem() // @ ghost if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if c, ok := p.d.external[ohp.FirstHop.ConsEgress]; ok { - // (VerifiedSCION) the following must hold, obviously. - // Unfortunately, Gobra struggles with instantiating the body - // of the function. - // @ assume ohp.FirstHop.ConsEgress in p.d.getDomExternal() - // buffer should already be correct - // (VerifiedSCION) TODO: we need to add a pre to run that says that the - // domain of forwardingMetrics is the same as the one for external + // @ p.d.getDomExternalLemma() + // @ assert ohp.FirstHop.ConsEgress in p.d.getDomExternal() // @ p.d.InDomainExternalInForwardingMetrics(ohp.FirstHop.ConsEgress) // @ fold p.d.validResult(processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, false) return processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, - nil /*@ , false @*/ + nil /*@ , false, reveal absIO_val(respr.OutPkt, respr.EgressID) @*/ } // TODO parameter problem invalid interface // @ establishCannotRoute() // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, "type", "ohp", - "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false @*/ + "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false, absReturnErr(processResult{}) @*/ } - // OHP entering our IA // @ p.d.getLocalIA() if !p.d.localIA.Equal(s.DstIA) { // @ establishCannotRoute() - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, - "localIA", p.d.localIA, "dstIA", s.DstIA) /*@ , false @*/ + "localIA", p.d.localIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(processResult{}) @*/ } // @ p.d.getNeighborIAs() neighborIA := p.d.neighborIAs[p.ingressID] if !neighborIA.Equal(s.SrcIA) { // @ establishCannotRoute() - // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, - "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false @*/ + "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(processResult{}) @*/ } - + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) + // @ unfold acc(p.scionLayer.Mem(ubScionL), 1-R15) // @ unfold s.Path.Mem(ubPath) // @ unfold ohp.SecondHop.Mem() ohp.SecondHop = path.HopField{ @@ -3224,48 +3825,50 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / ohp.SecondHop.Mac = path.MAC(p.mac, ohp.Info, ohp.SecondHop, p.macBuffers.scionInput) // @ fold ohp.SecondHop.Mem() // @ fold s.Path.Mem(ubPath) + // @ fold acc(p.scionLayer.Mem(ubScionL), 1-R15) + // @ assert reveal p.scionLayer.EqPathType(p.rawPkt) // (VerifiedSCION) the second parameter was changed from 's' to 'p.scionLayer' due to the // changes made to 'updateSCIONLayer'. - // @ fold p.scionLayer.Mem(ubScionL) if err := updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ } // (VerifiedSCION) the parameter was changed from 's' to '&p.scionLayer' due to the // changes made to 'resolveLocalDst'. 
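The SecondHop assignment above follows the upstream router: on an entering one-hop path, the ingress border router fills in its own ingress interface, reuses the first hop's expiry, and authenticates the new hop field with its local key. The struct literal itself is elided by the hunk boundary, so the two fields shown here are an assumption carried over from upstream SCION:

    // Sketch: completing a one-hop path at the ingress border router.
    ohp.SecondHop = path.HopField{
        ConsIngress: p.ingressID,          // the interface we received on
        ExpTime:     ohp.FirstHop.ExpTime, // inherit the first hop's lifetime
    }
    // Authenticate the freshly written hop field with this router's key.
    ohp.SecondHop.Mac = path.MAC(p.mac, ohp.Info, ohp.SecondHop, p.macBuffers.scionInput)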
a, err /*@ , addrAliases @*/ := p.d.resolveLocalDst(&p.scionLayer /* s */ /*@ , ubScionL @*/) if err != nil { // @ ghost if addrAliases { - // @ apply acc(a.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) + // @ apply acc(a.Mem(), R15) --* acc(sl.Bytes(ubScionL, 0, len(ubScionL)), R15) // @ } // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, absReturnErr(processResult{}) @*/ } // @ p.d.getInternal() // @ assert p.d.internal != nil ==> acc(p.d.internal.Mem(), _) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, addrAliases) - return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases @*/ + return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases, reveal absIO_val(respr.OutPkt, 0) @*/ } // @ requires acc(d.Mem(), _) // @ requires d.getValSvc() != nil -// @ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ requires acc(sl.Bytes(ub, 0, len(ub)), R15) // @ preserves acc(s.Mem(ub), R14) -// @ ensures !addrAliasesUb ==> acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) +// @ ensures !addrAliasesUb ==> acc(sl.Bytes(ub, 0, len(ub)), R15) // @ ensures !addrAliasesUb && resaddr != nil ==> acc(resaddr.Mem(), _) // @ ensures addrAliasesUb ==> resaddr != nil // @ ensures addrAliasesUb ==> acc(resaddr.Mem(), R15) -// @ ensures addrAliasesUb ==> (acc(resaddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15)) +// @ ensures addrAliasesUb ==> (acc(resaddr.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15)) // @ ensures reserr != nil ==> reserr.ErrorMem() // (VerifiedSCION) the type of 's' was changed from slayers.SCION to *slayers.SCION. This makes // specs a lot easier and makes the implementation faster as well by avoiding passing large data-structures // by value. We should consider merging this into upstream SCION.
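The function below, stripped of its permission accounting, is a small type switch over the parsed destination host. A sketch of the executable core (identifiers as in the surrounding hunks; only the error paths visible in this hunk are shown, and the ghost alias tracking is dropped):

    // Sketch of resolveLocalDst: map the SCION destination host to a
    // concrete UDP endpoint inside the local AS.
    dst, err := s.DstAddr()
    if err != nil {
        return nil, err // destination address failed to parse
    }
    switch v := dst.(type) {
    case addr.HostSVC:
        // Service address: anycast to any registered backend.
        a, ok := d.svc.Any(v.Base())
        if !ok {
            return nil, noSVCBackend
        }
        return a, nil
    case *net.IPAddr:
        // End host: attach the well-known end-host UDP port.
        return addEndhostPort(v), nil
    }
    // (remaining cases elided in this hunk)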
-func (d *DataPlane) resolveLocalDst(s *slayers.SCION /*@, ghost ub []byte @*/) (resaddr *net.UDPAddr, reserr error /*@ , addrAliasesUb bool @*/) { +// @ decreases 0 if sync.IgnoreBlockingForTermination() +func (d *DataPlane) resolveLocalDst(s *slayers.SCION /*@, ghost ub []byte @*/) (resaddr *net.UDPAddr, reserr error /*@ , ghost addrAliasesUb bool @*/) { // @ ghost start, end := s.ExtractAcc(ub) // @ assert s.RawDstAddr === ub[start:end] // @ sl.SplitRange_Bytes(ub, start, end, R15) - // @ assert acc(sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) + // @ assert acc(sl.Bytes(s.RawDstAddr, 0, len(s.RawDstAddr)), R15) dst, err := s.DstAddr() // @ apply acc(s, R16) --* acc(s.Mem(ub), R15) if err != nil { @@ -3280,20 +3883,20 @@ func (d *DataPlane) resolveLocalDst(s *slayers.SCION /*@, ghost ub []byte @*/) ( // @ d.getSvcMem() a, ok := d.svc.Any(v.Base()) if !ok { - // @ apply acc(dst.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R15) + // @ apply acc(dst.Mem(), R15) --* acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R15) // @ sl.CombineRange_Bytes(ub, start, end, R15) // @ establishNoSVCBackend() return nil, noSVCBackend /*@ , false @*/ } - // @ apply acc(dst.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R15) + // @ apply acc(dst.Mem(), R15) --* acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R15) // @ sl.CombineRange_Bytes(ub, start, end, R15) return a, nil /*@ , false @*/ case *net.IPAddr: tmp := addEndhostPort(v) - // @ package acc(tmp.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R15) { + // @ package acc(tmp.Mem(), R15) --* acc(sl.Bytes(ub, 0, len(ub)), R15) { // @ apply acc(tmp.Mem(), R15) --* acc(v.Mem(), R15) // @ assert acc(dst.Mem(), R15) - // @ apply acc(dst.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R15) + // @ apply acc(dst.Mem(), R15) --* acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R15) // @ sl.CombineRange_Bytes(ub, start, end, R15) // @ } return tmp, nil /*@ , true @*/ @@ -3310,14 +3913,14 @@ func addEndhostPort(dst *net.IPAddr) (res *net.UDPAddr) { // @ unfold acc(dst.Mem(), R15) tmp := &net.UDPAddr{IP: dst.IP, Port: topology.EndhostPort} // @ assert forall i int :: { &tmp.IP[i] } 0 <= i && i < len(tmp.IP) ==> acc(&tmp.IP[i], R15) - // @ fold acc(sl.AbsSlice_Bytes(tmp.IP, 0, len(tmp.IP)), R15) + // @ fold acc(sl.Bytes(tmp.IP, 0, len(tmp.IP)), R15) // @ fold acc(tmp.Mem(), R15) // @ package (acc(tmp.Mem(), R15) --* acc(dst.Mem(), R15)) { // @ assert acc(dst, R15) // @ assert acc(tmp, R50) // @ assert dst.IP === tmp.IP // @ unfold acc(tmp.Mem(), R15) - // @ unfold acc(sl.AbsSlice_Bytes(tmp.IP, 0, len(tmp.IP)), R15) + // @ unfold acc(sl.Bytes(tmp.IP, 0, len(tmp.IP)), R15) // @ assert forall i int :: { &tmp.IP[i] } 0 <= i && i < len(tmp.IP) ==> acc(&tmp.IP[i], R15) // @ assert forall i int :: { &dst.IP[i] } 0 <= i && i < len(dst.IP) ==> acc(&dst.IP[i], R15) // @ fold acc(dst.Mem(), R15) @@ -3330,10 +3933,16 @@ func addEndhostPort(dst *net.IPAddr) (res *net.UDPAddr) { // the scion.Raw path. 
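The contract that follows specifies updateSCIONLayer, whose executable core is serialize-then-copy-back: the (possibly modified) SCION header is re-serialized into a scratch buffer and the result overwrites the front of the original packet in place. The shape, minus annotations (gopacket API as used in this file; buffer setup and error wrapping elided):

    // Re-serialize the SCION header and splice it back over the start
    // of the raw packet buffer.
    if err := s.SerializeTo(buffer, gopacket.SerializeOptions{}); err != nil {
        return err
    }
    rawContents := buffer.Bytes()
    // Safe: the serialized header is never longer than the packet itself.
    copy(rawPkt[:len(rawContents)], rawContents)
    return nil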
// @ requires acc(s.Mem(rawPkt), R00) // @ requires s.HasOneHopPath(rawPkt) +// @ requires sl.Bytes(rawPkt, 0, len(rawPkt)) // @ preserves buffer != nil && buffer.Mem() -// @ preserves sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)) +// pres for IO: +// @ requires s.EqPathType(rawPkt) +// @ requires !slayers.IsSupportedPkt(rawPkt) +// @ ensures sl.Bytes(rawPkt, 0, len(rawPkt)) // @ ensures acc(s.Mem(rawPkt), R00) // @ ensures res != nil ==> res.ErrorMem() +// post for IO: +// @ ensures res == nil ==> !slayers.IsSupportedPkt(rawPkt) // @ decreases // (VerifiedSCION) the type of 's' was changed from slayers.SCION to *slayers.SCION. This makes // specs a lot easier and makes the implementation faster as well by avoiding passing large data-structures @@ -3345,23 +3954,26 @@ func updateSCIONLayer(rawPkt []byte, s *slayers.SCION, buffer gopacket.Serialize if err := s.SerializeTo(buffer, gopacket.SerializeOptions{} /*@ , rawPkt @*/); err != nil { return err } + // @ reveal slayers.IsSupportedRawPkt(buffer.View()) // TODO(lukedirtwalker): We should add a method to the scion layers // which can write into the existing buffer, see also the discussion in // https://fsnets.slack.com/archives/C8ADBBG0J/p1592805884250700 rawContents := buffer.Bytes() + // @ assert !(reveal slayers.IsSupportedPkt(rawContents)) // @ s.InferSizeOHP(rawPkt) // @ assert len(rawContents) <= len(rawPkt) - // @ unfold sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)) - // @ unfold acc(sl.AbsSlice_Bytes(rawContents, 0, len(rawContents)), R20) + // @ unfold sl.Bytes(rawPkt, 0, len(rawPkt)) + // @ unfold acc(sl.Bytes(rawContents, 0, len(rawContents)), R20) // (VerifiedSCION) proving that the reslicing operation below is safe // was tricky and required enriching (non-modularly) the invariants of *onehop.Path // and *slayers.SCION.
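The forall assertion right below encodes a plain Go fact the proof relies on: reslicing creates a view, not a copy, so writing through rawPkt[:n] writes through rawPkt itself. A tiny self-contained illustration:

    package main

    import "fmt"

    func main() {
        buf := []byte{1, 2, 3, 4}
        pre := buf[:2] // shares buf's backing array
        pre[0] = 9     // visible through buf as well
        fmt.Println(buf[0] == 9, &buf[0] == &pre[0]) // true true
    }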
// @ assert forall i int :: { &rawPkt[:len(rawContents)][i] }{ &rawPkt[i] } 0 <= i && i < len(rawContents) ==> // @ &rawPkt[i] == &rawPkt[:len(rawContents)][i] copy(rawPkt[:len(rawContents)], rawContents /*@ , R20 @*/) - // @ fold sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)) - // @ fold acc(sl.AbsSlice_Bytes(rawContents, 0, len(rawContents)), R20) + // @ fold sl.Bytes(rawPkt, 0, len(rawPkt)) + // @ fold acc(sl.Bytes(rawContents, 0, len(rawContents)), R20) // @ buffer.RestoreMem(rawContents) + // @ assert !(reveal slayers.IsSupportedPkt(rawPkt)) return nil } @@ -3462,10 +4074,10 @@ func (b *bfdSend) Send(bfd *layers.BFD) error { // @ requires acc(&p.d, _) && acc(p.d.Mem(), _) // @ requires acc(p.scionLayer.Mem(ub), R4) // @ requires p.scionLayer.ValidPathMetaData(ub) -// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.Bytes(ub, 0, len(ub)) // @ requires acc(&p.ingressID, R15) // @ ensures acc(p.scionLayer.Mem(ub), R4) -// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ ensures sl.Bytes(ub, 0, len(ub)) // @ ensures acc(&p.ingressID, R15) // @ decreases func (p *scionPacketProcessor) prepareSCMP( @@ -3504,15 +4116,13 @@ func (p *scionPacketProcessor) prepareSCMP( return nil, serrors.WithCtx(cannotRoute, "details", "unsupported path type", "path type", pathType) } - /*@ - scionBuf := epicPath.GetUnderlyingScionPathBuf(ubPath) - unfold acc(epicPath.Mem(ubPath), R4) - assert ubPath[epic.MetadataLen:] === scionBuf - epicPathUb = ubPath - ubPath = scionBuf - startP += epic.MetadataLen - assert ubPath === ub[startP:endP] - @*/ + // @ scionBuf := epicPath.GetUnderlyingScionPathBuf(ubPath) + // @ unfold acc(epicPath.Mem(ubPath), R4) + // @ assert ubPath[epic.MetadataLen:] === scionBuf + // @ epicPathUb = ubPath + // @ ubPath = scionBuf + // @ startP += epic.MetadataLen + // @ assert ubPath === ub[startP:endP] path = epicPath.ScionPath // @ pathFromEpic = true default: @@ -3520,46 +4130,40 @@ func (p *scionPacketProcessor) prepareSCMP( return nil, serrors.WithCtx(cannotRoute, "details", "unsupported path type", "path type", pathType) } - /*@ - assert pathType == scion.PathType || pathType == epic.PathType - assert typeOf(p.scionLayer.Path) == type[*scion.Raw] || typeOf(p.scionLayer.Path) == type[*epic.Path] - assert !pathFromEpic ==> typeOf(p.scionLayer.Path) == type[*scion.Raw] - assert pathFromEpic ==> typeOf(p.scionLayer.Path) == type[*epic.Path] - sl.SplitRange_Bytes(ub, startP, endP, writePerm) - @*/ + // @ assert pathType == scion.PathType || pathType == epic.PathType + // @ assert typeOf(p.scionLayer.Path) == type[*scion.Raw] || typeOf(p.scionLayer.Path) == type[*epic.Path] + // @ assert !pathFromEpic ==> typeOf(p.scionLayer.Path) == type[*scion.Raw] + // @ assert pathFromEpic ==> typeOf(p.scionLayer.Path) == type[*epic.Path] + // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) decPath, err := path.ToDecoded( /*@ ubPath @*/ ) if err != nil { - /*@ - sl.CombineRange_Bytes(ub, startP, endP, writePerm) - ghost if pathFromEpic { - epicPath := p.scionLayer.Path.(*epic.Path) - assert acc(path.Mem(ubPath), R4) - fold acc(epicPath.Mem(epicPathUb), R4) - } else { - rawPath := p.scionLayer.Path.(*scion.Raw) - assert acc(path.Mem(ubPath), R4) - assert acc(rawPath.Mem(ubPath), R4) - } - fold acc(p.scionLayer.Mem(ub), R4) - @*/ + // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ ghost if pathFromEpic { + // @ epicPath := p.scionLayer.Path.(*epic.Path) + // @ assert acc(path.Mem(ubPath), R4) + // @ fold acc(epicPath.Mem(epicPathUb), R4) + // @ } else { + // @ rawPath := 
p.scionLayer.Path.(*scion.Raw) + // @ assert acc(path.Mem(ubPath), R4) + // @ assert acc(rawPath.Mem(ubPath), R4) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R4) return nil, serrors.Wrap(cannotRoute, err, "details", "decoding raw path") } // @ ghost rawPath := path.RawBufferMem(ubPath) revPathTmp, err := decPath.Reverse( /*@ rawPath @*/ ) if err != nil { - /*@ - sl.CombineRange_Bytes(ub, startP, endP, writePerm) - ghost if pathFromEpic { - epicPath := p.scionLayer.Path.(*epic.Path) - assert acc(path.Mem(ubPath), R4) - fold acc(epicPath.Mem(epicPathUb), R4) - } else { - rawPath := p.scionLayer.Path.(*scion.Raw) - assert acc(path.Mem(ubPath), R4) - assert acc(rawPath.Mem(ubPath), R4) - } - fold acc(p.scionLayer.Mem(ub), R4) - @*/ + // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ ghost if pathFromEpic { + // @ epicPath := p.scionLayer.Path.(*epic.Path) + // @ assert acc(path.Mem(ubPath), R4) + // @ fold acc(epicPath.Mem(epicPathUb), R4) + // @ } else { + // @ rawPath := p.scionLayer.Path.(*scion.Raw) + // @ assert acc(path.Mem(ubPath), R4) + // @ assert acc(rawPath.Mem(ubPath), R4) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R4) return nil, serrors.Wrap(cannotRoute, err, "details", "reversing path for SCMP") } // @ assert revPathTmp.Mem(rawPath) @@ -3569,19 +4173,17 @@ func (p *scionPacketProcessor) prepareSCMP( // Revert potential path segment switches that were done during processing. if revPath.IsXover( /*@ rawPath @*/ ) { if err := revPath.IncPath( /*@ rawPath @*/ ); err != nil { - /*@ - sl.CombineRange_Bytes(ub, startP, endP, writePerm) - ghost if pathFromEpic { - epicPath := p.scionLayer.Path.(*epic.Path) - assert acc(path.Mem(ubPath), R4) - fold acc(epicPath.Mem(epicPathUb), R4) - } else { - rawPath := p.scionLayer.Path.(*scion.Raw) - assert acc(path.Mem(ubPath), R4) - assert acc(rawPath.Mem(ubPath), R4) - } - fold acc(p.scionLayer.Mem(ub), R4) - @*/ + // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ ghost if pathFromEpic { + // @ epicPath := p.scionLayer.Path.(*epic.Path) + // @ assert acc(path.Mem(ubPath), R4) + // @ fold acc(epicPath.Mem(epicPathUb), R4) + // @ } else { + // @ rawPath := p.scionLayer.Path.(*scion.Raw) + // @ assert acc(path.Mem(ubPath), R4) + // @ assert acc(rawPath.Mem(ubPath), R4) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R4) return nil, serrors.Wrap(cannotRoute, err, "details", "reverting cross over for SCMP") } } @@ -3592,7 +4194,7 @@ func (p *scionPacketProcessor) prepareSCMP( _, external := p.d.external[p.ingressID] if external { // @ requires revPath.Mem(rawPath) - // @ requires revPath.ValidCurrIdxs(rawPath) + // @ requires revPath.GetBase(rawPath).Valid() // @ ensures revPath.Mem(rawPath) // @ decreases // @ outline( @@ -3602,25 +4204,23 @@ func (p *scionPacketProcessor) prepareSCMP( if infoField.ConsDir { hopField := /*@ unfolding acc(revPath.HopFields[revPath.PathMeta.CurrHF].Mem(), _) in @*/ revPath.HopFields[revPath.PathMeta.CurrHF] - infoField.UpdateSegID(hopField.Mac) + infoField.UpdateSegID(hopField.Mac /*@, hopField.ToIO_HF() @*/) } // @ fold revPath.Base.Mem() // @ fold revPath.Mem(rawPath) // @ ) if err := revPath.IncPath( /*@ rawPath @*/ ); err != nil { - /*@ - sl.CombineRange_Bytes(ub, startP, endP, writePerm) - ghost if pathFromEpic { - epicPath := p.scionLayer.Path.(*epic.Path) - assert acc(path.Mem(ubPath), R4) - fold acc(epicPath.Mem(epicPathUb), R4) - } else { - rawPath := p.scionLayer.Path.(*scion.Raw) - assert acc(path.Mem(ubPath), R4) - assert acc(rawPath.Mem(ubPath), R4) - } - fold 
acc(p.scionLayer.Mem(ub), R4) - @*/ + // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ ghost if pathFromEpic { + // @ epicPath := p.scionLayer.Path.(*epic.Path) + // @ assert acc(path.Mem(ubPath), R4) + // @ fold acc(epicPath.Mem(epicPathUb), R4) + // @ } else { + // @ rawPath := p.scionLayer.Path.(*scion.Raw) + // @ assert acc(path.Mem(ubPath), R4) + // @ assert acc(rawPath.Mem(ubPath), R4) + // @ } + // @ fold acc(p.scionLayer.Mem(ub), R4) return nil, serrors.Wrap(cannotRoute, err, "details", "incrementing path for SCMP") } } @@ -3692,13 +4292,13 @@ func (p *scionPacketProcessor) prepareSCMP( // Returns the last decoded layer. // @ requires base != nil && base.NonInitMem() // @ requires forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> -// @ (acc(&opts[i], R10) && opts[i] != nil && opts[i].NonInitMem()) +// @ (acc(&opts[i], R10) && opts[i] != nil && opts[i].NonInitMem()) // Due to Viper's very strict injectivity constraints: // @ requires forall i, j int :: { &opts[i], &opts[j] } 0 <= i && i < j && j < len(opts) ==> -// @ opts[i] !== opts[j] -// @ preserves sl.AbsSlice_Bytes(data, 0, len(data)) +// @ opts[i] !== opts[j] +// @ preserves acc(sl.Bytes(data, 0, len(data)), R39) // @ ensures forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> -// @ (acc(&opts[i], R10) && opts[i] != nil) +// @ (acc(&opts[i], R10) && opts[i] != nil) // @ ensures -1 <= idx && idx < len(opts) // @ ensures len(processed) == len(opts) // @ ensures len(offsets) == len(opts) @@ -3707,19 +4307,24 @@ func (p *scionPacketProcessor) prepareSCMP( // @ ensures reterr == nil && 0 <= idx ==> retl === opts[idx] // @ ensures reterr == nil ==> retl != nil // @ ensures reterr == nil ==> base.Mem(data) +// @ ensures reterr == nil && typeOf(base.GetPath(data)) == *scion.Raw ==> +// @ base.EqAbsHeader(data) && base.ValidScionInitSpec(data) +// @ ensures reterr == nil ==> base.EqPathType(data) // @ ensures forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> -// @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) +// @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) // @ ensures reterr == nil ==> forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> -// @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(data[offsets[i].start:offsets[i].end])) +// @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(data[offsets[i].start:offsets[i].end])) // @ ensures reterr == nil ==> forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> -// @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) +// @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) // @ ensures reterr == nil ==> forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> -// @ (!processed[i] ==> opts[i].NonInitMem()) +// @ (!processed[i] ==> opts[i].NonInitMem()) // @ ensures reterr != nil ==> base.NonInitMem() // @ ensures reterr != nil ==> (forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> opts[i].NonInitMem()) // @ ensures reterr != nil ==> reterr.ErrorMem() // @ decreases -func decodeLayers(data []byte, base gopacket.DecodingLayer, +// (VerifiedSCION) originally, `base` was declared with type `gopacket.DecodingLayer`. This is unnecessarily complicated for a private function +// that is only called once with a parameter of type `*SCION`, and leads to more annoying post-conditions.
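For orientation, the helper declared next implements gopacket's usual chained-decoding protocol. Without the ghost offset/processed bookkeeping (and with the layerClassTmp caching inlined as opt.CanDecode()), its shape is roughly:

    // Sketch of decodeLayers: decode base, then let each optional layer
    // consume the payload if it can handle the announced next layer type.
    func decodeLayersSketch(data []byte, base gopacket.DecodingLayer,
        opts ...gopacket.DecodingLayer) (gopacket.DecodingLayer, error) {

        if err := base.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
            return nil, err
        }
        last := base
        for _, opt := range opts {
            if opt.CanDecode().Contains(last.NextLayerType()) {
                if err := opt.DecodeFromBytes(last.LayerPayload(), gopacket.NilDecodeFeedback); err != nil {
                    return nil, err
                }
                last = opt
            }
        }
        return last, nil
    }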
+func decodeLayers(data []byte, base *slayers.SCION, opts ...gopacket.DecodingLayer) (retl gopacket.DecodingLayer, reterr error /*@ , ghost processed seq[bool], ghost offsets seq[offsetPair], ghost idx int @*/) { // @ processed = seqs.NewSeqBool(len(opts)) @@ -3729,15 +4334,18 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, if err := base.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil { return nil, err /*@ , processed, offsets, idx @*/ } - last := base + var last gopacket.DecodingLayer = base optsSlice := ([](gopacket.DecodingLayer))(opts) // @ ghost oldData := data // @ ghost oldStart := 0 // @ ghost oldEnd := len(data) - // @ invariant sl.AbsSlice_Bytes(oldData, 0, len(oldData)) + // @ invariant acc(sl.Bytes(oldData, 0, len(oldData)), R39) // @ invariant base.Mem(oldData) + // @ invariant typeOf(base.GetPath(oldData)) == *scion.Raw ==> + // @ base.EqAbsHeader(oldData) && base.ValidScionInitSpec(oldData) + // @ invariant base.EqPathType(oldData) // @ invariant 0 < len(opts) ==> 0 <= i0 && i0 <= len(opts) // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> acc(&opts[i], R10) // @ invariant forall i, j int :: {&opts[i], &opts[j]} 0 <= i && i < j && j < len(opts) ==> opts[i] !== opts[j] @@ -3748,15 +4356,15 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, // @ invariant idx == -1 ==> (last === base && oldStart == 0 && oldEnd == len(oldData)) // @ invariant 0 <= idx ==> (processed[idx] && last === opts[idx]) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) + // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(oldData[offsets[i].start:offsets[i].end])) + // @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(oldData[offsets[i].start:offsets[i].end])) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) + // @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 < len(opts) && i0 <= i && i < len(opts) ==> - // @ !processed[i] + // @ !processed[i] // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ (!processed[i] ==> opts[i].NonInitMem()) + // @ (!processed[i] ==> opts[i].NonInitMem()) // @ invariant gopacket.NilDecodeFeedback.Mem() // @ invariant 0 <= oldStart && oldStart <= oldEnd && oldEnd <= len(oldData) // @ decreases len(opts) - i0 @@ -3766,11 +4374,11 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, // @ ghost var pos offsetPair // @ ghost var ub []byte // @ ghost if idx == -1 { - // @ pos = offsetPair{0, len(oldData), false} - // @ ub = oldData + // @ pos = offsetPair{0, len(oldData), false} + // @ ub = oldData // @ } else { - // @ pos = offsets[idx] - // @ if pos.isNil { ub = nil } else { ub = oldData[pos.start:pos.end] } + // @ pos = offsets[idx] + // @ if pos.isNil { ub = nil } else { ub = oldData[pos.start:pos.end] } // @ } if layerClassTmp.Contains(last.NextLayerType( /*@ ub @*/ )) { data /*@ , start, end @*/ := last.LayerPayload( /*@ ub @*/ ) @@ -3780,38 +4388,37 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, // @ ghost if data == nil { 
// @ sl.NilAcc_Bytes() // @ } else { - // @ sl.SplitRange_Bytes(oldData, oldStart, oldEnd, writePerm) + // @ sl.SplitRange_Bytes(oldData, oldStart, oldEnd, R40) // @ } if err := opt.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil { - // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, writePerm) } + // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, R40) } // @ base.DowngradePerm(oldData) // ghost clean-up: // @ ghost - // @ invariant 0 <= i0 && i0 <= len(opts) - // @ invariant -1 <= c && c <= i0 + // @ invariant -1 <= c && c < i0 // @ invariant len(processed) == len(opts) // @ invariant len(offsets) == len(opts) // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> acc(&opts[i], R10) // @ invariant forall i, j int :: {&opts[i], &opts[j]} 0 <= i && i < j && j < len(opts) ==> opts[i] !== opts[j] // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> opts[i] != nil // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(oldData))) + // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(oldData))) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(oldData[offsets[i].start:offsets[i].end])) + // @ ((processed[i] && !offsets[i].isNil) ==> opts[i].Mem(oldData[offsets[i].start:offsets[i].end])) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) + // @ ((processed[i] && offsets[i].isNil) ==> opts[i].Mem(nil)) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> - // @ (!processed[i] ==> opts[i].NonInitMem()) + // @ (!processed[i] ==> opts[i].NonInitMem()) // @ invariant forall i int :: {&opts[i]}{processed[i]} 0 < len(opts) && c < i && i < len(opts) ==> - // @ !processed[i] + // @ !processed[i] // @ decreases c // @ for c := i0-1; 0 <= c; c=c-1 { - // @ if processed[c] { - // @ off := offsets[c] - // @ if off.isNil { + // @ if processed[c] { + // @ off := offsets[c] + // @ if off.isNil { // @ opts[c].DowngradePerm(nil) - // @ } else { + // @ } else { // @ opts[c].DowngradePerm(oldData[off.start:off.end]) // @ } // @ } @@ -3822,7 +4429,7 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, // @ processed[i0] = true // @ ghost offsets[i0] = offsetPair{oldStart, oldEnd, data == nil} // @ idx = i0 - // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, writePerm) } + // @ ghost if data != nil { sl.CombineRange_Bytes(oldData, oldStart, oldEnd, R40) } last = opt } } diff --git a/router/dataplane_concurrency_model.gobra b/router/dataplane_concurrency_model.gobra index 5a8223130..4fdbc540d 100644 --- a/router/dataplane_concurrency_model.gobra +++ b/router/dataplane_concurrency_model.gobra @@ -17,14 +17,16 @@ package router import ( - "sync" - io "github.com/scionproto/scion/verification/io" + gsync "verification/utils/ghost_sync" + io "verification/io" ) +// Never use `==` for comparisons! Because this is a ghost structure, only the ghost comparison (`===`) +// is meaningful. 
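The warning above has a direct Go analogue: for pointers, == already means identity, while for the values they point to it means structural equality; ghost structures holding gpointer fields only support the identity reading, which Gobra writes as ===. A small illustration:

    package main

    import "fmt"

    func main() {
        a, b := new(int), new(int) // two distinct cells, both holding 0
        fmt.Println(*a == *b)      // true: structurally equal contents
        fmt.Println(a == b)        // false: different identities (Gobra's ===)
    }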
type SharedArg struct { - Place *io.Place // Existential for the current place - State *io.IO_dp3s_state_local // Existential for the current model state - IBufY, OBufY ElemRA // Parameters of the algebra + ghost Place gpointer[io.Place] // Existential for the current place + ghost State gpointer[io.IO_dp3s_state_local] // Existential for the current model state + ghost IBufY, OBufY ElemRA // Parameters of the algebra } pred SharedInv(ghost dp io.DataPlaneSpec, ghost y SharedArg) { @@ -36,13 +38,13 @@ pred SharedInv(ghost dp io.DataPlaneSpec, ghost y SharedArg) { // initialize the shared invariant: ghost requires io.token(p) && dp.dp3s_iospec_ordered(s, p) -ensures m.LockP() && m.LockInv() == SharedInv!< dp, y !>; +ensures m.LockP() && m.LockInv() == SharedInv!< dp, y !> decreases func InitSharedInv( dp io.DataPlaneSpec, p io.Place, - s io.IO_dp3s_state_local) (m *sync.Mutex, y SharedArg) { - mV@ := sync.Mutex{} + s io.IO_dp3s_state_local) (m gpointer[gsync.GhostMutex], y SharedArg) { + mV@ := gsync.GhostMutex{} m = &mV pE@ := p sE@ := s @@ -98,8 +100,8 @@ pure func MultiReadBioCorrectIfs( } && MultiReadBioCorrectIfs(io.dp3s_iospec_bio3s_recv_T(t), expectedPkts-1, k) } - ghost +opaque requires 0 <= expectedPkts && MultiReadBio(t, expectedPkts) ensures len(res) == expectedPkts decreases expectedPkts @@ -160,8 +162,10 @@ requires ElemAuth(s.ibuf, y.IBufY) && ElemAuth(s.obuf, y.OBufY) ensures MultiReadBio(t, n) ensures MultiReadBioUpd(t, n, s) == old(MultiReadBioUpd(t, n, s)) ensures MultiReadBioNext(t, n) == old(MultiReadBioNext(t, n)) -ensures ElemAuth(MultiReadBioUpd(t, n, s).ibuf, y.IBufY) && ElemAuth(MultiReadBioUpd(t, n, s).obuf, y.OBufY) -ensures 0 <= n && MultiReadBioCorrectIfs(t, n, k) ==> MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n)) +ensures ElemAuth(MultiReadBioUpd(t, n, s).ibuf, y.IBufY) +ensures ElemAuth(MultiReadBioUpd(t, n, s).obuf, y.OBufY) +ensures 0 <= n && MultiReadBioCorrectIfs(t, n, k) ==> + MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n)) decreases n func MultiUpdateElemWitness( t io.Place, @@ -181,6 +185,7 @@ func MultiUpdateElemWitness( } if 0 <= n && MultiReadBioCorrectIfs(t, n, k) { + reveal MultiReadBioIO_val(t, n) fold MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n)) } } @@ -216,6 +221,16 @@ func MultiElemWitnessConv(y ElemRA,k Key, es seq[io.IO_val]) { multiElemWitnessConvAux(y,k,es,0) } +ghost +requires 0 <= currIdx && currIdx < len(es) +requires MultiElemWitnessWithIndex(y, k, es, currIdx) +ensures es[currIdx].isIO_val_Pkt2 ==> ElemWitness(y, k, es[currIdx].IO_val_Pkt2_2) +ensures MultiElemWitnessWithIndex(y, k, es, currIdx + 1) +decreases +func MultiElemWitnessStep(y ElemRA, k Key, es seq[io.IO_val], currIdx int) { + unfold MultiElemWitnessWithIndex(y, k, es, currIdx) +} + ghost requires i >= 0 requires MultiElemWitness(y,k,es[i:]) diff --git a/router/dataplane_spec.gobra b/router/dataplane_spec.gobra index ed9fe6dfa..5361ad9c2 100644 --- a/router/dataplane_spec.gobra +++ b/router/dataplane_spec.gobra @@ -68,7 +68,7 @@ pred (d *DataPlane) Mem() { (d.svc != nil ==> d.svc.Mem()) && (d.macFactory != nil ==> ( acc(d.key) && - acc(sl.AbsSlice_Bytes(*d.key, 0, len(*d.key)), _) && + acc(sl.Bytes(*d.key, 0, len(*d.key)), _) && scrypto.ValidKeyForHash(*d.key) && d.macFactory implements MacFactorySpec{d.key})) && (d.bfdSessions != nil ==> accBfdSession(d.bfdSessions)) && @@ -145,9 +145,10 @@ pred (p *scionPacketProcessor) initMem() { acc(&p.bfdLayer) } -requires acc(key, _) && acc(sl.AbsSlice_Bytes(*key, 0, len(*key)), _) +// This is used as a 
signature, not as an assumed function. +requires acc(key, _) && acc(sl.Bytes(*key, 0, len(*key)), _) requires scrypto.ValidKeyForHash(*key) -ensures acc(key, _) && acc(sl.AbsSlice_Bytes(*key, 0, len(*key)), _) +ensures acc(key, _) && acc(sl.Bytes(*key, 0, len(*key)), _) ensures res != nil && res.Mem() decreases func MacFactorySpec(ghost key *[]byte) (res hash.Hash) @@ -204,6 +205,21 @@ pure func (d *DataPlane) getValForwardingMetrics() map[uint16]forwardingMetrics return unfolding acc(d.Mem(), _) in d.forwardingMetrics } +ghost +pure +requires acc(p.sInit(), _) +decreases +func (p *scionPacketProcessor) getIngressID() uint16 { + return unfolding acc(p.sInit(), _) in p.ingressID +} + +ghost +requires acc(d.Mem(), _) +decreases +pure func (d *DataPlane) getMacFactory() func() hash.Hash { + return unfolding acc(d.Mem(), _) in d.macFactory +} + ghost requires acc(d.Mem(), _) decreases @@ -227,6 +243,7 @@ pure func (d *DataPlane) GetDomInternalNextHops() set[uint16] { } ghost +opaque requires acc(d.Mem(), _) decreases pure func (d *DataPlane) getDomExternal() set[uint16] { @@ -237,12 +254,33 @@ pure func (d *DataPlane) getDomExternal() set[uint16] { domain(d.external)) } +ghost +requires acc(d.Mem(), _) +decreases +pure func (d *DataPlane) getDomNeighborIAs() set[uint16] { + return unfolding acc(d.Mem(), _) in + d.neighborIAs == nil ? + set[uint16]{} : domain(d.neighborIAs) +} + +ghost +requires acc(d.Mem(), _) +decreases +pure func (d *DataPlane) getDomLinkTypes() set[uint16] { + return unfolding acc(d.Mem(), _) in + d.linkTypes == nil ? + set[uint16]{} : domain(d.linkTypes) +} + ghost opaque requires acc(d.Mem(), _) decreases pure func (d *DataPlane) WellConfigured() bool { - return d.getDomExternal() subset d.getDomForwardingMetrics() + return d.getDomNeighborIAs() == d.getDomExternal() && + d.getDomNeighborIAs() == d.getDomLinkTypes() && + !(0 in d.getDomNeighborIAs()) && + d.getDomExternal() subset d.getDomForwardingMetrics() } ghost @@ -250,7 +288,10 @@ opaque requires acc(d.Mem(), _) decreases pure func (d *DataPlane) PreWellConfigured() bool { - return d.getDomExternal() intersection d.GetDomInternalNextHops() == set[uint16]{} + return d.getDomNeighborIAs() == d.getDomExternal() && + d.getDomExternal() == d.getDomLinkTypes() && + !(0 in d.getDomNeighborIAs()) && + d.getDomExternal() intersection d.GetDomInternalNextHops() == set[uint16]{} } ghost @@ -319,6 +360,22 @@ func (d *DataPlane) getSvcMem() { unfold acc(d.Mem(), _) } +ghost +requires acc(d.Mem(), _) && d.InternalConnIsSet() +ensures acc(&d.internal, _) && acc(d.internal.Mem(), _) +decreases +func (d *DataPlane) getInternalMem() { + unfold acc(d.Mem(), _) +} + +ghost +requires acc(d.Mem(), _) +ensures acc(&d.running, _) +decreases +func (d *DataPlane) getRunningMem() { + unfold acc(d.Mem(), _) +} + ghost requires acc(d.Mem(), _) decreases @@ -359,10 +416,9 @@ func (d *DataPlane) getMacFactoryMem() { } ghost -requires acc(d.Mem(), _) -requires acc(&d.macFactory, _) && d.macFactory != nil +requires acc(d.Mem(), _) && d.getMacFactory() != nil ensures acc(&d.macFactory, _) && acc(&d.key, _) && acc(d.key, _) -ensures acc(sl.AbsSlice_Bytes(*d.key, 0, len(*d.key)), _) +ensures acc(sl.Bytes(*d.key, 0, len(*d.key)), _) ensures scrypto.ValidKeyForHash(*d.key) ensures d.macFactory implements MacFactorySpec{d.key} decreases @@ -467,8 +523,8 @@ pred writeMsgInv(writeMsgs underlayconn.Messages) { acc(&writeMsgs[0]) && len(writeMsgs[0].Buffers) == 1 && acc(&writeMsgs[0].Buffers[0]) && - // sl.AbsSlice_Bytes(writeMsgs[0].Buffers[0], 0, 
len(writeMsgs[0].Buffers[0])) && - sl.AbsSlice_Bytes(writeMsgs[0].OOB, 0, len(writeMsgs[0].OOB)) && + // sl.Bytes(writeMsgs[0].Buffers[0], 0, len(writeMsgs[0].Buffers[0])) && + sl.Bytes(writeMsgs[0].OOB, 0, len(writeMsgs[0].OOB)) && 0 <= writeMsgs[0].N } /** end of definitions used internally for the proof of Run **/ @@ -478,7 +534,6 @@ pred (s *scmpError) Mem() { acc(s) } -ghost preserves s.Mem() && s.CanSet(e) ensures s.Get() === e decreases @@ -525,7 +580,7 @@ pred (s* scionPacketProcessor) sInit() { acc(&s.segmentChange) && acc(&s.cachedMac) && acc(&s.macBuffers) && - sl.AbsSlice_Bytes(s.macBuffers.scionInput, 0, len(s.macBuffers.scionInput)) && + sl.Bytes(s.macBuffers.scionInput, 0, len(s.macBuffers.scionInput)) && s.bfdLayer.NonInitMem() && acc(&s.srcAddr) && acc(&s.rawPkt) @@ -629,19 +684,6 @@ func (d *DataPlane) InDomainExternalInForwardingMetrics(id uint16) { reveal d.WellConfigured() } -ghost -requires acc(d.Mem(), _) && d.WellConfigured() -requires acc(&d.external, _) && acc(d.external, _) -requires id in domain(d.external) -ensures acc(d.Mem(), _) -ensures id in d.getDomForwardingMetrics() -decreases -func (d *DataPlane) InDomainExternalInForwardingMetrics2(id uint16) { - unfold acc(d.Mem(), _) - reveal d.WellConfigured() - unfold acc(accBatchConn(d.external), _) -} - ghost requires acc(d.Mem(), _) && d.WellConfigured() requires acc(&d.external, _) && acc(d.external, R55) @@ -652,6 +694,7 @@ ensures id in d.getDomForwardingMetrics() decreases func (d *DataPlane) InDomainExternalInForwardingMetrics3(id uint16) { reveal d.WellConfigured() + reveal d.getDomExternal() assert unfolding acc(d.Mem(), _) in (unfolding acc(accBatchConn(d.external), _) in true) } @@ -674,4 +717,4 @@ pure func (d *DataPlane) DomainForwardingMetrics() set[uint16] { unfolding acc(accForwardingMetrics(d.forwardingMetrics), _) in domain(d.forwardingMetrics) : set[uint16]{} -} \ No newline at end of file +} diff --git a/router/dataplane_spec_test.gobra b/router/dataplane_spec_test.gobra index f1d0dce79..433b3bd86 100644 --- a/router/dataplane_spec_test.gobra +++ b/router/dataplane_spec_test.gobra @@ -82,7 +82,7 @@ func canModifyRunning(d *DataPlane) { requires macFactory != nil && acc(key) && - acc(sl.AbsSlice_Bytes(*key, 0, len(*key)), _) && + acc(sl.Bytes(*key, 0, len(*key)), _) && scrypto.ValidKeyForHash(*key) && macFactory implements MacFactorySpec{key} requires metrics != nil && metrics.Mem() @@ -99,18 +99,28 @@ func testRun( b1 := allocateBatchConn() b2 := allocateBatchConn() + b3 := allocateBatchConn() d.external = map[uint16]BatchConn{ uint16(1): b1, uint16(2): b2, + uint16(3): b3, } fold accBatchConn(d.external) - d.linkTypes = make(map[uint16]topology.LinkType) - d.neighborIAs = make(map[uint16]addr.IA) + d.linkTypes = map[uint16]topology.LinkType{ + uint16(1): topology.Child, + uint16(2): topology.Child, + uint16(3): topology.Child, + } + d.neighborIAs = map[uint16]addr.IA{ + uint16(1): 1001, + uint16(2): 1002, + uint16(3): 1000, + } a1 := allocateUDPAddr() d.internalNextHops = map[uint16]*net.UDPAddr{ - uint16(3): a1, + uint16(4): a1, } d.internal = allocateBatchConn() @@ -126,7 +136,7 @@ func testRun( fold accForwardingMetrics(d.forwardingMetrics) ensures dp.Valid() - ensures dp === io.DataPlaneSpec_{ + ensures dp === io.DataPlaneSpec_ { linkTypes: dict[io.IO_ifs]io.IO_Link{ 1: io.IO_ProvCust{}, 2: io.IO_ProvCust{}, @@ -138,14 +148,12 @@ func testRun( 3: 1000, }, localIA: 1000, - topology: io.TopologySpec_{ - coreAS: set[io.IO_as]{1000}, - links: dict[io.AsIfsPair]io.AsIfsPair { - 
io.AsIfsPair{1000, 1}: io.AsIfsPair{1001, 7}, - io.AsIfsPair{1000, 2}: io.AsIfsPair{1002, 8}, - io.AsIfsPair{1000, 3}: io.AsIfsPair{1000, 3}, - io.AsIfsPair{1001, 7}: io.AsIfsPair{1000, 1}, - io.AsIfsPair{1002, 8}: io.AsIfsPair{1000, 2}}}} + links: dict[io.AsIfsPair]io.AsIfsPair { + io.AsIfsPair{1000, 1}: io.AsIfsPair{1001, 7}, + io.AsIfsPair{1000, 2}: io.AsIfsPair{1002, 8}, + io.AsIfsPair{1000, 3}: io.AsIfsPair{1000, 3}, + io.AsIfsPair{1001, 7}: io.AsIfsPair{1000, 1}, + io.AsIfsPair{1002, 8}: io.AsIfsPair{1000, 2}}} outline( pair1 := io.AsIfsPair{1000, 1} pair2 := io.AsIfsPair{1000, 2} @@ -153,7 +161,7 @@ func testRun( pair4 := io.AsIfsPair{1001, 7} pair5 := io.AsIfsPair{1002, 8} - dp := io.DataPlaneSpec_{ + dp := io.DataPlaneSpec_ { linkTypes: dict[io.IO_ifs]io.IO_Link{ 1: io.IO_ProvCust{}, 2: io.IO_ProvCust{}, @@ -165,14 +173,12 @@ func testRun( 3: 1000, }, localIA: 1000, - topology: io.TopologySpec_{ - coreAS: set[io.IO_as]{1000}, - links: dict[io.AsIfsPair]io.AsIfsPair { - pair1: pair4, - pair2: pair5, - pair3: pair3, - pair4: pair1, - pair5: pair2}}} + links: dict[io.AsIfsPair]io.AsIfsPair { + pair1: pair4, + pair2: pair5, + pair3: pair3, + pair4: pair1, + pair5: pair2}} assert dp.Lookup(dp.Lookup(pair1)) == pair1 assert dp.Lookup(dp.Lookup(pair2)) == pair2 @@ -181,15 +187,16 @@ func testRun( assert dp.Lookup(dp.Lookup(pair5)) == pair5 assert forall ifs io.IO_ifs :: {ifs in domain(dp.neighborIAs)} ifs in domain(dp.neighborIAs) ==> - io.AsIfsPair{dp.localIA, ifs} in domain(dp.topology.links) + io.AsIfsPair{dp.localIA, ifs} in domain(dp.links) assert forall ifs io.IO_ifs :: {ifs in domain(dp.neighborIAs)} ifs in domain(dp.neighborIAs) ==> dp.Lookup(io.AsIfsPair{dp.localIA, ifs}).asid == dp.neighborIAs[ifs] - assert forall ifs io.IO_ifs :: {ifs in domain(dp.neighborIAs)} io.AsIfsPair{dp.localIA, ifs} in domain(dp.topology.links) ==> + assert forall ifs io.IO_ifs :: {ifs in domain(dp.neighborIAs)} io.AsIfsPair{dp.localIA, ifs} in domain(dp.links) ==> ifs in domain(dp.neighborIAs) - assert forall pair io.AsIfsPair :: {dp.Lookup(pair)} pair in domain(dp.topology.links) ==> + assert forall pair io.AsIfsPair :: {dp.Lookup(pair)} pair in domain(dp.links) ==> let next_pair := dp.Lookup(pair) in - (next_pair in domain(dp.topology.links)) && + (next_pair in domain(dp.links)) && dp.Lookup(next_pair) == pair + assert domain(dp.linkTypes) == domain(dp.neighborIAs) assert reveal dp.Valid() ) @@ -200,7 +207,12 @@ func testRun( assert d.dpSpecWellConfiguredLinkTypes(dp) fold d.Mem() + assert d.getDomNeighborIAs() == reveal d.getDomExternal() + assert d.getDomNeighborIAs() == d.getDomLinkTypes() + assert !(0 in d.getDomNeighborIAs()) + assert reveal d.getDomExternal() intersection d.GetDomInternalNextHops() == set[uint16]{} assert reveal d.DpAgreesWithSpec(dp) + assert reveal d.PreWellConfigured() fold MutexInvariant!< d !>() // end of foldDataPlaneMem @@ -211,7 +223,7 @@ func testRun( assert d.MetricsAreSet() d.mtx.SetInv(MutexInvariant!) assert d.mtx.LockP() - assert d.mtx.LockInv() == MutexInvariant!; + assert d.mtx.LockInv() == MutexInvariant! 
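	// At this point the mutex is fully set up: LockP grants permission to call
	// Lock/Unlock, and LockInv fixes the resource the lock protects. A client
	// would then follow the usual pattern (illustrative sketch only, not part
	// of this change):
	//
	//   d.mtx.Lock()
	//   unfold MutexInvariant!< d !>()
	//   // ... access the fields of d ...
	//   fold MutexInvariant!< d !>()
	//   d.mtx.Unlock()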
// io-spec needs to be inhaled inhale io.token(place) diff --git a/router/dataplane_test.go b/router/dataplane_test.go index 4ae857cb9..1623157d2 100644 --- a/router/dataplane_test.go +++ b/router/dataplane_test.go @@ -749,8 +749,9 @@ func TestProcessPkt(t *testing.T) { dpath := &scion.Decoded{ Base: scion.Base{ PathMeta: scion.MetaHdr{ - CurrHF: 2, - SegLen: [3]uint8{2, 2, 0}, + CurrINF: 0, + CurrHF: 1, + SegLen: [3]uint8{2, 2, 0}, }, NumINF: 2, NumHops: 4, @@ -762,20 +763,23 @@ func TestProcessPkt(t *testing.T) { {SegID: 0x222, ConsDir: false, Timestamp: util.TimeToSecs(now)}, }, HopFields: []path.HopField{ - {ConsIngress: 0, ConsEgress: 1}, // IA 110 {ConsIngress: 31, ConsEgress: 0}, // Src - {ConsIngress: 0, ConsEgress: 51}, // Dst + {ConsIngress: 0, ConsEgress: 51}, // IA 110 {ConsIngress: 3, ConsEgress: 0}, // IA 110 + {ConsIngress: 0, ConsEgress: 1}, // Dst }, } - dpath.HopFields[2].Mac = computeMAC(t, key, dpath.InfoFields[0], dpath.HopFields[2]) - dpath.HopFields[3].Mac = computeMAC(t, key, dpath.InfoFields[1], dpath.HopFields[3]) + dpath.HopFields[1].Mac = computeMAC(t, key, dpath.InfoFields[0], dpath.HopFields[1]) + dpath.HopFields[2].Mac = computeMAC(t, key, dpath.InfoFields[1], dpath.HopFields[2]) if !afterProcessing { - dpath.InfoFields[0].UpdateSegID(dpath.HopFields[2].Mac) + dpath.InfoFields[0].UpdateSegID(dpath.HopFields[1].Mac) return toMsg(t, spkt, dpath) } - require.NoError(t, dpath.IncPath()) + + dpath.PathMeta.CurrHF++ + dpath.PathMeta.CurrINF++ + ret := toMsg(t, spkt, dpath) ret.Addr = &net.UDPAddr{IP: net.ParseIP("10.0.200.200").To4(), Port: 30043} ret.Flags, ret.NN, ret.N, ret.OOB = 0, 0, 0, nil diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra new file mode 100644 index 000000000..8aa346ff3 --- /dev/null +++ b/router/io-spec-abstract-transitions.gobra @@ -0,0 +1,253 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package router + +import ( + "github.com/scionproto/scion/pkg/slayers/path" + "github.com/scionproto/scion/pkg/slayers/path/scion" + "github.com/scionproto/scion/pkg/slayers" + . "verification/utils/definitions" + io "verification/io" + gsync "verification/utils/ghost_sync" + sl "verification/utils/slices" +) + +ghost +requires pkt.PathNotFullyTraversed() +decreases +pure func CurrSegIO_ifs(pkt io.IO_pkt2, dir bool) option[io.IO_ifs] { + return let currseg := pkt.CurrSeg in + (currseg.ConsDir == dir ? currseg.Future[0].InIF2 : currseg.Future[0].EgIF2) +} + +ghost +opaque +requires oldPkt.PathNotFullyTraversed() +ensures newPkt.PathNotFullyTraversed() +ensures len(newPkt.CurrSeg.Future) == len(oldPkt.CurrSeg.Future) +decreases +pure func AbsUpdateNonConsDirIngressSegID(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs]) (newPkt io.IO_pkt2) { + return ingressID == none[io.IO_ifs] ? 
oldPkt : io.IO_Packet2 { + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg, + } +} + +ghost +opaque +requires pkt.PathNotFullyTraversed() +decreases +pure func AbsValidateIngressIDConstraint(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { + return let currseg := pkt.CurrSeg in + ingressID != none[io.IO_ifs] ==> + ingressID == CurrSegIO_ifs(pkt, true) +} + +ghost +opaque +requires pkt.RightSeg != none[io.IO_seg2] +requires len(get(pkt.RightSeg).Past) > 0 +decreases +pure func AbsValidateIngressIDConstraintXover(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { + return let rightseg := get(pkt.RightSeg) in + ingressID != none[io.IO_ifs] ==> + ingressID == (rightseg.ConsDir ? rightseg.Past[0].InIF2 : rightseg.Past[0].EgIF2) +} + +ghost +opaque +requires pkt.PathNotFullyTraversed() +decreases +pure func AbsEgressInterfaceConstraint(pkt io.IO_pkt2, egressID option[io.IO_ifs]) bool { + return let currseg := pkt.CurrSeg in + egressID == CurrSegIO_ifs(pkt, false) +} + +ghost +opaque +requires dp.Valid() +requires pkt.PathNotFullyTraversed() +decreases +pure func AbsValidateEgressIDConstraint(pkt io.IO_pkt2, enter bool, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + (enter ==> dp.dp2_check_interface_top(currseg.ConsDir, dp.Asid(), currseg.Future[0])) +} + +ghost +opaque +requires oldPkt.PathNotFullyTraversed() +ensures len(newPkt.CurrSeg.Future) >= 0 +decreases +pure func AbsProcessEgress(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_Packet2 { + io.establishGuardTraversedsegInc(oldPkt.CurrSeg, oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg, + } +} + +ghost +opaque +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(oldPkt.CurrSeg.Future) == 1 +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires len(get(oldPkt.LeftSeg).History) == 0 +ensures newPkt.PathNotFullyTraversed() +ensures newPkt.RightSeg != none[io.IO_seg2] +ensures len(get(newPkt.RightSeg).Past) > 0 +decreases +pure func AbsDoXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_Packet2 { + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, false)), + } +} + +ghost +opaque +requires dp.Valid() +requires pkt.PathNotFullyTraversed() +requires pkt.RightSeg != none[io.IO_seg2] +requires len(get(pkt.RightSeg).Past) > 0 +decreases +pure func AbsValidateEgressIDConstraintXover(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + let rightseg := get(pkt.RightSeg) in + dp.xover2_link_type_dir(dp.Asid(), rightseg.ConsDir, rightseg.Past[0], + currseg.ConsDir, currseg.Future[0]) +} + +ghost +opaque +requires pkt.PathNotFullyTraversed() +decreases +pure func AbsVerifyCurrentMACConstraint(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + let d := currseg.ConsDir in + let ts := currseg.AInfo in + let hf := currseg.Future[0] in + let uinfo := currseg.UInfo in + dp.hf_valid(d, ts, uinfo, hf) +} +// This executes the IO enter event whenever a pkt was received +// from a different AS (ingressID != none[io.IO_ifs]) +// and will be forwarded to another border router within the AS (egressID == none[io.IO_ifs]) +ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires egressID == none[io.IO_ifs] +requires oldPkt.PathNotFullyTraversed() +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, 
ingressID) +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires AbsVerifyCurrentMACConstraint(newPkt, dp) +requires len(newPkt.CurrSeg.Future) == 1 || AbsValidateEgressIDConstraint(newPkt, true, dp) +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases +func InternalEnterEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { + reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsVerifyCurrentMACConstraint(newPkt, dp) + if(len(newPkt.CurrSeg.Future) != 1) { + reveal AbsValidateEgressIDConstraint(newPkt, true, dp) + } + AtomicEnter(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) +} + +// Either this executes the IO enter event whenever a pkt was received +// from a different AS (ingressID != none[io.IO_ifs]) +// and will leave the AS (egressID != none[io.IO_ifs]) or +// it executes the IO exit event whenever a pkt was received from +// within the AS (ingressID == none[io.IO_ifs]) +// and will leave the AS (egressID != none[io.IO_ifs]) +ghost +requires dp.Valid() +requires egressID != none[io.IO_ifs] +requires get(egressID) in domain(dp.GetNeighborIAs()) +requires oldPkt.PathNotFullyTraversed() +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) +requires AbsValidateEgressIDConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), (ingressID != none[io.IO_ifs]), dp) +requires AbsEgressInterfaceConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), egressID) +requires newPkt == AbsProcessEgress(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases +func ExternalEnterOrExitEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { + reveal dp.Valid() + nextPkt := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsVerifyCurrentMACConstraint(nextPkt, dp) + reveal AbsEgressInterfaceConstraint(nextPkt, egressID) + reveal AbsValidateEgressIDConstraint(nextPkt, (ingressID != none[io.IO_ifs]), dp) + reveal AbsProcessEgress(nextPkt) + if(ingressID == none[io.IO_ifs]){ + AtomicExit(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) + } else { + AtomicEnter(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) + } +} + +// This executes the IO xover event whenever a pkt was received +// from a different AS (ingressID != none[io.IO_ifs]) +// and a segment switch was performed. 
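+// The shape of newPkt that the caller must establish depends on where the packet is headed:
+// if it stays within the AS, only the segment switch is reflected; if it leaves the AS, the
+// egress processing is applied on top of it. Schematically (cf. the preconditions below):
+//
+//   egressID == none[io.IO_ifs] ==> newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))
+//   egressID != none[io.IO_ifs] ==> newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)))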
+ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires oldPkt.PathNotFullyTraversed() +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) +requires len(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).CurrSeg.Future) == 1 +requires AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg != none[io.IO_seg2] +requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).Future) > 0 +requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).History) == 0 +requires AbsVerifyCurrentMACConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp) +requires AbsValidateEgressIDConstraintXover(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp) +requires egressID != none[io.IO_ifs] ==> get(egressID) in domain(dp.GetNeighborIAs()) +requires egressID != none[io.IO_ifs] ==> AbsEgressInterfaceConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), egressID) +requires egressID == none[io.IO_ifs] ==> + newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) +requires egressID != none[io.IO_ifs] ==> + newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))) +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases +func XoverEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { + reveal dp.Valid() + intermediatePkt1 := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + intermediatePkt2 := reveal AbsDoXover(intermediatePkt1) + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsVerifyCurrentMACConstraint(intermediatePkt1, dp) + reveal AbsVerifyCurrentMACConstraint(intermediatePkt2, dp) + reveal AbsValidateEgressIDConstraintXover(intermediatePkt2, dp) + if(egressID != none[io.IO_ifs]){ + reveal AbsEgressInterfaceConstraint(intermediatePkt2, egressID) + reveal AbsProcessEgress(intermediatePkt2) + } + AtomicXover(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) +} diff --git a/router/io-spec-atomic-events.gobra b/router/io-spec-atomic-events.gobra new file mode 100644 index 000000000..fca20b964 --- /dev/null +++ b/router/io-spec-atomic-events.gobra @@ -0,0 +1,160 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +// This file contains the definition of operations that perform the atomic transitions of state +// in the IO spec. They all take a gpointer[gsync.GhostMutex], which acts as a logical invariant, because Gobra +// does not support invariants natively. As such, we can only get access to the invariants if we +// first lock the mutex. 
Even though all these operations are +// terminating, Gobra cannot currently prove this and thus, we assume termination for all methods +// in this file. + +package router + +import ( + "sync" + io "verification/io" + gsync "verification/utils/ghost_sync" +) + +ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.dp2_enter_guard( + oldPkt, + oldPkt.CurrSeg, + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + dp.Asid(), + oldPkt.CurrSeg.Future[0], + get(ingressID), + oldPkt.CurrSeg.Future[1:]) +requires dp.dp3s_forward( + io.IO_Packet2 { + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg, + }, + newPkt, + egressID) +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases _ +func AtomicEnter(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { + ghost ioLock.Lock() + unfold SharedInv!< dp, ioSharedArg !>() + t, s := *ioSharedArg.Place, *ioSharedArg.State + ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) + ghost pkt_internal := io.IO_val(io.IO_Internal_val1{oldPkt, get(ingressID), newPkt, egressID}) + assert dp.dp3s_iospec_bio3s_enter_guard(s, t, pkt_internal) + unfold dp.dp3s_iospec_ordered(s, t) + unfold dp.dp3s_iospec_bio3s_enter(s, t) + io.TriggerBodyIoEnter(pkt_internal) + tN := io.CBio_IN_bio3s_enter_T(t, pkt_internal) + io.Enter(t, pkt_internal) //Event + UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) + ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) + ghost *ioSharedArg.Place = tN + fold SharedInv!< dp, ioSharedArg !>() + ghost ioLock.Unlock() +} + +ghost +requires dp.Valid() +requires ingressID == none[io.IO_ifs] +requires egressID != none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.dp3s_forward_ext(oldPkt, newPkt, get(egressID)) +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases _ +func AtomicExit(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { + ghost ioLock.Lock() + unfold SharedInv!< dp, ioSharedArg !>() + t, s := *ioSharedArg.Place, *ioSharedArg.State + ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) + ghost pkt_internal := io.IO_val(io.IO_Internal_val2{oldPkt, newPkt, get(egressID)}) + assert dp.dp3s_iospec_bio3s_exit_guard(s, t, pkt_internal) + unfold dp.dp3s_iospec_ordered(s, t) + unfold dp.dp3s_iospec_bio3s_exit(s, t) + io.TriggerBodyIoExit(pkt_internal) + tN := io.dp3s_iospec_bio3s_exit_T(t, pkt_internal) + io.Exit(t, pkt_internal) //Event + UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) + ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) + ghost *ioSharedArg.Place = tN + fold SharedInv!< dp, ioSharedArg !>() + ghost ioLock.Unlock() +} + +ghost +requires dp.Valid() +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(oldPkt.CurrSeg.Future) > 0 +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires ingressID 
!= none[io.IO_ifs] +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.dp2_xover_guard( + oldPkt, + oldPkt.CurrSeg, + get(oldPkt.LeftSeg), + io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + io.IO_Packet2 { + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir)), + }, + oldPkt.CurrSeg.Future[0], + get(oldPkt.LeftSeg).Future[0], + get(oldPkt.LeftSeg).Future[1:], + dp.Asid(), + get(ingressID)) +requires dp.dp3s_forward_xover( + io.IO_Packet2 { + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir)), + }, + newPkt, + egressID) +preserves acc(ioLock.LockP(), _) +preserves ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases _ +func AtomicXover(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock gpointer[gsync.GhostMutex], ioSharedArg SharedArg, dp io.DataPlaneSpec) { + ghost ioLock.Lock() + unfold SharedInv!< dp, ioSharedArg !>() + t, s := *ioSharedArg.Place, *ioSharedArg.State + ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) + ghost pkt_internal := io.IO_val(io.IO_Internal_val1{oldPkt, get(ingressID), newPkt, egressID}) + assert dp.dp3s_iospec_bio3s_xover_guard(s, t, pkt_internal) + unfold dp.dp3s_iospec_ordered(s, t) + unfold dp.dp3s_iospec_bio3s_xover(s, t) + io.TriggerBodyIoXover(pkt_internal) + tN := io.dp3s_iospec_bio3s_xover_T(t, pkt_internal) + io.Xover(t, pkt_internal) //Event + UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) + ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) + ghost *ioSharedArg.Place = tN + fold SharedInv!< dp, ioSharedArg !>() + ghost ioLock.Unlock() +} \ No newline at end of file diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra new file mode 100644 index 000000000..2d38dc9cc --- /dev/null +++ b/router/io-spec-lemmas.gobra @@ -0,0 +1,394 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package router + +import ( + "sync" + "github.com/scionproto/scion/pkg/slayers/path" + "github.com/scionproto/scion/pkg/slayers/path/scion" + "github.com/scionproto/scion/pkg/slayers/path/epic" + "github.com/scionproto/scion/pkg/slayers" + "verification/dependencies/encoding/binary" + io "verification/io" + sl "github.com/scionproto/scion/verification/utils/slices" + . 
"verification/utils/definitions" +) + +ghost +preserves acc(sl.Bytes(raw, 0, len(raw)), R55) +ensures slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw) ==> + absIO_val(raw, ingressID).isIO_val_Pkt2 && + absIO_val(raw, ingressID).IO_val_Pkt2_2 == absPkt(raw) && + absPkt(raw).PathNotFullyTraversed() +decreases +func absIO_valLemma(raw []byte, ingressID uint16) { + if(slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)){ + absIO := reveal absIO_val(raw, ingressID) + assert absIO.isIO_val_Pkt2 + assert absIO_val(raw, ingressID).IO_val_Pkt2_2 == absPkt(raw) + absPktFutureLemma(raw) + } +} + +ghost +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires slayers.ValidPktMetaHdr(raw) +ensures acc(sl.Bytes(raw, 0, len(raw)), R56) +ensures slayers.ValidPktMetaHdr(raw) +ensures absPkt(raw).PathNotFullyTraversed() +decreases +func absPktFutureLemma(raw []byte) { + reveal slayers.ValidPktMetaHdr(raw) + headerOffset := slayers.GetAddressOffset(raw) + headerOffsetWithMetaLen := headerOffset + scion.MetaLen + assert forall k int :: {&raw[headerOffset:headerOffset+scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> &raw[headerOffset:headerOffset+scion.MetaLen][k] == &raw[headerOffset + k] + hdr := (unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) + metaHdr := scion.DecodedFrom(hdr) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() + offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) + pkt := reveal absPkt(raw) + assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen) + assert pkt.PathNotFullyTraversed() +} + +ghost +requires oldPkt.PathNotFullyTraversed() +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) +ensures AbsValidateIngressIDConstraint(newPkt, ingressID) +decreases +func AbsValidateIngressIDLemma(oldPkt io.IO_pkt2, newPkt io.IO_pkt2, ingressID option[io.IO_ifs]) { + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(newPkt, ingressID) +} + +ghost +requires len(oldPkt.CurrSeg.Future) == 1 +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires len(get(oldPkt.LeftSeg).History) == 0 +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires newPkt == AbsDoXover(oldPkt) +ensures AbsValidateIngressIDConstraintXover(newPkt, ingressID) +decreases +func AbsValidateIngressIDXoverLemma(oldPkt io.IO_pkt2, newPkt io.IO_pkt2, ingressID option[io.IO_ifs]) { + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsDoXover(oldPkt) + reveal AbsValidateIngressIDConstraintXover(newPkt, ingressID) +} + +ghost +opaque +requires acc(p.scionLayer.Mem(ub), R50) +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +decreases +pure func (p *scionPacketProcessor) DstIsLocalIngressID(ub []byte) bool { + return (unfolding acc(p.scionLayer.Mem(ub), R50) in + (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in + p.scionLayer.DstIA) == (unfolding 
acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 +} + +ghost +opaque +requires acc(p.scionLayer.Mem(ub), R50) +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) +requires slayers.ValidPktMetaHdr(ub) +decreases +pure func (p *scionPacketProcessor) LastHopLen(ub []byte) bool { + return (unfolding acc(p.scionLayer.Mem(ub), R50) in + (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in + p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> + len(absPkt(ub).CurrSeg.Future) == 1 +} + +//TODO: Does not work with --disableNL --unsafeWildcardoptimization +ghost +requires acc(p.scionLayer.Mem(ub), R50) +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R56) +requires slayers.ValidPktMetaHdr(ub) +requires p.DstIsLocalIngressID(ub) +requires p.LastHopLen(ub) +requires (unfolding acc(p.scionLayer.Mem(ub), R50) in + (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in + p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) +ensures acc(p.scionLayer.Mem(ub), R50) +ensures acc(&p.d, R55) && acc(p.d.Mem(), _) +ensures acc(&p.ingressID, R55) +ensures acc(sl.Bytes(ub, 0, len(ub)), R56) +ensures slayers.ValidPktMetaHdr(ub) +ensures p.ingressID != 0 +ensures len(absPkt(ub).CurrSeg.Future) == 1 +decreases +func (p* scionPacketProcessor) LocalDstLemma(ub []byte) { + reveal p.DstIsLocalIngressID(ub) + reveal p.LastHopLen(ub) +} + +ghost +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(&p.path, R55) && p.path == p.scionLayer.GetScionPath(ub) +decreases +pure func (p* scionPacketProcessor) GetIsXoverSpec(ub []byte) bool { + return let ubPath := p.scionLayer.UBPath(ub) in + let ubScionPath := p.scionLayer.UBScionPath(ub) in + unfolding acc(p.scionLayer.Mem(ub), R55) in + unfolding acc(p.scionLayer.Path.Mem(ubPath), _) in + p.path.GetBase(ubScionPath).IsXoverSpec() +} + +ghost +opaque +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires pkt.PathNotFullyTraversed() +decreases +pure func (p *scionPacketProcessor) NoBouncingPkt(pkt io.IO_pkt2) bool { + return let currseg := pkt.CurrSeg in + let OptEgressID := CurrSegIO_ifs(pkt, false) in + let egressID := path.IO_ifsToIfs(OptEgressID) in + ((egressID in p.d.getDomExternal()) || p.ingressID != 0) +} + +ghost +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires pkt.PathNotFullyTraversed() +requires AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) +requires (egressID in p.d.getDomExternal()) || p.ingressID != 0 +ensures acc(&p.d, R55) && acc(p.d.Mem(), _) +ensures acc(&p.ingressID, R55) +ensures p.NoBouncingPkt(pkt) +decreases +func (p *scionPacketProcessor) EstablishNoBouncingPkt(pkt io.IO_pkt2, egressID uint16) { + reveal AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) + reveal p.NoBouncingPkt(pkt) +} + +ghost +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +requires pkt.PathNotFullyTraversed() +requires AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) +requires p.NoBouncingPkt(pkt) +requires !(egressID in p.d.getDomExternal()) +ensures acc(&p.d, R55) && acc(p.d.Mem(), _) +ensures acc(&p.ingressID, R55) +ensures p.ingressID != 0 +decreases +func (p *scionPacketProcessor) IngressIDNotZeroLemma(pkt io.IO_pkt2, egressID uint16) { + reveal AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) + reveal 
p.NoBouncingPkt(pkt) +} + +ghost +requires 0 <= start && start <= end && end <= len(ub) +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R50) +requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) +requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +requires p.path === p.scionLayer.GetScionPath(ub) +requires start == p.scionLayer.PathScionStartIdx(ub) +requires end == p.scionLayer.PathScionEndIdx(ub) +requires p.scionLayer.EqAbsHeader(ub) +requires p.scionLayer.ValidScionInitSpec(ub) +ensures acc(sl.Bytes(ub, 0, len(ub)), R50) +ensures acc(p.scionLayer.Mem(ub), R55) +ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) +ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +ensures start == p.scionLayer.PathScionStartIdx(ub) +ensures end == p.scionLayer.PathScionEndIdx(ub) +ensures p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) +ensures p.path.GetBase(ub[start:end]).WeaklyValid() +ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) +decreases +func (p* scionPacketProcessor) EstablishEqAbsHeader(ub []byte, start int, end int) { + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + unfold acc(p.scionLayer.Mem(ub), R56) + unfold acc(p.path.Mem(ub[start:end]), R56) + reveal p.scionLayer.EqAbsHeader(ub) + reveal p.scionLayer.ValidScionInitSpec(ub) + assert reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) + assert p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) + fold acc(p.path.Mem(ub[start:end]), R56) + fold acc(p.scionLayer.Mem(ub), R56) + fold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) +} + +ghost +requires 0 <= start && start <= end && end <= len(ub) +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R50) +requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) +requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +requires p.path === p.scionLayer.GetScionPath(ub) +requires slayers.ValidPktMetaHdr(ub) +requires start == p.scionLayer.PathScionStartIdx(ub) +requires end == p.scionLayer.PathScionEndIdx(ub) +requires p.scionLayer.EqAbsHeader(ub) +ensures acc(sl.Bytes(ub, 0, len(ub)), R50) +ensures acc(p.scionLayer.Mem(ub), R55) +ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) +ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +ensures slayers.ValidPktMetaHdr(ub) +ensures p.scionLayer.EqAbsHeader(ub) +ensures start == p.scionLayer.PathScionStartIdx(ub) +ensures end == p.scionLayer.PathScionEndIdx(ub) +ensures scion.validPktMetaHdr(ub[start:end]) +ensures p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) +ensures p.scionLayer.ValidHeaderOffset(ub, len(ub)) +ensures absPkt(ub) == p.path.absPkt(ub[start:end]) +decreases +func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end int) { + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + reveal slayers.ValidPktMetaHdr(ub) + reveal p.scionLayer.EqAbsHeader(ub) + assert reveal scion.validPktMetaHdr(ub[start:end]) + unfold acc(p.scionLayer.Mem(ub), R56) + reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) + assert p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) + fold acc(p.scionLayer.Mem(ub), R56) + assert start == slayers.GetAddressOffset(ub) + + hdr1 := binary.BigEndian.Uint32(ub[start:start+scion.MetaLen]) + hdr2 := 
binary.BigEndian.Uint32(ub[start:end][:scion.MetaLen]) + assert hdr1 == hdr2 + hdr := hdr1 + fold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) + headerOffsetWithMetaLen := start + scion.MetaLen + metaHdr := scion.DecodedFrom(hdr) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() + offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) + + scion.WidenCurrSeg(ub, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) + scion.WidenLeftSeg(ub, currInfIdx + 1, segs, headerOffsetWithMetaLen, start, end) + scion.WidenMidSeg(ub, currInfIdx + 2, segs, headerOffsetWithMetaLen, start, end) + scion.WidenRightSeg(ub, currInfIdx - 1, segs, headerOffsetWithMetaLen, start, end) + assert reveal absPkt(ub) == reveal p.path.absPkt(ub[start:end]) +} + +ghost +requires 0 <= start && start <= end && end <= len(ub) +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(sl.Bytes(ub, 0, len(ub)), R50) +requires acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) +requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +requires p.path === p.scionLayer.GetScionPath(ub) +requires scion.validPktMetaHdr(ub[start:end]) +requires start == p.scionLayer.PathScionStartIdx(ub) +requires end == p.scionLayer.PathScionEndIdx(ub) +requires p.path.GetBase(ub[start:end]).EqAbsHeader(ub[start:end]) +requires p.scionLayer.ValidHeaderOffset(ub, len(ub)) +ensures acc(sl.Bytes(ub, 0, len(ub)), R50) +ensures acc(p.scionLayer.Mem(ub), R55) +ensures acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R50) +ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +ensures slayers.ValidPktMetaHdr(ub) +ensures start == p.scionLayer.PathScionStartIdx(ub) +ensures end == p.scionLayer.PathScionEndIdx(ub) +ensures scion.validPktMetaHdr(ub[start:end]) +ensures p.scionLayer.EqAbsHeader(ub) +ensures absPkt(ub) == p.path.absPkt(ub[start:end]) +decreases +func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end int){ + unfold acc(sl.Bytes(ub, 0, len(ub)), R56) + unfold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + unfold acc(p.scionLayer.Mem(ub), R56) + unfold acc(p.scionLayer.Path.Mem(ub[start:end]), R56) + reveal p.scionLayer.ValidHeaderOffset(ub, len(ub)) + assert reveal p.scionLayer.EqAbsHeader(ub) + fold acc(p.scionLayer.Path.Mem(ub[start:end]), R56) + fold acc(p.scionLayer.Mem(ub), R56) + reveal scion.validPktMetaHdr(ub[start:end]) + assert reveal slayers.ValidPktMetaHdr(ub) + assert start == slayers.GetAddressOffset(ub) + headerOffsetWithMetaLen := start + scion.MetaLen + hdr1 := binary.BigEndian.Uint32(ub[start:start+scion.MetaLen]) + hdr2 := binary.BigEndian.Uint32(ub[start:end][:scion.MetaLen]) + assert hdr1 == hdr2 + hdr := hdr1 + fold acc(sl.Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.Bytes(ub, 0, len(ub)), R56) + + metaHdr := scion.DecodedFrom(hdr) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := 
segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() + offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) + + scion.WidenCurrSeg(ub, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, start, end) + scion.WidenLeftSeg(ub, currInfIdx + 1, segs, headerOffsetWithMetaLen, start, end) + scion.WidenMidSeg(ub, currInfIdx + 2, segs, headerOffsetWithMetaLen, start, end) + scion.WidenRightSeg(ub, currInfIdx - 1, segs, headerOffsetWithMetaLen, start, end) + assert reveal absPkt(ub) == reveal p.path.absPkt(ub[start:end]) +} + +ghost +opaque +requires acc(&p.hopField, R55) +requires pkt.PathNotFullyTraversed() +decreases +pure func (p* scionPacketProcessor) EqAbsHopField(pkt io.IO_pkt2) bool { + return let absHop := p.hopField.ToIO_HF() in + let currHF := pkt.CurrSeg.Future[0] in + absHop == currHF +} + +ghost +opaque +requires acc(&p.infoField, R55) +decreases +pure func (p* scionPacketProcessor) EqAbsInfoField(pkt io.IO_pkt2) bool { + return let absInf := p.infoField.ToAbsInfoField() in + let currseg := pkt.CurrSeg in + absInf.AInfo == currseg.AInfo && + absInf.UInfo == currseg.UInfo && + absInf.ConsDir == currseg.ConsDir && + absInf.Peer == currseg.Peer +} \ No newline at end of file diff --git a/router/io-spec.gobra b/router/io-spec.gobra index 6de64f1bb..6aeed7678 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -17,584 +17,81 @@ package router import ( - sl "github.com/scionproto/scion/verification/utils/slices" - "github.com/scionproto/scion/verification/io" - "github.com/scionproto/scion/verification/dependencies/encoding/binary" + "github.com/scionproto/scion/pkg/slayers" "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" "github.com/scionproto/scion/private/topology" -) - -ghost -decreases -pure func numInfoFields(seg1Len int, seg2Len int, seg3Len int) int { - return seg3Len > 0 ? 3 : (seg2Len > 0 ? 2 : 1) -} - -ghost -decreases -pure func hopFieldOffset(numINF int, currHF int) int { - return path.InfoFieldOffset(numINF) + path.HopLen * currHF -} - -ghost -decreases -pure func pktLen(seg1Len int, seg2Len int, seg3Len int) int { - return hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + - path.HopLen * (seg1Len + seg2Len + seg3Len) -} - - -ghost -decreases -pure func lengthOfCurrSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) int { - return seg1Len > currHF ? seg1Len : ((seg1Len + seg2Len) > currHF ? seg2Len : seg3Len) -} - -ghost -requires 0 <= currHF -ensures res <= currHF -decreases -pure func lengthOfPrevSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) (res int) { - return seg1Len > currHF ? 0 : ((seg1Len + seg2Len) > currHF ? seg1Len : seg1Len + seg2Len) -} - -// returns the ASid of a hopfield -ghost -requires 1 <= numINF -requires 0 <= currHFIdx -requires hopFieldOffset(numINF, currHFIdx) + path.HopLen <= len(raw) -requires dp.Valid() -requires let idx := hopFieldOffset(numINF, currHFIdx) in - acc(&raw[idx+2], _) && acc(&raw[idx+3], _) && acc(&raw[idx+4], _) && acc(&raw[idx+5], _) -decreases -pure func asidFromIfs( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - consDir bool, - asid io.IO_as) (res option[io.IO_as]) { - return let idx := hopFieldOffset(numINF, currHFIdx) in - let ifs := consDir ? binary.BigEndian.Uint16(raw[idx+4:idx+6]) : binary.BigEndian.Uint16(raw[idx+2:idx+4]) in - let asIfPair := io.AsIfsPair{asid, io.IO_ifs(ifs)} in - (asIfPair in domain(dp.GetLinks()) ? 
- some(dp.Lookup(asIfPair).asid) : none[io.IO_as]) -} - -// returns a list of ASids of hopfields that are before the current hopfield in a segment -ghost -requires 1 <= numINF -requires 0 <= prevSegLen && prevSegLen <= currHFIdx -requires hopFieldOffset(numINF, currHFIdx) + path.HopLen <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == currHFIdx - prevSegLen + 1 -decreases currHFIdx - prevSegLen -pure func asidsBefore( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - prevSegLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return let next_asid := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in asidFromIfs(dp, raw, numINF, currHFIdx, !consDir, asid)) in - match next_asid{ - case none[io.IO_as]: - none[seq[io.IO_as]] - default: - currHFIdx == prevSegLen ? some(seq[io.IO_as]{get(next_asid)}) : - let next_asid_seq := asidsBefore(dp, raw, numINF, currHFIdx-1, prevSegLen, consDir, get(next_asid)) in - match next_asid_seq{ - case none[seq[io.IO_as]]: - none[seq[io.IO_as]] - default: - some(get(next_asid_seq) ++ seq[io.IO_as]{get(next_asid)}) - } - } -} - -// returns a list of ASids of hopfields that are after the current hopfield in a segment -ghost -requires 1 <= numINF -requires 0 <= currHFIdx && currHFIdx < segLen -requires hopFieldOffset(numINF, segLen) <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == segLen - currHFIdx -decreases segLen - currHFIdx + 1 -pure func asidsAfter( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - segLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return let next_asid := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in asidFromIfs(dp, raw, numINF, currHFIdx, consDir, asid)) in - match next_asid{ - case none[io.IO_as]: - none[seq[io.IO_as]] - default: - currHFIdx == segLen - 1 ? some(seq[io.IO_as]{get(next_asid)}) : - let next_asid_seq := asidsAfter(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(next_asid)) in - match next_asid_seq{ - case none[seq[io.IO_as]]: - none[seq[io.IO_as]] - default: - some(seq[io.IO_as]{get(next_asid)} ++ get(next_asid_seq)) - } - } -} - -// returns a list of ASids of hopfields for CurrSeg in the abstract packet -ghost -requires 1 <= numINF -requires 0 <= prevSegLen && prevSegLen <= currHFIdx -requires currHFIdx < segLen -requires hopFieldOffset(numINF, segLen) <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == segLen - prevSegLen -decreases -pure func asidForCurrSeg( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - segLen int, - prevSegLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return segLen == 0 ? some(seq[io.IO_as]{}) : - let left := asidsBefore(dp, raw, numINF, currHFIdx, prevSegLen, consDir, asid) in - let right := asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) in - (left == none[seq[io.IO_as]] || right == none[seq[io.IO_as]]) ? 
- none[seq[io.IO_as]] : - some(get(left) ++ get(right)[1:]) -} - -// returns a list of ASids of hopfields for LeftSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires 1 <= currINFIdx && currINFIdx < 4 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func asidsForLeftSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as) (res option[seq[io.IO_as]]) { - return let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - (currINFIdx == 1 && seg2Len > 0) ? - asidForCurrSeg(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir, asid) : - (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir, asid) : - some(seq[io.IO_as]{}) -} - -// returns a list of ASids of hopfields for RightSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires -1 <= currINFIdx && currINFIdx < 2 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures (currINFIdx == 0 && res != none[seq[io.IO_as]]) ==> len(get(res)) == seg1Len -ensures (currINFIdx == 1 && seg2Len > 0 && res != none[seq[io.IO_as]]) ==> len(get(res)) == seg2Len -decreases -pure func asidsForRightSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as) (res option[seq[io.IO_as]]) { - return (currINFIdx == 1 && seg2Len > 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir, asid) : - (currINFIdx == 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir, asid) : - some(seq[io.IO_as]{}) -} - -// returns a list of ASids of hopfields for MidSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires 2 <= currINFIdx && currINFIdx < 5 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -requires (currINFIdx == 4 && seg2Len > 0) ==> asid != none[io.IO_as] -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> asid != none[io.IO_as] -decreases -pure func asidsForMidSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid option[io.IO_as]) (res option[seq[io.IO_as]]) { - return (currINFIdx == 4 && seg2Len > 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, 1) in - asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir, get(asid)) : - (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? 
- let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, 2) in - asidForCurrSeg(dp, raw, numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir, get(asid)) : - some(seq[io.IO_as]{}) -} - -ghost -requires idx + path.HopLen <= len(raw) -requires 0 <= idx -requires acc(&raw[idx+2], _) && acc(&raw[idx+3], _) && acc(&raw[idx+4], _) && acc(&raw[idx+5], _) -ensures len(res.HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases -pure func hopField(raw []byte, idx int, beta set[io.IO_msgterm], asid io.IO_as, ainfo io.IO_ainfo) (res io.IO_HF) { - return let inif2 := binary.BigEndian.Uint16(raw[idx+2:idx+4]) in - let egif2 := binary.BigEndian.Uint16(raw[idx+4:idx+6]) in - let op_inif2 := inif2 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(inif2)) in - let op_egif2 := egif2 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(egif2)) in - let ts := io.IO_msgterm(io.MsgTerm_Num{ainfo}) in - let l := io.IO_msgterm(io.MsgTerm_L{seq[io.IO_msgterm]{ts, io.if2term(op_inif2), io.if2term(op_egif2), - io.IO_msgterm(io.MsgTerm_FS{beta})}}) in - let hvf := io.mac(io.macKey(io.asidToKey(asid)), l) in - io.IO_HF(io.IO_HF_{ - InIF2 : op_inif2, - EgIF2 : op_egif2, - HVF : hvf, - }) -} - -ghost -requires 0 <= offset -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures len(res) == len(asid) - currHFIdx -ensures forall k int :: {res[k]} 0 <= k && k < len(res) ==> - len(res[k].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases len(asid) - currHFIdx -pure func hopFieldsConsDir( - raw []byte, - offset int, - currHFIdx int, - beta set[io.IO_msgterm], - asid seq[io.IO_as], - ainfo io.IO_ainfo) (res seq[io.IO_HF]) { - return currHFIdx == len(asid) ? seq[io.IO_HF]{} : - let hf := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)) in - seq[io.IO_HF]{hf} ++ hopFieldsConsDir(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf.HVF}), asid, ainfo) -} - -ghost -requires 0 <= offset -requires -1 <= currHFIdx && currHFIdx < len(asid) -requires offset + path.HopLen * currHFIdx + path.HopLen <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures len(res) == currHFIdx + 1 -ensures forall k int :: {res[k]} 0 <= k && k < len(res) ==> - len(res[k].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases currHFIdx + 1 -pure func hopFieldsNotConsDir( - raw []byte, - offset int, - currHFIdx int, - beta set[io.IO_msgterm], - asid seq[io.IO_as], - ainfo io.IO_ainfo) (res seq[io.IO_HF]) { - return currHFIdx == -1 ? seq[io.IO_HF]{} : - let hf := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)) in - hopFieldsNotConsDir(raw, offset, currHFIdx -1, (beta union set[io.IO_msgterm]{hf.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf} -} - -ghost -requires -1 <= currHFIdx && currHFIdx < len(hopfields) -decreases currHFIdx + 1 -pure func segPast(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_HF] { - return currHFIdx == -1 ? - seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHFIdx]} ++ segPast(hopfields, currHFIdx - 1) -} - -ghost -requires 0 <= currHFIdx && currHFIdx <= len(hopfields) -decreases len(hopfields) - currHFIdx -pure func segFuture(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_HF] { - return currHFIdx == len(hopfields) ? 
seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHFIdx]} ++ segFuture(hopfields, currHFIdx + 1) -} - -ghost -requires -1 <= currHFIdx && currHFIdx < len(hopfields) -decreases currHFIdx + 1 -pure func segHistory(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_ahi] { - return currHFIdx == -1 ? seq[io.IO_ahi]{} : - seq[io.IO_ahi]{hopfields[currHFIdx].Toab()} ++ segHistory(hopfields, currHFIdx - 1) -} - -ghost -requires 0 <= offset -requires 0 < len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func segment(raw []byte, - offset int, - currHFIdx int, - asid seq[io.IO_as], - ainfo io.IO_ainfo, - consDir bool, - peer bool) io.IO_seg2 { - return let hopfields := consDir ? - hopFieldsConsDir(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : - hopFieldsNotConsDir(raw, offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) in - let uinfo := uInfo(hopfields, currHFIdx, consDir) in - io.IO_seg2(io.IO_seg3_{ - AInfo :ainfo, - UInfo : uinfo, - ConsDir : consDir, - Peer : peer, - Past : segPast(hopfields, currHFIdx - 1), - Future : segFuture(hopfields, currHFIdx), - History : segHistory(hopfields, currHFIdx - 1), - }) -} - -ghost -requires path.InfoFieldOffset(currINFIdx) + path.InfoLen <= offset -requires 0 < len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires 0 <= currINFIdx && currINFIdx < 3 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func currSeg(raw []byte, offset int, currINFIdx int, currHFIdx int, asid seq[io.IO_as]) io.IO_seg3 { - return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let ainfo := timestamp(raw, currINFIdx) in - let consDir := path.ConsDir(raw, currINFIdx) in - let peer := path.Peer(raw, currINFIdx) in - segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) -} - -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires 1 <= currINFIdx && currINFIdx < 4 -requires (currINFIdx == 1 && seg2Len > 0) ==> len(asid) == seg2Len -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func leftSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 1 && seg2Len > 0) ? - some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid)) : - ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) : - none[io.IO_seg3]) -} - -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires -1 <= currINFIdx && currINFIdx < 2 -requires (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg2Len -requires (currINFIdx == 0 && seg2Len > 0) ==> len(asid) == seg1Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func rightSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ? 
- some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid)) : - (currINFIdx == 0 && seg2Len > 0) ? - some(currSeg(raw, offset, currINFIdx, seg1Len, asid)) : - none[io.IO_seg3] -} + "github.com/scionproto/scion/pkg/addr" + "golang.org/x/net/ipv4" -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires 2 <= currINFIdx && currINFIdx < 5 -requires (currINFIdx == 4 && seg2Len > 0) ==> len(asid) == seg1Len -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func midSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 4 && seg2Len > 0) ? - some(currSeg(raw, offset, 0, seg1Len, asid)) : - ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) : - none[io.IO_seg3]) -} + "verification/dependencies/encoding/binary" + "verification/io" + sl "verification/utils/slices" + . "verification/utils/definitions" +) ghost -requires dp.Valid() -requires len(raw) > 4 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -requires unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let hdr := binary.BigEndian.Uint32(raw[0:4]) in - let metaHdr := scion.DecodedFrom(hdr) in - let seg1 := int(metaHdr.SegLen[0]) in - let seg2 := int(metaHdr.SegLen[1]) in - let seg3 := int(metaHdr.SegLen[2]) in - let base := scion.Base{metaHdr, - numInfoFields(seg1, seg2, seg3), - seg1 + seg2 + seg3} in - metaHdr.InBounds() && - 0 < metaHdr.SegLen[0] && - base.ValidCurrInfSpec() && - base.ValidCurrHfSpec() && - len(raw) >= pktLen(seg1, seg2, seg3) -decreases -pure func absPkt(dp io.DataPlaneSpec, raw []byte, asid io.IO_as) option[io.IO_pkt2] { - return let hdr := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in binary.BigEndian.Uint32(raw[0:4]) in +opaque +requires acc(sl.Bytes(raw, 0, len(raw)), R56) +requires slayers.ValidPktMetaHdr(raw) +decreases +pure func absPkt(raw []byte) (res io.IO_pkt2) { + return let _ := reveal slayers.ValidPktMetaHdr(raw) in + let headerOffset := slayers.GetAddressOffset(raw) in + let headerOffsetWithMetaLen := headerOffset + scion.MetaLen in + let hdr := (unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in + binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) in let metaHdr := scion.DecodedFrom(hdr) in - let currINFIdx := int(metaHdr.CurrINF) in - let currHFIdx := int(metaHdr.CurrHF) in + let currInfIdx := int(metaHdr.CurrINF) in + let currHfIdx := int(metaHdr.CurrHF) in let seg1Len := int(metaHdr.SegLen[0]) in let seg2Len := int(metaHdr.SegLen[1]) in let seg3Len := int(metaHdr.SegLen[2]) in - let segLen := lengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in - let prevSegLen := lengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in - let numINF := numInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := hopFieldOffset(numINF, 0) in - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - let currAsidSeq := asidForCurrSeg(dp, raw, numINF, currHFIdx, prevSegLen+segLen, prevSegLen, consDir, dp.Asid()) in - currAsidSeq == none[seq[io.IO_as]] ? 
none[io.IO_pkt2] : - let last := get(currAsidSeq)[segLen-1] in - let first := get(currAsidSeq)[0] in - let leftAsidSeq := asidsForLeftSeg(dp, raw, numINF, currINFIdx + 1, seg1Len, seg2Len, seg3Len, last) in - let rightAsidSeq := asidsForRightSeg(dp, raw, numINF, currINFIdx - 1, seg1Len, seg2Len, seg3Len, first) in - (leftAsidSeq == none[seq[io.IO_as]] || rightAsidSeq == none[seq[io.IO_as]]) ? none[io.IO_pkt2] : - let midAsid := ((currINFIdx == 0 && seg2Len > 0 && seg3Len > 0) ? some(get(leftAsidSeq)[len(get(leftAsidSeq))-1]) : - (currINFIdx == 2 && seg2Len > 0) ? some(get(rightAsidSeq)[0]) : none[io.IO_as]) in - let midAsidSeq := asidsForMidSeg(dp, raw, numINF, currINFIdx + 2, seg1Len, seg2Len, seg3Len, midAsid) in - midAsidSeq == none[seq[io.IO_as]] ? none[io.IO_pkt2] : - some(io.IO_pkt2(io.IO_Packet2{ - CurrSeg : currSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, get(currAsidSeq)), - LeftSeg : leftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, get(leftAsidSeq)), - MidSeg : midSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, get(midAsidSeq)), - RightSeg : rightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, get(rightAsidSeq)), - })) -} - - -ghost -requires 0 <= offset -requires path.InfoFieldOffset(offset) + 8 < len(raw) -requires acc(&raw[path.InfoFieldOffset(offset) + 4], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 5], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 6], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 7], _) -decreases -pure func timestamp(raw []byte, offset int) io.IO_ainfo { - return let idx := path.InfoFieldOffset(offset) + 4 in - io.IO_ainfo(binary.BigEndian.Uint32(raw[idx : idx + 4])) -} - -ghost -requires len(hopfield.HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases -pure func hvfSet(hopfield io.IO_HF) set[io.IO_msgterm] { - return let l := hopfield.HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_ in - l[len(l) - 1].MsgTerm_FS_ -} - -ghost -requires 0 < len(hopfields) -requires 0 <= currHFIdx && currHFIdx <= len(hopfields) -requires forall idx int :: {hopfields[idx]} 0 <= idx && idx < len(hopfields) ==> - len(hopfields[idx].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases -pure func uInfo(hopfields seq[io.IO_HF], currHFIdx int, consDir bool) set[io.IO_msgterm] { - return currHFIdx == len(hopfields) ? - hvfSet(hopfields[currHFIdx-1]) : - (currHFIdx == 0 ? - hvfSet(hopfields[currHFIdx]) : - (consDir ? - hvfSet(hopfields[currHFIdx]) : - hvfSet(hopfields[currHFIdx-1]))) -} - -ghost -decreases -pure func ifsToIO_ifs(ifs uint16) option[io.IO_ifs]{ - return ifs == 0 ? 
none[io.IO_ifs] : some(io.IO_ifs(ifs)) -} - -ghost -opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func validPktMetaHdr(raw []byte) bool { - return len(raw) > 4 && - unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let hdr := binary.BigEndian.Uint32(raw[0:4]) in - let metaHdr := scion.DecodedFrom(hdr) in - let seg1 := int(metaHdr.SegLen[0]) in - let seg2 := int(metaHdr.SegLen[1]) in - let seg3 := int(metaHdr.SegLen[2]) in - let base := scion.Base{metaHdr, - numInfoFields(seg1, seg2, seg3), - seg1 + seg2 + seg3} in - metaHdr.InBounds() && - 0 < metaHdr.SegLen[0] && - base.ValidCurrInfSpec() && - base.ValidCurrHfSpec() && - len(raw) >= pktLen(seg1, seg2, seg3) + let segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) in + let segLen := segs.LengthOfCurrSeg(currHfIdx) in + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx) in + let numINF := segs.NumInfoFields() in + let offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) in + io.IO_Packet2 { + CurrSeg : scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen), + LeftSeg : scion.LeftSeg(raw, currInfIdx + 1, segs, headerOffsetWithMetaLen), + MidSeg : scion.MidSeg(raw, currInfIdx + 2, segs, headerOffsetWithMetaLen), + RightSeg : scion.RightSeg(raw, currInfIdx - 1, segs, headerOffsetWithMetaLen), + } } ghost -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) +requires acc(sl.Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Unsupported -ensures val.IO_val_Unsupported_1 == ifsToIO_ifs(ingressID) +ensures val.IO_val_Unsupported_1 == path.ifsToIO_ifs(ingressID) decreases pure func absIO_val_Unsupported(raw []byte, ingressID uint16) (val io.IO_val) { return io.IO_val(io.IO_val_Unsupported{ - ifsToIO_ifs(ingressID), - io.Unit(io.Unit_{}), + path.ifsToIO_ifs(ingressID), + io.Unit{}, }) } ghost -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) +opaque +requires acc(sl.Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Pkt2 || val.isIO_val_Unsupported decreases -pure func absIO_val(dp io.DataPlaneSpec, raw []byte, ingressID uint16) (val io.IO_val) { - return (reveal validPktMetaHdr(raw) && absPkt(dp, raw, dp.asid()) != none[io.IO_pkt2]) ? - io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw, dp.asid()))}) : +pure func absIO_val(raw []byte, ingressID uint16) (val io.IO_val) { + return (reveal slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)) ? + io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(raw)}) : absIO_val_Unsupported(raw, ingressID) } +ghost +requires respr.OutPkt != nil ==> + acc(sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)), R56) +decreases +pure func absReturnErr(respr processResult) (val io.IO_val) { + return respr.OutPkt == nil ? io.IO_val_Unit{} : + absIO_val(respr.OutPkt, respr.EgressID) +} + ghost requires acc(&d.localIA, _) decreases @@ -614,10 +111,10 @@ pure func (d *DataPlane) dpSpecWellConfiguredNeighborIAs(dp io.DataPlaneSpec) bo ghost decreases pure func absLinktype(link topology.LinkType) io.IO_Link { - return link == topology.Core ? io.IO_Link(io.IO_PeerOrCore{}) : + return link == topology.Core ? io.IO_Link(io.IO_Core{}) : link == topology.Parent ? io.IO_Link(io.IO_CustProv{}) : link == topology.Child ? io.IO_Link(io.IO_ProvCust{}) : - link == topology.Peer ? io.IO_Link(io.IO_PeerOrCore{}) : + link == topology.Peer ? 
io.IO_Link(io.IO_Core{}) : io.IO_Link(io.IO_NoLink{}) } @@ -640,3 +137,60 @@ pure func (d *DataPlane) DpAgreesWithSpec(dp io.DataPlaneSpec) bool { d.dpSpecWellConfiguredNeighborIAs(dp) && d.dpSpecWellConfiguredLinkTypes(dp) } + +ghost +requires acc(d.Mem(), _) +requires d.DpAgreesWithSpec(dp) +requires d.WellConfigured() +ensures acc(&d.linkTypes, _) +ensures d.linkTypes != nil ==> acc(d.linkTypes, _) && !(0 in domain(d.linkTypes)) +ensures d.dpSpecWellConfiguredLinkTypes(dp) +decreases +func (d *DataPlane) LinkTypesLemma(dp io.DataPlaneSpec) { + reveal d.WellConfigured() + reveal d.DpAgreesWithSpec(dp) + assert !(0 in d.getDomLinkTypes()) + unfold acc(d.Mem(), _) + assert !(0 in domain(d.linkTypes)) +} + +ghost +requires acc(d.Mem(), _) +requires d.DpAgreesWithSpec(dp) +requires d.WellConfigured() +requires egressID in d.getDomExternal() +ensures egressID != 0 +ensures io.IO_ifs(egressID) in domain(dp.GetNeighborIAs()) +decreases +func (d *DataPlane) EgressIDNotZeroLemma(egressID uint16, dp io.DataPlaneSpec) { + reveal d.WellConfigured() + reveal d.DpAgreesWithSpec(dp) +} + +ghost +requires acc(d.Mem(), _) +requires acc(&d.external, _) +requires d.external != nil ==> acc(d.external, _) +ensures acc(d.Mem(), _) +ensures acc(&d.external, _) +ensures d.external != nil ==> acc(d.external, _) +ensures d.getDomExternal() == domain(d.external) +decreases +func (d *DataPlane) getDomExternalLemma() { + if (d.external != nil) { + assert reveal d.getDomExternal() == unfolding acc(d.Mem(), _) in + (unfolding acc(accBatchConn(d.external), _) in + domain(d.external)) + } else { + assert reveal d.getDomExternal() == + unfolding acc(d.Mem(), _) in set[uint16]{} + } +} + +ghost +requires acc(msg.Mem(), R50) +decreases +pure func MsgToAbsVal(msg *ipv4.Message, ingressID uint16) (res io.IO_val) { + return unfolding acc(msg.Mem(), R50) in + absIO_val(msg.Buffers[0], ingressID) +} diff --git a/router/svc.go b/router/svc.go index 2f84ec164..8c8fd62bc 100644 --- a/router/svc.go +++ b/router/svc.go @@ -42,6 +42,7 @@ func newServices() (s *services) { // @ preserves acc(s.Mem(), R50) // @ requires acc(a.Mem(), R10) +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (s *services) AddSvc(svc addr.HostSVC, a *net.UDPAddr) { //@ unfold acc(s.Mem(), R50) s.mtx.Lock() @@ -69,6 +70,7 @@ func (s *services) AddSvc(svc addr.HostSVC, a *net.UDPAddr) { // @ preserves acc(s.Mem(), R50) // @ preserves acc(a.Mem(), R10) +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (s *services) DelSvc(svc addr.HostSVC, a *net.UDPAddr) { //@ unfold acc(s.Mem(), R50) s.mtx.Lock() @@ -102,6 +104,7 @@ func (s *services) DelSvc(svc addr.HostSVC, a *net.UDPAddr) { // @ requires acc(s.Mem(), _) // @ ensures !b ==> r == nil // @ ensures b ==> acc(r.Mem(), _) +// @ decreases 0 if sync.IgnoreBlockingForTermination() func (s *services) Any(svc addr.HostSVC) (r *net.UDPAddr, b bool) { //@ unfold acc(s.Mem(), _) s.mtx.Lock() @@ -130,8 +133,6 @@ func (s *services) Any(svc addr.HostSVC) (r *net.UDPAddr, b bool) { // @ ensures b ==> 0 < len(addrs) // @ ensures b ==> 0 <= res && res < len(addrs) // @ ensures !b ==> res == -1 -// We could ensure stronger postconditions for this method, -// but it is unclear right now if we need them.
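+// index reports whether a occurs in addrs: if b is true, res is a valid index into addrs; otherwise res is -1 (cf. the postconditions above).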
// @ decreases func (s *services) index(a *net.UDPAddr, addrs []*net.UDPAddr /*@ , ghost k addr.HostSVC @*/) (res int, b bool) { // @ unfold acc(validMapValue(k, addrs), R11) diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra new file mode 100644 index 000000000..ae01a1280 --- /dev/null +++ b/router/widen-lemma.gobra @@ -0,0 +1,151 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package router + +import ( + sl "verification/utils/slices" + "verification/io" + . "verification/utils/definitions" + "verification/dependencies/encoding/binary" + "github.com/scionproto/scion/pkg/slayers" + "github.com/scionproto/scion/pkg/slayers/path" + "github.com/scionproto/scion/pkg/slayers/path/scion" +) + +// Some things in this file can be simplified. Nonetheless, the important definition here +// is absIO_valWidenLemma. Everything else can be seen as an implementation detail. +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R49) +requires acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R49) +ensures acc(sl.Bytes(raw, 0, len(raw)), R49) +ensures acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R49) +ensures absIO_val(raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(raw[:length], ingressID) == absIO_val(raw, ingressID) +decreases +func absIO_valWidenLemma(raw []byte, ingressID uint16, length int) { + var ret1 io.IO_val + var ret2 io.IO_val + + if (slayers.ValidPktMetaHdr(raw[:length]) && slayers.IsSupportedPkt(raw[:length])) { + ValidPktMetaHdrWidenLemma(raw, length) + assert slayers.ValidPktMetaHdr(raw) + IsSupportedPktWidenLemma(raw, length) + assert slayers.IsSupportedPkt(raw) + absPktWidenLemma(raw, length) + + ret1 = io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(raw)}) + ret2 = io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(raw[:length])}) + assert ret1 == reveal absIO_val(raw, ingressID) + assert ret2 == reveal absIO_val(raw[:length], ingressID) + assert ret1 == ret2 + assert absIO_val(raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(raw[:length], ingressID) == absIO_val(raw, ingressID) + } else { + assert !(reveal absIO_val(raw[:length], ingressID).isIO_val_Pkt2) + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R51) +requires acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) +requires slayers.ValidPktMetaHdr(raw[:length]) +ensures acc(sl.Bytes(raw, 0, len(raw)), R51) +ensures acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures slayers.ValidPktMetaHdr(raw) +decreases +func ValidPktMetaHdrWidenLemma(raw []byte, length int) { + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R56) + reveal slayers.ValidPktMetaHdr(raw[:length]) + ret1 := reveal slayers.ValidPktMetaHdr(raw) + ret2 := reveal slayers.ValidPktMetaHdr(raw[:length]) + assert ret1 == ret2 + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[:length], 
0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R51) +requires acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) +requires slayers.IsSupportedPkt(raw[:length]) +ensures acc(sl.Bytes(raw, 0, len(raw)), R51) +ensures acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures slayers.IsSupportedPkt(raw) +decreases +func IsSupportedPktWidenLemma(raw []byte, length int) { + unfold acc(sl.Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R56) + reveal slayers.IsSupportedPkt(raw[:length]) + ret1 := reveal slayers.IsSupportedPkt(raw) + ret2 := reveal slayers.IsSupportedPkt(raw[:length]) + assert ret1 == ret2 + fold acc(sl.Bytes(raw, 0, len(raw)), R56) + fold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.Bytes(raw, 0, len(raw)), R50) +requires acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R50) +requires slayers.ValidPktMetaHdr(raw) +requires slayers.ValidPktMetaHdr(raw[:length]) +ensures acc(sl.Bytes(raw, 0, len(raw)), R50) +ensures acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R50) +ensures slayers.ValidPktMetaHdr(raw) +ensures slayers.ValidPktMetaHdr(raw[:length]) +ensures absPkt(raw) == absPkt(raw[:length]) +decreases +func absPktWidenLemma(raw []byte, length int) { + + reveal slayers.ValidPktMetaHdr(raw) + reveal slayers.ValidPktMetaHdr(raw[:length]) + unfold acc(sl.Bytes(raw, 0, len(raw)), R51) + unfold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) + headerOffset1 := slayers.GetAddressOffset(raw) + headerOffset2 := slayers.GetAddressOffset(raw[:length]) + assert headerOffset1 == headerOffset2 + headerOffset := headerOffset1 + headerOffsetWithMetaLen := headerOffset + scion.MetaLen + hdr1 := binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen]) + hdr2 := binary.BigEndian.Uint32(raw[:length][headerOffset:headerOffset+scion.MetaLen]) + assert hdr1 == hdr2 + hdr := hdr1 + fold acc(sl.Bytes(raw, 0, len(raw)), R51) + fold acc(sl.Bytes(raw[:length], 0, len(raw[:length])), R51) + + metaHdr := scion.DecodedFrom(hdr) + currInfIdx := int(metaHdr.CurrINF) + currHfIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segs := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + segLen := segs.LengthOfCurrSeg(currHfIdx) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx) + numINF := segs.NumInfoFields() + offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) + + scion.WidenCurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen, 0, length) + scion.WidenLeftSeg(raw, currInfIdx + 1, segs, headerOffsetWithMetaLen, 0, length) + scion.WidenMidSeg(raw, currInfIdx + 2, segs, headerOffsetWithMetaLen, 0, length) + scion.WidenRightSeg(raw, currInfIdx - 1, segs, headerOffsetWithMetaLen, 0, length) + + assert reveal absPkt(raw) == reveal absPkt(raw[:length]) +} diff --git a/tools/braccept/cases/scmp_unknown_hop.go b/tools/braccept/cases/scmp_unknown_hop.go index aeb114c5e..d101bdc69 100644 --- a/tools/braccept/cases/scmp_unknown_hop.go +++ b/tools/braccept/cases/scmp_unknown_hop.go @@ -340,3 +340,155 @@ func SCMPUnknownHopEgress(artifactsDir string, mac hash.Hash) runner.Case { StoreDir: filepath.Join(artifactsDir, "SCMPUnknownHopEgress"), } } + +// SCMPUnknownHopWrongRouter tests a packet from an AS local host sent to the wrong egress +// router. 
This packet must not be forwarded by the router. +func SCMPUnknownHopWrongRouter(artifactsDir string, mac hash.Hash) runner.Case { + options := gopacket.SerializeOptions{ + FixLengths: true, + ComputeChecksums: true, + } + + ethernet := &layers.Ethernet{ + SrcMAC: net.HardwareAddr{0xf0, 0x0d, 0xca, 0xfe, 0xbe, 0xef}, + DstMAC: net.HardwareAddr{0xf0, 0x0d, 0xca, 0xfe, 0x00, 0x1}, + EthernetType: layers.EthernetTypeIPv4, + } + ip := &layers.IPv4{ + Version: 4, + IHL: 5, + TTL: 64, + SrcIP: net.IP{192, 168, 0, 51}, + DstIP: net.IP{192, 168, 0, 11}, + Protocol: layers.IPProtocolUDP, + Flags: layers.IPv4DontFragment, + } + udp := &layers.UDP{ + SrcPort: layers.UDPPort(30041), + DstPort: layers.UDPPort(30001), + } + _ = udp.SetNetworkLayerForChecksum(ip) + + // (valid) path to ff00:0:8 via interface 181; this interface is configured on brC + // but we're sending it to brA. + sp := &scion.Decoded{ + Base: scion.Base{ + PathMeta: scion.MetaHdr{ + CurrHF: 0, + SegLen: [3]uint8{2, 0, 0}, + }, + NumINF: 1, + NumHops: 2, + }, + InfoFields: []path.InfoField{ + { + SegID: 0x111, + ConsDir: true, + Timestamp: util.TimeToSecs(time.Now()), + }, + }, + HopFields: []path.HopField{ + {ConsIngress: 0, ConsEgress: 181}, + {ConsIngress: 811, ConsEgress: 0}, + }, + } + sp.HopFields[0].Mac = path.MAC(mac, sp.InfoFields[0], sp.HopFields[0], nil) + + scionL := &slayers.SCION{ + Version: 0, + TrafficClass: 0xb8, + FlowID: 0xdead, + NextHdr: slayers.L4UDP, + PathType: scion.PathType, + SrcIA: xtest.MustParseIA("1-ff00:0:1"), + DstIA: xtest.MustParseIA("1-ff00:0:8"), + Path: sp, + } + srcA := addr.MustParseHost("192.168.0.51") + if err := scionL.SetSrcAddr(srcA); err != nil { + panic(err) + } + if err := scionL.SetDstAddr(addr.MustParseHost("174.16.8.1")); err != nil { + panic(err) + } + + scionudp := &slayers.UDP{} + scionudp.SrcPort = 40111 + scionudp.DstPort = 40222 + scionudp.SetNetworkLayerForChecksum(scionL) + + payload := []byte("actualpayloadbytes") + + // Prepare input packet + input := gopacket.NewSerializeBuffer() + if err := gopacket.SerializeLayers(input, options, + ethernet, ip, udp, scionL, scionudp, gopacket.Payload(payload), + ); err != nil { + panic(err) + } + + // Pointer to current hop field + pointer := slayers.CmnHdrLen + scionL.AddrHdrLen() + + scion.MetaLen + path.InfoLen*sp.NumINF + path.HopLen*int(sp.PathMeta.CurrHF) + + // Prepare quoted packet that is part of the SCMP error message. 
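+ // The quote is what the SCMP ParameterProblem reply carries as its payload: only scionL, scionudp and the payload are serialized into it below, so the Ethernet/IPv4/UDP underlay headers are not part of the quote.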
+ quoted := gopacket.NewSerializeBuffer() + if err := gopacket.SerializeLayers(quoted, options, + scionL, scionudp, gopacket.Payload(payload), + ); err != nil { + panic(err) + } + quote := quoted.Bytes() + + // Prepare want packet + want := gopacket.NewSerializeBuffer() + ethernet.SrcMAC, ethernet.DstMAC = ethernet.DstMAC, ethernet.SrcMAC + ip.SrcIP, ip.DstIP = ip.DstIP, ip.SrcIP + udp.SrcPort, udp.DstPort = udp.DstPort, udp.SrcPort + + scionL.DstIA = scionL.SrcIA + scionL.SrcIA = xtest.MustParseIA("1-ff00:0:1") + if err := scionL.SetDstAddr(srcA); err != nil { + panic(err) + } + intlA := addr.MustParseHost("192.168.0.11") + if err := scionL.SetSrcAddr(intlA); err != nil { + panic(err) + } + + _, err := sp.Reverse() + if err != nil { + panic(err) + } + + scionL.NextHdr = slayers.End2EndClass + e2e := normalizedSCMPPacketAuthEndToEndExtn() + e2e.NextHdr = slayers.L4SCMP + scmpH := &slayers.SCMP{ + TypeCode: slayers.CreateSCMPTypeCode( + slayers.SCMPTypeParameterProblem, + slayers.SCMPCodeUnknownHopFieldEgress, + ), + } + + scmpH.SetNetworkLayerForChecksum(scionL) + scmpP := &slayers.SCMPParameterProblem{ + Pointer: uint16(pointer), + } + + if err := gopacket.SerializeLayers(want, options, + ethernet, ip, udp, scionL, e2e, scmpH, scmpP, gopacket.Payload(quote), + ); err != nil { + panic(err) + } + + return runner.Case{ + Name: "SCMPUnknownHopWrongRouter", + WriteTo: "veth_int_host", + ReadFrom: "veth_int_host", + Input: input.Bytes(), + Want: want.Bytes(), + StoreDir: filepath.Join(artifactsDir, "SCMPUnknownHopWrongRouter"), + NormalizePacket: scmpNormalizePacket, + } +} diff --git a/tools/braccept/main.go b/tools/braccept/main.go index 6954ea474..4b1891207 100644 --- a/tools/braccept/main.go +++ b/tools/braccept/main.go @@ -107,6 +107,7 @@ func realMain() int { cases.SCMPInternalXover(artifactsDir, hfMAC), cases.SCMPUnknownHop(artifactsDir, hfMAC), cases.SCMPUnknownHopEgress(artifactsDir, hfMAC), + cases.SCMPUnknownHopWrongRouter(artifactsDir, hfMAC), cases.SCMPInvalidHopParentToParent(artifactsDir, hfMAC), cases.SCMPInvalidHopChildToChild(artifactsDir, hfMAC), cases.SCMPTracerouteIngress(artifactsDir, hfMAC), diff --git a/verification/dependencies/bytes/bytes.gobra b/verification/dependencies/bytes/bytes.gobra index 1ef8ae9ef..dbfcbc260 100644 --- a/verification/dependencies/bytes/bytes.gobra +++ b/verification/dependencies/bytes/bytes.gobra @@ -18,8 +18,8 @@ import sl "github.com/scionproto/scion/verification/utils/slices" // are the same length and contain the same bytes. // A nil argument is equivalent to an empty slice. trusted -requires acc(sl.AbsSlice_Bytes(a, 0, len(a)), _) -requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _) +requires acc(sl.Bytes(a, 0, len(a)), _) +requires acc(sl.Bytes(b, 0, len(b)), _) decreases pure func Equal(a, b []byte) bool { return string(a) == string(b) diff --git a/verification/dependencies/context/context.gobra b/verification/dependencies/context/context.gobra index 654f9d187..ab6060516 100644 --- a/verification/dependencies/context/context.gobra +++ b/verification/dependencies/context/context.gobra @@ -59,27 +59,3 @@ ensures child.Mem() ensures child.Mem() --* parent.Mem() decreases _ func WithValue(parent Context, key, val interface{ pred Mem() }) (child Context) - -/* Below functions are closure-dependent and currently unsupported. 
- -type CancelFunc func() - -requires parent.Mem() -ensures child.Mem() -ensures child.Mem() --* parent.Mem() -decreases _ -func WithCancel(parent Context) (child Context, cancel CancelFunc) - -requires parent.Mem() -ensures child.Mem() -ensures child.Mem() --* parent.Mem() -decreases _ -func WithDeadline(parent Context, d time.Time) (child Context, cancel CancelFunc) - -requires parent.Mem() -ensures child.Mem() -ensures child.Mem() --* parent.Mem() -decreases _ -func WithTimeout(parent Context, timeout time.Duration) (child Context, cancel CancelFunc) - -*/ diff --git a/verification/dependencies/crypto/aes/cipher.gobra b/verification/dependencies/crypto/aes/cipher.gobra index 3dd4949b5..e9bd84255 100644 --- a/verification/dependencies/crypto/aes/cipher.gobra +++ b/verification/dependencies/crypto/aes/cipher.gobra @@ -19,8 +19,7 @@ const BlockSize = 16 // The key argument should be the AES key, // either 16, 24, or 32 bytes to select // AES-128, AES-192, or AES-256. -trusted -preserves acc(slices.AbsSlice_Bytes(key, 0, len(key)), R50) +preserves acc(slices.Bytes(key, 0, len(key)), R50) ensures err == nil ==> len(key) == 16 || len(key) == 24 || len(key) == 32 ensures err == nil ==> @@ -29,16 +28,4 @@ ensures err == nil ==> result.BlockSize() == len(key)) ensures err != nil ==> err.ErrorMem() decreases -func NewCipher(key []byte) (result cipher.Block, err error) { - k := len(key) - switch k { - default: - return nil, KeySizeError(k) - case 16, 24, 32: - break - } - if boring.Enabled { - return boring.NewAESCipher(key) - } - return newCipher(key) -} +func NewCipher(key []byte) (result cipher.Block, err error) \ No newline at end of file diff --git a/verification/dependencies/crypto/cipher/cbc.gobra b/verification/dependencies/crypto/cipher/cbc.gobra index a195b0260..81eaae634 100644 --- a/verification/dependencies/crypto/cipher/cbc.gobra +++ b/verification/dependencies/crypto/cipher/cbc.gobra @@ -15,24 +15,15 @@ package cipher -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // NewCBCEncrypter returns a BlockMode which encrypts in cipher block chaining // mode, using the given Block. The length of iv must be the same as the // Block's block size. -trusted requires b != nil && b.Mem() requires len(iv) == b.BlockSize() -preserves acc(slices.AbsSlice_Bytes(iv, 0, len(iv)), _) +preserves acc(sl.Bytes(iv, 0, len(iv)), _) ensures result != nil && result.Mem() ensures result.BlockSize() == old(b.BlockSize()) decreases _ -func NewCBCEncrypter(b Block, iv []byte) (result BlockMode) { - if len(iv) != b.BlockSize() { - panic("cipher.NewCBCEncrypter: IV length must equal block size") - } - if cbc, ok := b.(cbcEncAble); ok { - return cbc.NewCBCEncrypter(iv) - } - return (*cbcEncrypter)(newCBC(b, iv)) -} \ No newline at end of file +func NewCBCEncrypter(b Block, iv []byte) (result BlockMode) \ No newline at end of file diff --git a/verification/dependencies/crypto/cipher/cipher.gobra b/verification/dependencies/crypto/cipher/cipher.gobra index 76c9d6364..1fa8e5ab4 100644 --- a/verification/dependencies/crypto/cipher/cipher.gobra +++ b/verification/dependencies/crypto/cipher/cipher.gobra @@ -14,7 +14,7 @@ package cipher import . "github.com/scionproto/scion/verification/utils/definitions" -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // A Block represents an implementation of block cipher // using a given key. 
It provides the capability to encrypt @@ -33,16 +33,16 @@ type Block interface { // Encrypt encrypts the first block in src into dst. // Dst and src must overlap entirely or not at all. preserves Mem() - preserves slices.AbsSlice_Bytes(dst, 0, len(dst)) - preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), R10) + preserves sl.Bytes(dst, 0, len(dst)) + preserves acc(sl.Bytes(src, 0, len(src)), R10) decreases Encrypt(dst, src []byte) // Decrypt decrypts the first block in src into dst. // Dst and src must overlap entirely or not at all. preserves Mem() - preserves slices.AbsSlice_Bytes(dst, 0, len(dst)) - preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), R10) + preserves sl.Bytes(dst, 0, len(dst)) + preserves acc(sl.Bytes(src, 0, len(src)), R10) decreases Decrypt(dst, src []byte) } @@ -63,8 +63,8 @@ type Stream interface { // maintains state and does not reset at each XORKeyStream call. requires len(src) <= len(dst) preserves Mem() - preserves slices.AbsSlice_Bytes(dst, 0, len(dst)) - preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), R10) + preserves sl.Bytes(dst, 0, len(dst)) + preserves acc(sl.Bytes(src, 0, len(src)), R10) decreases XORKeyStream(dst, src []byte) } @@ -94,9 +94,9 @@ type BlockMode interface { // maintains state and does not reset at each CryptBlocks call. requires len(src) <= len(dst) preserves Mem() - preserves acc(slices.AbsSlice_Bytes(dst, 0, len(dst)), 1 - R10) - preserves dst !== src ==> acc(slices.AbsSlice_Bytes(dst, 0, len(dst)), R10) - preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), R10) + preserves acc(sl.Bytes(dst, 0, len(dst)), 1 - R10) + preserves dst !== src ==> acc(sl.Bytes(dst, 0, len(dst)), R10) + preserves acc(sl.Bytes(src, 0, len(src)), R10) ensures BlockSize() == old(BlockSize()) decreases CryptBlocks(dst, src []byte) diff --git a/verification/dependencies/crypto/subtle/constant_time.gobra b/verification/dependencies/crypto/subtle/constant_time.gobra index acdcf1c3f..8ecebd1c8 100644 --- a/verification/dependencies/crypto/subtle/constant_time.gobra +++ b/verification/dependencies/crypto/subtle/constant_time.gobra @@ -9,16 +9,16 @@ package subtle -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // ConstantTimeCompare returns 1 if the two slices, x and y, have equal contents // and 0 otherwise. The time taken is a function of the length of the slices and // is independent of the contents. 
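+// For example, ConstantTimeCompare([]byte{0x01, 0x02}, []byte{0x01, 0x02}) == 1 and ConstantTimeCompare([]byte{0x01, 0x02}, []byte{0x01, 0x03}) == 0; with equal-length inputs, every byte is inspected rather than returning at the first mismatch.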
-requires acc(slices.AbsSlice_Bytes(x, 0, len(x)), _) -requires acc(slices.AbsSlice_Bytes(y, 0, len(y)), _) +requires acc(sl.Bytes(x, 0, len(x)), _) +requires acc(sl.Bytes(y, 0, len(y)), _) // postconditions hidden for now: -// ensures unfolding slices.AbsSlice_Bytes(x, 0, len(x)) in (unfolding slices.AbsSlice_Bytes(y, 0, len(y)) in len(x) == len(y) ==> (forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 1) -// ensures unfolding slices.AbsSlice_Bytes(x, 0, len(x)) in (unfolding slices.AbsSlice_Bytes(y, 0, len(y)) in len(x) == len(y) ==> !(forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 0) +// ensures unfolding sl.Bytes(x, 0, len(x)) in (unfolding sl.Bytes(y, 0, len(y)) in len(x) == len(y) ==> (forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 1) +// ensures unfolding sl.Bytes(x, 0, len(x)) in (unfolding sl.Bytes(y, 0, len(y)) in len(x) == len(y) ==> !(forall i int :: 0 <= i && i < len(x) ==> x[i] == y[i]) ==> res == 0) ensures len(x) != len(y) ==> res == 0 decreases _ pure func ConstantTimeCompare(x, y []byte) (res int) diff --git a/verification/dependencies/encoding/binary/binary.gobra b/verification/dependencies/encoding/binary/binary.gobra index 8861e78b8..af43eb984 100644 --- a/verification/dependencies/encoding/binary/binary.gobra +++ b/verification/dependencies/encoding/binary/binary.gobra @@ -126,21 +126,41 @@ ensures res == "binary.LittleEndian" decreases pure func (l littleEndian) GoString() (res string) { return "binary.LittleEndian" } +// The specs here could be simpler now that we have FUint16Spec and FPutUint16Spec. + +decreases +pure func (e bigEndian) Uint16Spec(b0, b1 byte) (res uint16) { + return uint16(b1) | uint16(b0)<<8 +} + trusted // related to https://github.com/viperproject/gobra/issues/192 requires acc(&b[0], _) && acc(&b[1], _) ensures res >= 0 +ensures res == BigEndian.Uint16Spec(b[0], b[1]) decreases pure func (e bigEndian) Uint16(b []byte) (res uint16) { return uint16(b[1]) | uint16(b[0])<<8 } +decreases +pure func (e bigEndian) PutUint16Spec(b0, b1 byte, v uint16) bool { + return b0 == byte(v >> 8) && + b1 == byte(v) +} + +// Proven in verification/utils/bitwise/proofs.dfy +trusted preserves acc(&b[0]) && acc(&b[1]) +ensures BigEndian.PutUint16Spec(b[0], b[1], v) +ensures BigEndian.Uint16Spec(b[0], b[1]) == v decreases func (e bigEndian) PutUint16(b []byte, v uint16) { b[0] = byte(v >> 8) b[1] = byte(v) } +// The specs here could be simpler now that we have FUint32Spec and FPutUint32Spec. + decreases pure func (e bigEndian) Uint32Spec(b0, b1, b2, b3 byte) (res uint32) { return uint32(b3) | uint32(b2)<<8 | uint32(b1)<<16 | uint32(b0)<<24 @@ -163,8 +183,11 @@ pure func (e bigEndian) PutUint32Spec(b0, b1, b2, b3 byte, v uint32) bool { b3 == byte(v) } +// Proven in verification/utils/bitwise/proofs.dfy +trusted preserves acc(&b[0]) && acc(&b[1]) && acc(&b[2]) && acc(&b[3]) ensures BigEndian.PutUint32Spec(b[0], b[1], b[2], b[3], v) +ensures BigEndian.Uint32Spec(b[0], b[1], b[2], b[3]) == v decreases func (e bigEndian) PutUint32(b []byte, v uint32) { b[0] = byte(v >> 24) diff --git a/verification/dependencies/errors/errors_spec.gobra b/verification/dependencies/errors/errors_spec.gobra index 74e5ef38d..715c963be 100644 --- a/verification/dependencies/errors/errors_spec.gobra +++ b/verification/dependencies/errors/errors_spec.gobra @@ -68,7 +68,6 @@ type ErrorCell interface { // This is not meant to be called. Instead, // it shows that Mem() is strong enough to // allow for the assignment of an error. 
- ghost requires Mem() && CanSet(e) ensures Mem() && Get() === e decreases diff --git a/verification/dependencies/github.com/google/gopacket/base.gobra b/verification/dependencies/github.com/google/gopacket/base.gobra index 6a55f340e..9c0ae5c81 100644 --- a/verification/dependencies/github.com/google/gopacket/base.gobra +++ b/verification/dependencies/github.com/google/gopacket/base.gobra @@ -60,8 +60,8 @@ ensures len(res) != 0 ==> res === ub[start:end] decreases func (p Payload) Payload(ghost ub []byte) (res []byte, ghost start int, ghost end int) { res = []byte(p) - assert unfolding acc(p.Mem(ub), R20) in true - return res, 0, len(p) + assert unfolding acc(p.Mem(ub), R20) in true + return res, 0, len(p) } requires b != nil && b.Mem() diff --git a/verification/dependencies/github.com/google/gopacket/decode.gobra b/verification/dependencies/github.com/google/gopacket/decode.gobra index 0d11ef310..37d5e0469 100644 --- a/verification/dependencies/github.com/google/gopacket/decode.gobra +++ b/verification/dependencies/github.com/google/gopacket/decode.gobra @@ -8,7 +8,7 @@ package gopacket -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" type DecodeFeedback interface { pred Mem() @@ -90,7 +90,7 @@ type Decoder interface { pred Mem() requires acc(LayerTypesMem(), _) - requires slices.AbsSlice_Bytes(b, 0, len(b)) + requires sl.Bytes(b, 0, len(b)) preserves Mem() preserves p.Mem() decreases diff --git a/verification/dependencies/github.com/google/gopacket/flows.gobra b/verification/dependencies/github.com/google/gopacket/flows.gobra index 0aeac2e4b..585b46cdb 100644 --- a/verification/dependencies/github.com/google/gopacket/flows.gobra +++ b/verification/dependencies/github.com/google/gopacket/flows.gobra @@ -8,7 +8,7 @@ package gopacket -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" const MaxEndpointSize = 16 @@ -29,13 +29,13 @@ type Flow struct { src, dst [MaxEndpointSize]byte } -preserves acc(slices.AbsSlice_Bytes(src, 0, len(src)), 1/10000) && acc(slices.AbsSlice_Bytes(dst, 0, len(dst)), 1/10000) +preserves acc(sl.Bytes(src, 0, len(src)), 1/10000) && acc(sl.Bytes(dst, 0, len(dst)), 1/10000) requires len(src) <= MaxEndpointSize && len(dst) <= MaxEndpointSize ensures f.slen == len(src) ensures f.dlen == len(dst) -ensures unfolding acc(slices.AbsSlice_Bytes(src, 0, len(src)), 1/10000) in +ensures unfolding acc(sl.Bytes(src, 0, len(src)), 1/10000) in forall i int :: { &src[i] } 0 <= i && i < len(src) ==> f.src[i] == src[i] -ensures unfolding acc(slices.AbsSlice_Bytes(dst, 0, len(dst)), 1/10000) in +ensures unfolding acc(sl.Bytes(dst, 0, len(dst)), 1/10000) in forall i int :: { &dst[i] } 0 <= i && i < len(dst) ==> f.dst[i] == dst[i] ensures f.typ == t decreases diff --git a/verification/dependencies/github.com/google/gopacket/layerclass.gobra b/verification/dependencies/github.com/google/gopacket/layerclass.gobra index 23b02797d..9d0b48adf 100644 --- a/verification/dependencies/github.com/google/gopacket/layerclass.gobra +++ b/verification/dependencies/github.com/google/gopacket/layerclass.gobra @@ -45,80 +45,6 @@ func (l LayerType) LayerTypes() (res []LayerType) { return []LayerType{l} } -/* -// (verifiedSCION) The following commented methods could be easily verified -// after we introduce support for range: - -// LayerClassSlice implements a LayerClass with a slice. 
-type LayerClassSlice []bool - -// Contains returns true if the given layer type should be considered part -// of this layer class. -func (s LayerClassSlice) Contains(t LayerType) bool { - return int(t) < len(s) && s[t] -} - -// LayerTypes returns all layer types in this LayerClassSlice. -// Because of LayerClassSlice's implementation, this could be quite slow. -func (s LayerClassSlice) LayerTypes() (all []LayerType) { - for i := 0; i < len(s); i++ { - if s[i] { - all = append(all, LayerType(i)) - } - } - return -} - -// NewLayerClassSlice creates a new LayerClassSlice by creating a slice of -// size max(types) and setting slice[t] to true for each type t. Note, if -// you implement your own LayerType and give it a high value, this WILL create -// a very large slice. -func NewLayerClassSlice(types []LayerType) LayerClassSlice { - var max LayerType - for _, typ := range types { - if typ > max { - max = typ - } - } - t := make([]bool, int(max+1)) - for _, typ := range types { - t[typ] = true - } - return t -} - -// LayerClassMap implements a LayerClass with a map. -type LayerClassMap map[LayerType]bool - -// Contains returns true if the given layer type should be considered part -// of this layer class. -func (m LayerClassMap) Contains(t LayerType) bool { - return m[t] -} - -// LayerTypes returns all layer types in this LayerClassMap. -func (m LayerClassMap) LayerTypes() (all []LayerType) { - for t := range m { - all = append(all, t) - } - return -} - -// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each -// type in types. -func NewLayerClassMap(types []LayerType) LayerClassMap { - m := LayerClassMap{} - for _, typ := range types { - m[typ] = true - } - return m -} -*/ - -// TODO: add explicit implementation proofs - -// TODO: verify after verifying the remainder of the file -trusted preserves acc(types, R20) // NewLayerClass creates a LayerClass, attempting to be smart about which type // it creates based on which types are passed in. @@ -127,13 +53,4 @@ ensures res.Mem() // ensures forall i LayerType :: i in types ==> res.Contains(i) // ensures forall i LayerType :: !i in types ==> !res.Contains(i) decreases -func NewLayerClass(types []LayerType) (res LayerClass) { - for _, typ := range types { - if typ > maxLayerType { - // NewLayerClassSlice could create a very large object, so instead create - // a map. 
- return NewLayerClassMap(types) - } - } - return NewLayerClassSlice(types) -} \ No newline at end of file +func NewLayerClass(types []LayerType) (res LayerClass) \ No newline at end of file diff --git a/verification/dependencies/github.com/google/gopacket/layers/base.gobra b/verification/dependencies/github.com/google/gopacket/layers/base.gobra index 3c9ce26f6..edf930228 100644 --- a/verification/dependencies/github.com/google/gopacket/layers/base.gobra +++ b/verification/dependencies/github.com/google/gopacket/layers/base.gobra @@ -8,7 +8,7 @@ package layers -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" type BaseLayer struct { Contents []byte @@ -28,29 +28,29 @@ pred (b *BaseLayer) PayloadMem() { } requires b.LayerMem() -ensures slices.AbsSlice_Bytes(res, 0, len(res)) -ensures slices.AbsSlice_Bytes(res, 0, len(res)) --* b.LayerMem() +ensures sl.Bytes(res, 0, len(res)) +ensures sl.Bytes(res, 0, len(res)) --* b.LayerMem() decreases func (b *BaseLayer) LayerContents() (res []byte) { unfold b.LayerMem() res = b.Contents - fold slices.AbsSlice_Bytes(res, 0, len(res)) - package slices.AbsSlice_Bytes(res, 0, len(res)) --* b.LayerMem() { - unfold slices.AbsSlice_Bytes(res, 0, len(res)) + fold sl.Bytes(res, 0, len(res)) + package sl.Bytes(res, 0, len(res)) --* b.LayerMem() { + unfold sl.Bytes(res, 0, len(res)) fold b.LayerMem() } } requires b.PayloadMem() -ensures slices.AbsSlice_Bytes(res, 0, len(res)) -ensures slices.AbsSlice_Bytes(res, 0, len(res)) --* b.PayloadMem() +ensures sl.Bytes(res, 0, len(res)) +ensures sl.Bytes(res, 0, len(res)) --* b.PayloadMem() decreases func (b *BaseLayer) LayerPayload() (res []byte) { unfold b.PayloadMem() res = b.Payload - fold slices.AbsSlice_Bytes(res, 0, len(res)) - package slices.AbsSlice_Bytes(res, 0, len(res)) --* b.PayloadMem() { - unfold slices.AbsSlice_Bytes(res, 0, len(res)) + fold sl.Bytes(res, 0, len(res)) + package sl.Bytes(res, 0, len(res)) --* b.PayloadMem() { + unfold sl.Bytes(res, 0, len(res)) fold b.PayloadMem() } } diff --git a/verification/dependencies/github.com/google/gopacket/layers/bfd.gobra b/verification/dependencies/github.com/google/gopacket/layers/bfd.gobra index 605680e3d..743d748e4 100644 --- a/verification/dependencies/github.com/google/gopacket/layers/bfd.gobra +++ b/verification/dependencies/github.com/google/gopacket/layers/bfd.gobra @@ -11,7 +11,7 @@ package layers import ( "github.com/google/gopacket" - "github.com/scionproto/scion/verification/utils/slices" + sl "github.com/scionproto/scion/verification/utils/slices" ) type BFDVersion uint8 @@ -132,7 +132,7 @@ type BFDAuthHeader struct { } pred (b *BFDAuthHeader) Mem() { - acc(b) && slices.AbsSlice_Bytes(b.Data, 0, len(b.Data)) + acc(b) && sl.Bytes(b.Data, 0, len(b.Data)) } preserves acc(h, 1/10000) @@ -185,7 +185,7 @@ pure func (d *BFD) LayerType() gopacket.LayerType requires d.NonInitMem() preserves df.Mem() -preserves slices.AbsSlice_Bytes(data, 0, len(data)) +preserves sl.Bytes(data, 0, len(data)) ensures err == nil ==> d.Mem(data) ensures err != nil ==> err.ErrorMem() && d.NonInitMem() decreases diff --git a/verification/dependencies/github.com/google/gopacket/layers/layertypes.gobra b/verification/dependencies/github.com/google/gopacket/layers/layertypes.gobra index 66a11d919..5ab8c0fb1 100644 --- a/verification/dependencies/github.com/google/gopacket/layers/layertypes.gobra +++ b/verification/dependencies/github.com/google/gopacket/layers/layertypes.gobra @@ -38,215 +38,5 @@ decreases 
func generateDecoders() (d gopacket.Decoder) var ( -// LayerTypeARP = gopacket.RegisterLayerType(10, gopacket.LayerTypeMetadata{Name: "ARP", Decoder: generateDecoders()}) // gopacket.DecodeFunc(decodeARP) -// LayerTypeCiscoDiscovery = gopacket.RegisterLayerType(11, gopacket.LayerTypeMetadata{Name: "CiscoDiscovery", Decoder: generateDecoders()}) // gopacket.DecodeFunc(decodeCiscoDiscovery) -// LayerTypeEthernetCTP = gopacket.RegisterLayerType(12, gopacket.LayerTypeMetadata{Name: "EthernetCTP", Decoder: /* gopacket.DecodeFunc(decodeEthernetCTP)}) */ generateDecoders()}) -// LayerTypeEthernetCTPForwardData = gopacket.RegisterLayerType(13, gopacket.LayerTypeMetadata{Name: "EthernetCTPForwardData", Decoder: nil}) -// LayerTypeEthernetCTPReply = gopacket.RegisterLayerType(14, gopacket.LayerTypeMetadata{Name: "EthernetCTPReply", Decoder: nil}) -// LayerTypeDot1Q = gopacket.RegisterLayerType(15, gopacket.LayerTypeMetadata{Name: "Dot1Q", Decoder: /* gopacket.DecodeFunc(decodeDot1Q)} */ generateDecoders()}) -// LayerTypeEtherIP = gopacket.RegisterLayerType(16, gopacket.LayerTypeMetadata{Name: "EtherIP", Decoder: /* gopacket.DecodeFunc(decodeEtherIP)} */ generateDecoders()}) -// LayerTypeEthernet = gopacket.RegisterLayerType(17, gopacket.LayerTypeMetadata{Name: "Ethernet", Decoder: /* gopacket.DecodeFunc(decodeEthernet)} */ generateDecoders()}) -// LayerTypeGRE = gopacket.RegisterLayerType(18, gopacket.LayerTypeMetadata{Name: "GRE", Decoder: /* gopacket.DecodeFunc(decodeGRE)} */ generateDecoders()}) -// LayerTypeICMPv4 = gopacket.RegisterLayerType(19, gopacket.LayerTypeMetadata{Name: "ICMPv4", Decoder: /* gopacket.DecodeFunc(decodeICMPv4)} */ generateDecoders()}) -// LayerTypeIPv4 = gopacket.RegisterLayerType(20, gopacket.LayerTypeMetadata{Name: "IPv4", Decoder: /* gopacket.DecodeFunc(decodeIPv4)} */ generateDecoders()}) -// LayerTypeIPv6 = gopacket.RegisterLayerType(21, gopacket.LayerTypeMetadata{Name: "IPv6", Decoder: /* gopacket.DecodeFunc(decodeIPv6)} */ generateDecoders()}) -// LayerTypeLLC = gopacket.RegisterLayerType(22, gopacket.LayerTypeMetadata{Name: "LLC", Decoder: /* gopacket.DecodeFunc(decodeLLC)} */ generateDecoders()}) -// LayerTypeSNAP = gopacket.RegisterLayerType(23, gopacket.LayerTypeMetadata{Name: "SNAP", Decoder: /* gopacket.DecodeFunc(decodeSNAP)} */ generateDecoders()}) -// LayerTypeMPLS = gopacket.RegisterLayerType(24, gopacket.LayerTypeMetadata{Name: "MPLS", Decoder: /* gopacket.DecodeFunc(decodeMPLS)} */ generateDecoders()}) -// LayerTypePPP = gopacket.RegisterLayerType(25, gopacket.LayerTypeMetadata{Name: "PPP", Decoder: /* gopacket.DecodeFunc(decodePPP)} */ generateDecoders()}) -// LayerTypePPPoE = gopacket.RegisterLayerType(26, gopacket.LayerTypeMetadata{Name: "PPPoE", Decoder: /* gopacket.DecodeFunc(decodePPPoE)} */ generateDecoders()}) -// LayerTypeRUDP = gopacket.RegisterLayerType(27, gopacket.LayerTypeMetadata{Name: "RUDP", Decoder: /* gopacket.DecodeFunc(decodeRUDP)} */ generateDecoders()}) -// LayerTypeSCTP = gopacket.RegisterLayerType(28, gopacket.LayerTypeMetadata{Name: "SCTP", Decoder: /* gopacket.DecodeFunc(decodeSCTP)} */ generateDecoders()}) -// LayerTypeSCTPUnknownChunkType = gopacket.RegisterLayerType(29, gopacket.LayerTypeMetadata{Name: "SCTPUnknownChunkType", Decoder: nil}) -// LayerTypeSCTPData = gopacket.RegisterLayerType(30, gopacket.LayerTypeMetadata{Name: "SCTPData", Decoder: nil}) -// LayerTypeSCTPInit = gopacket.RegisterLayerType(31, gopacket.LayerTypeMetadata{Name: "SCTPInit", Decoder: nil}) -// LayerTypeSCTPSack = gopacket.RegisterLayerType(32, 
gopacket.LayerTypeMetadata{Name: "SCTPSack", Decoder: nil}) -// LayerTypeSCTPHeartbeat = gopacket.RegisterLayerType(33, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeat", Decoder: nil}) -// LayerTypeSCTPError = gopacket.RegisterLayerType(34, gopacket.LayerTypeMetadata{Name: "SCTPError", Decoder: nil}) -// LayerTypeSCTPShutdown = gopacket.RegisterLayerType(35, gopacket.LayerTypeMetadata{Name: "SCTPShutdown", Decoder: nil}) -// LayerTypeSCTPShutdownAck = gopacket.RegisterLayerType(36, gopacket.LayerTypeMetadata{Name: "SCTPShutdownAck", Decoder: nil}) -// LayerTypeSCTPCookieEcho = gopacket.RegisterLayerType(37, gopacket.LayerTypeMetadata{Name: "SCTPCookieEcho", Decoder: nil}) -// LayerTypeSCTPEmptyLayer = gopacket.RegisterLayerType(38, gopacket.LayerTypeMetadata{Name: "SCTPEmptyLayer", Decoder: nil}) -// LayerTypeSCTPInitAck = gopacket.RegisterLayerType(39, gopacket.LayerTypeMetadata{Name: "SCTPInitAck", Decoder: nil}) -// LayerTypeSCTPHeartbeatAck = gopacket.RegisterLayerType(40, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeatAck", Decoder: nil}) -// LayerTypeSCTPAbort = gopacket.RegisterLayerType(41, gopacket.LayerTypeMetadata{Name: "SCTPAbort", Decoder: nil}) -// LayerTypeSCTPShutdownComplete = gopacket.RegisterLayerType(42, gopacket.LayerTypeMetadata{Name: "SCTPShutdownComplete", Decoder: nil}) -// LayerTypeSCTPCookieAck = gopacket.RegisterLayerType(43, gopacket.LayerTypeMetadata{Name: "SCTPCookieAck", Decoder: nil}) -// LayerTypeTCP = gopacket.RegisterLayerType(44, gopacket.LayerTypeMetadata{Name: "TCP", Decoder: /* gopacket.DecodeFunc(decodeTCP)} */ generateDecoders()}) -// LayerTypeUDP = gopacket.RegisterLayerType(45, gopacket.LayerTypeMetadata{Name: "UDP", Decoder: /* gopacket.DecodeFunc(decodeUDP)} */ generateDecoders()}) -// LayerTypeIPv6HopByHop = gopacket.RegisterLayerType(46, gopacket.LayerTypeMetadata{Name: "IPv6HopByHop", Decoder: /* gopacket.DecodeFunc(decodeIPv6HopByHop)} */ generateDecoders()}) -// LayerTypeIPv6Routing = gopacket.RegisterLayerType(47, gopacket.LayerTypeMetadata{Name: "IPv6Routing", Decoder: /* gopacket.DecodeFunc(decodeIPv6Routing)} */ generateDecoders()}) -// LayerTypeIPv6Fragment = gopacket.RegisterLayerType(48, gopacket.LayerTypeMetadata{Name: "IPv6Fragment", Decoder: /* gopacket.DecodeFunc(decodeIPv6Fragment)} */ generateDecoders()}) -// LayerTypeIPv6Destination = gopacket.RegisterLayerType(49, gopacket.LayerTypeMetadata{Name: "IPv6Destination", Decoder: /* gopacket.DecodeFunc(decodeIPv6Destination)} */ generateDecoders()}) -// LayerTypeIPSecAH = gopacket.RegisterLayerType(50, gopacket.LayerTypeMetadata{Name: "IPSecAH", Decoder: /* gopacket.DecodeFunc(decodeIPSecAH)} */ generateDecoders()}) -// LayerTypeIPSecESP = gopacket.RegisterLayerType(51, gopacket.LayerTypeMetadata{Name: "IPSecESP", Decoder: /* gopacket.DecodeFunc(decodeIPSecESP)} */ generateDecoders()}) -// LayerTypeUDPLite = gopacket.RegisterLayerType(52, gopacket.LayerTypeMetadata{Name: "UDPLite", Decoder: /* gopacket.DecodeFunc(decodeUDPLite)} */ generateDecoders()}) -// LayerTypeFDDI = gopacket.RegisterLayerType(53, gopacket.LayerTypeMetadata{Name: "FDDI", Decoder: /* gopacket.DecodeFunc(decodeFDDI)} */ generateDecoders()}) -// LayerTypeLoopback = gopacket.RegisterLayerType(54, gopacket.LayerTypeMetadata{Name: "Loopback", Decoder: /* gopacket.DecodeFunc(decodeLoopback)} */ generateDecoders()}) -// LayerTypeEAP = gopacket.RegisterLayerType(55, gopacket.LayerTypeMetadata{Name: "EAP", Decoder: /* gopacket.DecodeFunc(decodeEAP)} */ generateDecoders()}) -// LayerTypeEAPOL = 
gopacket.RegisterLayerType(56, gopacket.LayerTypeMetadata{Name: "EAPOL", Decoder: /* gopacket.DecodeFunc(decodeEAPOL)} */ generateDecoders()}) -// LayerTypeICMPv6 = gopacket.RegisterLayerType(57, gopacket.LayerTypeMetadata{Name: "ICMPv6", Decoder: /* gopacket.DecodeFunc(decodeICMPv6)} */ generateDecoders()}) -// LayerTypeLinkLayerDiscovery = gopacket.RegisterLayerType(58, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscovery", Decoder: /* gopacket.DecodeFunc(decodeLinkLayerDiscovery)} */ generateDecoders()}) -// LayerTypeCiscoDiscoveryInfo = gopacket.RegisterLayerType(59, gopacket.LayerTypeMetadata{Name: "CiscoDiscoveryInfo", Decoder: /* gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)} */ generateDecoders()}) -// LayerTypeLinkLayerDiscoveryInfo = gopacket.RegisterLayerType(60, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscoveryInfo", Decoder: nil}) -// LayerTypeNortelDiscovery = gopacket.RegisterLayerType(61, gopacket.LayerTypeMetadata{Name: "NortelDiscovery", Decoder: /* gopacket.DecodeFunc(decodeNortelDiscovery)} */ generateDecoders()}) -// LayerTypeIGMP = gopacket.RegisterLayerType(62, gopacket.LayerTypeMetadata{Name: "IGMP", Decoder: /* gopacket.DecodeFunc(decodeIGMP)} */ generateDecoders()}) -// LayerTypePFLog = gopacket.RegisterLayerType(63, gopacket.LayerTypeMetadata{Name: "PFLog", Decoder: /* gopacket.DecodeFunc(decodePFLog)} */ generateDecoders()}) -// LayerTypeRadioTap = gopacket.RegisterLayerType(64, gopacket.LayerTypeMetadata{Name: "RadioTap", Decoder: /* gopacket.DecodeFunc(decodeRadioTap)} */ generateDecoders()}) -// LayerTypeDot11 = gopacket.RegisterLayerType(65, gopacket.LayerTypeMetadata{Name: "Dot11", Decoder: /* gopacket.DecodeFunc(decodeDot11)} */ generateDecoders()}) -// LayerTypeDot11Ctrl = gopacket.RegisterLayerType(66, gopacket.LayerTypeMetadata{Name: "Dot11Ctrl", Decoder: /* gopacket.DecodeFunc(decodeDot11Ctrl)} */ generateDecoders()}) -// LayerTypeDot11Data = gopacket.RegisterLayerType(67, gopacket.LayerTypeMetadata{Name: "Dot11Data", Decoder: /* gopacket.DecodeFunc(decodeDot11Data)} */ generateDecoders()}) -// LayerTypeDot11DataCFAck = gopacket.RegisterLayerType(68, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFAck)} */ generateDecoders()}) -// LayerTypeDot11DataCFPoll = gopacket.RegisterLayerType(69, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFPoll)} */ generateDecoders()}) -// LayerTypeDot11DataCFAckPoll = gopacket.RegisterLayerType(70, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFAckPoll)} */ generateDecoders()}) -// LayerTypeDot11DataNull = gopacket.RegisterLayerType(71, gopacket.LayerTypeMetadata{Name: "Dot11DataNull", Decoder: /* gopacket.DecodeFunc(decodeDot11DataNull)} */ generateDecoders()}) -// LayerTypeDot11DataCFAckNoData = gopacket.RegisterLayerType(72, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFAck)} */ generateDecoders()}) -// LayerTypeDot11DataCFPollNoData = gopacket.RegisterLayerType(73, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFPoll)} */ generateDecoders()}) -// LayerTypeDot11DataCFAckPollNoData = gopacket.RegisterLayerType(74, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataCFAckPoll)} */ generateDecoders()}) -// LayerTypeDot11DataQOSData = gopacket.RegisterLayerType(75, gopacket.LayerTypeMetadata{Name: 
"Dot11DataQOSData", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSData)} */ generateDecoders()}) -// LayerTypeDot11DataQOSDataCFAck = gopacket.RegisterLayerType(76, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAck", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck)} */ generateDecoders()}) -// LayerTypeDot11DataQOSDataCFPoll = gopacket.RegisterLayerType(77, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll)} */ generateDecoders()}) -// LayerTypeDot11DataQOSDataCFAckPoll = gopacket.RegisterLayerType(78, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAckPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll)} */ generateDecoders()}) -// LayerTypeDot11DataQOSNull = gopacket.RegisterLayerType(79, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSNull", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSNull)} */ generateDecoders()}) -// LayerTypeDot11DataQOSCFPollNoData = gopacket.RegisterLayerType(80, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData)} */ generateDecoders()}) -// LayerTypeDot11DataQOSCFAckPollNoData = gopacket.RegisterLayerType(81, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFAckPoll", Decoder: /* gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData)} */ generateDecoders()}) -// LayerTypeDot11InformationElement = gopacket.RegisterLayerType(82, gopacket.LayerTypeMetadata{Name: "Dot11InformationElement", Decoder: /* gopacket.DecodeFunc(decodeDot11InformationElement)} */ generateDecoders()}) -// LayerTypeDot11CtrlCTS = gopacket.RegisterLayerType(83, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCTS", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlCTS)} */ generateDecoders()}) -// LayerTypeDot11CtrlRTS = gopacket.RegisterLayerType(84, gopacket.LayerTypeMetadata{Name: "Dot11CtrlRTS", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlRTS)} */ generateDecoders()}) -// LayerTypeDot11CtrlBlockAckReq = gopacket.RegisterLayerType(85, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAckReq", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq)} */ generateDecoders()}) -// LayerTypeDot11CtrlBlockAck = gopacket.RegisterLayerType(86, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAck", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlBlockAck)} */ generateDecoders()}) -// LayerTypeDot11CtrlPowersavePoll = gopacket.RegisterLayerType(87, gopacket.LayerTypeMetadata{Name: "Dot11CtrlPowersavePoll", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll)} */ generateDecoders()}) -// LayerTypeDot11CtrlAck = gopacket.RegisterLayerType(88, gopacket.LayerTypeMetadata{Name: "Dot11CtrlAck", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlAck)} */ generateDecoders()}) -// LayerTypeDot11CtrlCFEnd = gopacket.RegisterLayerType(89, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEnd", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlCFEnd)} */ generateDecoders()}) -// LayerTypeDot11CtrlCFEndAck = gopacket.RegisterLayerType(90, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEndAck", Decoder: /* gopacket.DecodeFunc(decodeDot11CtrlCFEndAck)} */ generateDecoders()}) -// LayerTypeDot11MgmtAssociationReq = gopacket.RegisterLayerType(91, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationReq", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtAssociationReq)} */ generateDecoders()}) -// LayerTypeDot11MgmtAssociationResp = gopacket.RegisterLayerType(92, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationResp", Decoder: /* 
gopacket.DecodeFunc(decodeDot11MgmtAssociationResp)} */ generateDecoders()}) -// LayerTypeDot11MgmtReassociationReq = gopacket.RegisterLayerType(93, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationReq", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtReassociationReq)} */ generateDecoders()}) -// LayerTypeDot11MgmtReassociationResp = gopacket.RegisterLayerType(94, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationResp", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtReassociationResp)} */ generateDecoders()}) -// LayerTypeDot11MgmtProbeReq = gopacket.RegisterLayerType(95, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeReq", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtProbeReq)} */ generateDecoders()}) -// LayerTypeDot11MgmtProbeResp = gopacket.RegisterLayerType(96, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeResp", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtProbeResp)} */ generateDecoders()}) -// LayerTypeDot11MgmtMeasurementPilot = gopacket.RegisterLayerType(97, gopacket.LayerTypeMetadata{Name: "Dot11MgmtMeasurementPilot", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot)} */ generateDecoders()}) -// LayerTypeDot11MgmtBeacon = gopacket.RegisterLayerType(98, gopacket.LayerTypeMetadata{Name: "Dot11MgmtBeacon", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtBeacon)} */ generateDecoders()}) -// LayerTypeDot11MgmtATIM = gopacket.RegisterLayerType(99, gopacket.LayerTypeMetadata{Name: "Dot11MgmtATIM", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtATIM)} */ generateDecoders()}) -// LayerTypeDot11MgmtDisassociation = gopacket.RegisterLayerType(100, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDisassociation", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtDisassociation)} */ generateDecoders()}) -// LayerTypeDot11MgmtAuthentication = gopacket.RegisterLayerType(101, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAuthentication", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtAuthentication)} */ generateDecoders()}) -// LayerTypeDot11MgmtDeauthentication = gopacket.RegisterLayerType(102, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDeauthentication", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtDeauthentication)} */ generateDecoders()}) -// LayerTypeDot11MgmtAction = gopacket.RegisterLayerType(103, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAction", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtAction)} */ generateDecoders()}) -// LayerTypeDot11MgmtActionNoAck = gopacket.RegisterLayerType(104, gopacket.LayerTypeMetadata{Name: "Dot11MgmtActionNoAck", Decoder: /* opacket.DecodeFunc(decodeDot11MgmtActionNoAck)} */ generateDecoders()}) -// LayerTypeDot11MgmtArubaWLAN = gopacket.RegisterLayerType(105, gopacket.LayerTypeMetadata{Name: "Dot11MgmtArubaWLAN", Decoder: /* gopacket.DecodeFunc(decodeDot11MgmtArubaWLAN)} */ generateDecoders()}) -// LayerTypeDot11WEP = gopacket.RegisterLayerType(106, gopacket.LayerTypeMetadata{Name: "Dot11WEP", Decoder: /* gopacket.DecodeFunc(decodeDot11WEP)} */ generateDecoders()}) -// LayerTypeDNS = gopacket.RegisterLayerType(107, gopacket.LayerTypeMetadata{Name: "DNS", Decoder: /* gopacket.DecodeFunc(decodeDNS)} */ generateDecoders()}) -// LayerTypeUSB = gopacket.RegisterLayerType(108, gopacket.LayerTypeMetadata{Name: "USB", Decoder: /* gopacket.DecodeFunc(decodeUSB)} */ generateDecoders()}) -// LayerTypeUSBRequestBlockSetup = gopacket.RegisterLayerType(109, gopacket.LayerTypeMetadata{Name: "USBRequestBlockSetup", Decoder: /* gopacket.DecodeFunc(decodeUSBRequestBlockSetup)} */ generateDecoders()}) -// LayerTypeUSBControl = 
gopacket.RegisterLayerType(110, gopacket.LayerTypeMetadata{Name: "USBControl", Decoder: /* gopacket.DecodeFunc(decodeUSBControl)} */ generateDecoders()}) -// LayerTypeUSBInterrupt = gopacket.RegisterLayerType(111, gopacket.LayerTypeMetadata{Name: "USBInterrupt", Decoder: /* gopacket.DecodeFunc(decodeUSBInterrupt)} */ generateDecoders()}) -// LayerTypeUSBBulk = gopacket.RegisterLayerType(112, gopacket.LayerTypeMetadata{Name: "USBBulk", Decoder: /* gopacket.DecodeFunc(decodeUSBBulk)} */ generateDecoders()}) -// LayerTypeLinuxSLL = gopacket.RegisterLayerType(113, gopacket.LayerTypeMetadata{Name: "Linux SLL", Decoder: /* gopacket.DecodeFunc(decodeLinuxSLL)} */ generateDecoders()}) -// LayerTypeSFlow = gopacket.RegisterLayerType(114, gopacket.LayerTypeMetadata{Name: "SFlow", Decoder: /* gopacket.DecodeFunc(decodeSFlow)} */ generateDecoders()}) -// LayerTypePrismHeader = gopacket.RegisterLayerType(115, gopacket.LayerTypeMetadata{Name: "Prism monitor mode header", Decoder: /* gopacket.DecodeFunc(decodePrismHeader)} */ generateDecoders()}) -// LayerTypeVXLAN = gopacket.RegisterLayerType(116, gopacket.LayerTypeMetadata{Name: "VXLAN", Decoder: /* gopacket.DecodeFunc(decodeVXLAN)} */ generateDecoders()}) -// LayerTypeNTP = gopacket.RegisterLayerType(117, gopacket.LayerTypeMetadata{Name: "NTP", Decoder: /* opacket.DecodeFunc(decodeNTP)} */ generateDecoders()}) -// LayerTypeDHCPv4 = gopacket.RegisterLayerType(118, gopacket.LayerTypeMetadata{Name: "DHCPv4", Decoder: /* gopacket.DecodeFunc(decodeDHCPv4)} */ generateDecoders()}) -// LayerTypeVRRP = gopacket.RegisterLayerType(119, gopacket.LayerTypeMetadata{Name: "VRRP", Decoder: /* gopacket.DecodeFunc(decodeVRRP)} */ generateDecoders()}) -// LayerTypeGeneve = gopacket.RegisterLayerType(120, gopacket.LayerTypeMetadata{Name: "Geneve", Decoder: /* gopacket.DecodeFunc(decodeGeneve)} */ generateDecoders()}) -// LayerTypeSTP = gopacket.RegisterLayerType(121, gopacket.LayerTypeMetadata{Name: "STP", Decoder: /* gopacket.DecodeFunc(decodeSTP)} */ generateDecoders()}) - LayerTypeBFD = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: /* gopacket.DecodeFunc(decodeBFD)} */ generateDecoders()}) -// LayerTypeOSPF = gopacket.RegisterLayerType(123, gopacket.LayerTypeMetadata{Name: "OSPF", Decoder: /* gopacket.DecodeFunc(decodeOSPF)} */ generateDecoders()}) -// LayerTypeICMPv6RouterSolicitation = gopacket.RegisterLayerType(124, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterSolicitation", Decoder: /* gopacket.DecodeFunc(decodeICMPv6RouterSolicitation)} */ generateDecoders()}) -// LayerTypeICMPv6RouterAdvertisement = gopacket.RegisterLayerType(125, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterAdvertisement", Decoder: /* gopacket.DecodeFunc(decodeICMPv6RouterAdvertisement)} */ generateDecoders()}) -// LayerTypeICMPv6NeighborSolicitation = gopacket.RegisterLayerType(126, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborSolicitation", Decoder: /* gopacket.DecodeFunc(decodeICMPv6NeighborSolicitation)} */ generateDecoders()}) -// LayerTypeICMPv6NeighborAdvertisement = gopacket.RegisterLayerType(127, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborAdvertisement", Decoder: /* gopacket.DecodeFunc(decodeICMPv6NeighborAdvertisement)} */ generateDecoders()}) -// LayerTypeICMPv6Redirect = gopacket.RegisterLayerType(128, gopacket.LayerTypeMetadata{Name: "ICMPv6Redirect", Decoder: /* gopacket.DecodeFunc(decodeICMPv6Redirect)} */ generateDecoders()}) -// LayerTypeGTPv1U = gopacket.RegisterLayerType(129, gopacket.LayerTypeMetadata{Name: "GTPv1U", Decoder: /* 
gopacket.DecodeFunc(decodeGTPv1u)} */ generateDecoders()}) -// LayerTypeEAPOLKey = gopacket.RegisterLayerType(130, gopacket.LayerTypeMetadata{Name: "EAPOLKey", Decoder: /* gopacket.DecodeFunc(decodeEAPOLKey)} */ generateDecoders()}) -// LayerTypeLCM = gopacket.RegisterLayerType(131, gopacket.LayerTypeMetadata{Name: "LCM", Decoder: /* gopacket.DecodeFunc(decodeLCM)} */ generateDecoders()}) -// LayerTypeICMPv6Echo = gopacket.RegisterLayerType(132, gopacket.LayerTypeMetadata{Name: "ICMPv6Echo", Decoder: /* gopacket.DecodeFunc(decodeICMPv6Echo)} */ generateDecoders()}) -// LayerTypeSIP = gopacket.RegisterLayerType(133, gopacket.LayerTypeMetadata{Name: "SIP", Decoder: /* gopacket.DecodeFunc(decodeSIP)} */ generateDecoders()}) -// LayerTypeDHCPv6 = gopacket.RegisterLayerType(134, gopacket.LayerTypeMetadata{Name: "DHCPv6", Decoder: /* gopacket.DecodeFunc(decodeDHCPv6)} */ generateDecoders()}) -// LayerTypeMLDv1MulticastListenerReport = gopacket.RegisterLayerType(135, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerReport", Decoder: /* gopacket.DecodeFunc(decodeMLDv1MulticastListenerReport)} */ generateDecoders()}) -// LayerTypeMLDv1MulticastListenerDone = gopacket.RegisterLayerType(136, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerDone", Decoder: /* gopacket.DecodeFunc(decodeMLDv1MulticastListenerDone)} */ generateDecoders()}) -// LayerTypeMLDv1MulticastListenerQuery = gopacket.RegisterLayerType(137, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerQuery", Decoder: /* gopacket.DecodeFunc(decodeMLDv1MulticastListenerQuery)} */ generateDecoders()}) -// LayerTypeMLDv2MulticastListenerReport = gopacket.RegisterLayerType(138, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerReport", Decoder: /* gopacket.DecodeFunc(decodeMLDv2MulticastListenerReport)} */ generateDecoders()}) -// LayerTypeMLDv2MulticastListenerQuery = gopacket.RegisterLayerType(139, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerQuery", Decoder: /* gopacket.DecodeFunc(decodeMLDv2MulticastListenerQuery)} */ generateDecoders()}) -// LayerTypeTLS = gopacket.RegisterLayerType(140, gopacket.LayerTypeMetadata{Name: "TLS", Decoder: /* gopacket.DecodeFunc(decodeTLS)} */ generateDecoders()}) -// LayerTypeModbusTCP = gopacket.RegisterLayerType(141, gopacket.LayerTypeMetadata{Name: "ModbusTCP", Decoder: /* gopacket.DecodeFunc(decodeModbusTCP)} */ generateDecoders()}) -// LayerTypeRMCP = gopacket.RegisterLayerType(142, gopacket.LayerTypeMetadata{Name: "RMCP", Decoder: /* gopacket.DecodeFunc(decodeRMCP)} */ generateDecoders()}) -// LayerTypeASF = gopacket.RegisterLayerType(143, gopacket.LayerTypeMetadata{Name: "ASF", Decoder: /* gopacket.DecodeFunc(decodeASF)} */ generateDecoders()}) -// LayerTypeASFPresencePong = gopacket.RegisterLayerType(144, gopacket.LayerTypeMetadata{Name: "ASFPresencePong", Decoder: /* gopacket.DecodeFunc(decodeASFPresencePong)} */ generateDecoders()}) -// LayerTypeERSPANII = gopacket.RegisterLayerType(145, gopacket.LayerTypeMetadata{Name: "ERSPAN Type II", Decoder: /* gopacket.DecodeFunc(decodeERSPANII)} */ generateDecoders()}) -// LayerTypeRADIUS = gopacket.RegisterLayerType(146, gopacket.LayerTypeMetadata{Name: "RADIUS", Decoder: /* gopacket.DecodeFunc(decodeRADIUS)} */ generateDecoders()}) + LayerTypeBFD = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: /* gopacket.DecodeFunc(decodeBFD)} */ generateDecoders()}) ) - -/* -var ( - // LayerClassIPNetwork contains TCP/IP network layer types. 
- LayerClassIPNetwork = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeIPv4, - LayerTypeIPv6, - }) - // LayerClassIPTransport contains TCP/IP transport layer types. - LayerClassIPTransport = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeTCP, - LayerTypeUDP, - LayerTypeSCTP, - }) - // LayerClassIPControl contains TCP/IP control protocols. - LayerClassIPControl = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeICMPv4, - LayerTypeICMPv6, - }) - // LayerClassSCTPChunk contains SCTP chunk types (not the top-level SCTP - // layer). - LayerClassSCTPChunk = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeSCTPUnknownChunkType, - LayerTypeSCTPData, - LayerTypeSCTPInit, - LayerTypeSCTPSack, - LayerTypeSCTPHeartbeat, - LayerTypeSCTPError, - LayerTypeSCTPShutdown, - LayerTypeSCTPShutdownAck, - LayerTypeSCTPCookieEcho, - LayerTypeSCTPEmptyLayer, - LayerTypeSCTPInitAck, - LayerTypeSCTPHeartbeatAck, - LayerTypeSCTPAbort, - LayerTypeSCTPShutdownComplete, - LayerTypeSCTPCookieAck, - }) - // LayerClassIPv6Extension contains IPv6 extension headers. - LayerClassIPv6Extension = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeIPv6HopByHop, - LayerTypeIPv6Routing, - LayerTypeIPv6Fragment, - LayerTypeIPv6Destination, - }) - LayerClassIPSec = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeIPSecAH, - LayerTypeIPSecESP, - }) - // LayerClassICMPv6NDP contains ICMPv6 neighbor discovery protocol - // messages. - LayerClassICMPv6NDP = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeICMPv6RouterSolicitation, - LayerTypeICMPv6RouterAdvertisement, - LayerTypeICMPv6NeighborSolicitation, - LayerTypeICMPv6NeighborAdvertisement, - LayerTypeICMPv6Redirect, - }) - // LayerClassMLDv1 contains multicast listener discovery protocol - LayerClassMLDv1 = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeMLDv1MulticastListenerQuery, - LayerTypeMLDv1MulticastListenerReport, - LayerTypeMLDv1MulticastListenerDone, - }) - // LayerClassMLDv2 contains multicast listener discovery protocol v2 - LayerClassMLDv2 = gopacket.NewLayerClass([]gopacket.LayerType{ - LayerTypeMLDv1MulticastListenerReport, - LayerTypeMLDv1MulticastListenerDone, - LayerTypeMLDv2MulticastListenerReport, - LayerTypeMLDv1MulticastListenerQuery, - LayerTypeMLDv2MulticastListenerQuery, - }) -) -*/ \ No newline at end of file diff --git a/verification/dependencies/github.com/google/gopacket/layertype.gobra b/verification/dependencies/github.com/google/gopacket/layertype.gobra index 11f4b5851..018d721dc 100644 --- a/verification/dependencies/github.com/google/gopacket/layertype.gobra +++ b/verification/dependencies/github.com/google/gopacket/layertype.gobra @@ -19,7 +19,7 @@ initEnsures forall t LayerType :: { Registered(t) } t < 0 ==> !Registered(t) initEnsures forall t LayerType :: { Registered(t) } 3 < t ==> !Registered(t) package gopacket -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" /** Types **/ type LayerType int64 @@ -185,7 +185,7 @@ func OverrideLayerType(num int, meta LayerTypeMetadata) (res LayerType) { } preserves c.Mem() -preserves slices.AbsSlice_Bytes(data, 0, len(data)) +preserves sl.Bytes(data, 0, len(data)) ensures err != nil ==> err.ErrorMem() decreases func (t LayerType) Decode(data []byte, c PacketBuilder) (err error) diff --git a/verification/dependencies/github.com/google/gopacket/packet.gobra b/verification/dependencies/github.com/google/gopacket/packet.gobra index 1790dd5e2..b410e4715 100644 --- 
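Note on the rename used throughout the rest of this patch: the byte-slice permission predicate `slices.AbsSlice_Bytes(b, 0, len(b))` becomes `sl.Bytes(b, 0, len(b))`, with the `slices` import aliased to `sl`. A minimal sketch of a client spec written against the renamed predicate, assuming `Bytes` keeps the old meaning (exclusive permission to the byte range `[0, len(b))`); `readHeader` is a hypothetical function, not part of this patch:

```gobra
package example

import sl "github.com/scionproto/scion/verification/utils/slices"

// Hypothetical client: the full permission to b is borrowed for the
// duration of the call and handed back on return.
preserves sl.Bytes(b, 0, len(b))
ensures err != nil ==> err.ErrorMem()
decreases
func readHeader(b []byte) (err error)
```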
diff --git a/verification/dependencies/github.com/google/gopacket/packet.gobra b/verification/dependencies/github.com/google/gopacket/packet.gobra
index 1790dd5e2..b410e4715 100644
--- a/verification/dependencies/github.com/google/gopacket/packet.gobra
+++ b/verification/dependencies/github.com/google/gopacket/packet.gobra
@@ -10,7 +10,7 @@ package gopacket
 import (
     "time"
-    "github.com/scionproto/scion/verification/utils/slices"
+    sl "github.com/scionproto/scion/verification/utils/slices"
 )
 type CaptureInfo struct {
@@ -105,8 +105,8 @@ type Packet interface {
     requires Mem()
     // (VerifiedSCION) not sure if we need this
-    // ensures slices.AbsSlice_Bytes(res, 0, len(res))
-    // ensures slices.AbsSlice_Bytes(res, 0, len(res)) --* Mem()
+    // ensures sl.Bytes(res, 0, len(res))
+    // ensures sl.Bytes(res, 0, len(res)) --* Mem()
     decreases
     Data() (res []byte)
diff --git a/verification/dependencies/github.com/google/gopacket/parser.gobra b/verification/dependencies/github.com/google/gopacket/parser.gobra
index 6298e29af..df1f93d41 100644
--- a/verification/dependencies/github.com/google/gopacket/parser.gobra
+++ b/verification/dependencies/github.com/google/gopacket/parser.gobra
@@ -9,7 +9,7 @@
 package gopacket
 import (
-    "github.com/scionproto/scion/verification/utils/slices"
+    sl "github.com/scionproto/scion/verification/utils/slices"
     . "github.com/scionproto/scion/verification/utils/definitions"
 )
@@ -17,7 +17,7 @@ import (
 // values into specified slice. Returns either first encountered
 // unsupported LayerType value or decoding error. In case of success,
 // returns (LayerTypeZero, nil).
-preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R13)
+preserves acc(sl.Bytes(b, 0, len(b)), R13)
 preserves acc(ltypes, R13) && acc(*ltypes, R13)
 ensures err != nil ==> err.ErrorMem()
 func decodingLayerFunc_spec(b []byte, ltypes *[]LayerType) (l LayerType, err error)
@@ -28,7 +28,7 @@ type DecodingLayer interface {
     requires NonInitMem()
     requires df != nil
-    preserves slices.AbsSlice_Bytes(data, 0, len(data))
+    preserves acc(sl.Bytes(data, 0, len(data)), R40)
     preserves df.Mem()
     ensures res == nil ==> Mem(data)
     ensures res != nil ==> (NonInitMem() && res.ErrorMem())
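Besides the rename, the `DecodingLayer` contract above is weakened from full write access to the read fraction `R40`. A sketch of what this buys a caller, assuming the usual ordering of the `R`-constants in `verification/utils/definitions` (larger index means a smaller fraction, so `R40 < R15`); `decodeSketch` is hypothetical:

```gobra
package gopacket

import sl "github.com/scionproto/scion/verification/utils/slices"
import . "github.com/scionproto/scion/verification/utils/definitions"

requires df != nil && df.Mem()
requires l.NonInitMem()
requires acc(sl.Bytes(data, 0, len(data)), R15)
func decodeSketch(l DecodingLayer, data []byte, df DecodeFeedback) {
    res := l.DecodeFromBytes(data, df)
    if res == nil {
        // The layer is initialized (Mem(data) holds), and because the call
        // borrowed only R40 of the buffer, a strictly positive read
        // fraction of data remains with the caller even now.
    }
}
```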
"verification/utils/definitions" +import sl "verification/utils/slices" +import "verification/utils/seqs" type SerializableLayer interface { pred Mem(ubuf []byte) @@ -17,7 +18,7 @@ type SerializableLayer interface { requires !opts.FixLengths requires b != nil && b.Mem() requires Mem(ubuf) - preserves slices.AbsSlice_Bytes(ubuf, 0, len(ubuf)) + preserves sl.Bytes(ubuf, 0, len(ubuf)) ensures err == nil ==> (Mem(ubuf) && b.Mem()) ensures err != nil ==> err.ErrorMem() decreases @@ -35,7 +36,7 @@ type SerializeOptions struct { type SerializeBuffer interface { pred Mem() - // morally, corresponds to slices.AbsSlice_Bytes(ub, 0, len(ub)) --* (Mem() && UBuf() === ub) + // morally, corresponds to sl.Bytes(ub, 0, len(ub)) --* (Mem() && UBuf() === ub) pred MemWithoutUBuf(ub []byte) ghost @@ -44,25 +45,34 @@ type SerializeBuffer interface { decreases UBuf() []byte + ghost + pure + requires acc(Mem(), _) + decreases + View() (ghost res seq[byte]) + ghost requires Mem() ensures res === old(UBuf()) - ensures slices.AbsSlice_Bytes(res, 0, len(res)) + ensures sl.Bytes(res, 0, len(res)) ensures MemWithoutUBuf(res) + ensures old(View()) == seqs.ToSeqByte(res) decreases ExchangePred() (res []byte) ghost requires MemWithoutUBuf(ub) - requires slices.AbsSlice_Bytes(ub, 0, len(ub)) + requires sl.Bytes(ub, 0, len(ub)) ensures Mem() && UBuf() === ub + ensures View() == old(seqs.ToSeqByte(ub)) decreases RestoreMem(ghost ub []byte) requires Mem() ensures res === old(UBuf()) - ensures slices.AbsSlice_Bytes(res, 0, len(res)) + ensures sl.Bytes(res, 0, len(res)) ensures MemWithoutUBuf(res) + ensures old(View()) == seqs.ToSeqByte(res) decreases Bytes() (res []byte) diff --git a/verification/dependencies/github.com/prometheus/client_golang/prometheus/desc.gobra b/verification/dependencies/github.com/prometheus/client_golang/prometheus/desc.gobra index 97e378442..b45c4f15f 100644 --- a/verification/dependencies/github.com/prometheus/client_golang/prometheus/desc.gobra +++ b/verification/dependencies/github.com/prometheus/client_golang/prometheus/desc.gobra @@ -42,7 +42,7 @@ type Desc struct { help string // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. - // (joao) Not needed for our purposes + // (VerifiedSCION) Not needed for our purposes // constLabelPairs []*dto.LabelPair // variableLabels contains names of labels for which the metric // maintains variable values. diff --git a/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra b/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra index 04393ff96..e64ce246e 100644 --- a/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra +++ b/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra @@ -39,7 +39,7 @@ type Metric interface { // Write encodes the Metric into a "Metric" Protocol Buffer data // transmission object. 
diff --git a/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra b/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra
index 04393ff96..e64ce246e 100644
--- a/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra
+++ b/verification/dependencies/github.com/prometheus/client_golang/prometheus/metric.gobra
@@ -39,7 +39,7 @@ type Metric interface {
     // Write encodes the Metric into a "Metric" Protocol Buffer data
     // transmission object.
-    // (joao) Not supported; we do not need to go down this rabbit hole for our purposes
+    // (VerifiedSCION) Not supported; we do not need to go down this rabbit hole for our purposes
     // Write(*dto.Metric) error
 }
diff --git a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra
index 4d28c738d..b985da7b2 100644
--- a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra
+++ b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra
@@ -42,19 +42,18 @@ type Message struct {
     NN int // # of bytes read or written from/to OOB
     Flags int // protocol-specific information on the received message
-    // (VerifiedSCION) the following are, morally, ghost fields:
     // is it still ok to read the Addr of the Message?
-    IsActive bool
+    ghost IsActive bool
     // do we have a fixed amount of perms to the Addr or a wildcard amount?
-    WildcardPerm bool
+    ghost WildcardPerm bool
 }
 pred (m *Message) Mem() {
     acc(m) &&
     len(m.Buffers) == 1 &&
     acc(&m.Buffers[0]) &&
-    sl.AbsSlice_Bytes(m.Buffers[0], 0, len(m.Buffers[0])) &&
-    sl.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) &&
+    sl.Bytes(m.Buffers[0], 0, len(m.Buffers[0])) &&
+    sl.Bytes(m.OOB, 0, len(m.OOB)) &&
     // typeOf(m.Addr) == type[*net.UDPAddr] &&
     ((m.Addr != nil && m.IsActive && !m.WildcardPerm) ==> m.Addr.Mem()) &&
     ((m.Addr != nil && m.IsActive && m.WildcardPerm) ==> acc(m.Addr.Mem(), _)) &&
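With `IsActive` and `WildcardPerm` turned into `ghost` fields, they exist only for verification and are erased from the compiled program; run-time code can no longer read them, but specs and folds still can. A sketch mirroring the fold in socket_test.gobra further below (hypothetical initializer; `Addr` stays nil so the conditional `Addr.Mem()` permissions hold vacuously):

```gobra
package socket

import sl "github.com/scionproto/scion/verification/utils/slices"

func initMessageSketch() {
    var m@ Message
    m.IsActive = true      // ghost field write, erased at run time
    m.WildcardPerm = false // ghost field write, erased at run time
    fold sl.Bytes(m.OOB, 0, len(m.OOB))
    m.Buffers = make([][]byte, 1)
    fold sl.Bytes(m.Buffers[0], 0, len(m.Buffers[0]))
    fold m.Mem() // Addr == nil, so the Addr.Mem() implications hold vacuously
}
```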
diff --git a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra.old b/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra.old
deleted file mode 100644
index e61f20e9f..000000000
--- a/verification/dependencies/golang.org/x/net/internal/socket/socket.gobra.old
+++ /dev/null
@@ -1,186 +0,0 @@
-// Specification for package "golang.org/x/net/internal/socket"
-// Based on file https://github.com/golang/net/blob/master/internal/socket/socket.go
-
-package socket
-
-import (
-    "net"
-
-    sl "github.com/scionproto/scion/verification/utils/slices"
-)
-
-
-// A Message represents an IO message.
-type Message struct {
-    // When writing, the Buffers field must contain at least one
-    // byte to write.
-    // When reading, the Buffers field will always contain a byte
-    // to read.
-    Buffers [][]byte
-
-    // OOB contains protocol-specific control or miscellaneous
-    // ancillary data known as out-of-band data.
-    OOB []byte
-
-    // Addr specifies a destination address when writing.
-    // It can be nil when the underlying protocol of the raw
-    // connection uses connection-oriented communication.
-    // After a successful read, it may contain the source address
-    // on the received packet.
-    Addr net.Addr
-
-    N int // # of bytes read or written from/to Buffers
-    NN int // # of bytes read or written from/to OOB
-    Flags int // protocol-specific information on the received message
-
-    // (VerifiedSCION) the following are, morally, ghost fields:
-    // is it still ok to read the buffers and Addr of the Message?
-    IsActive bool
-    // do we have a fixed amount of perms to the Addr a wildcard amount?
-    WildcardPerm bool
-}
-
-pred (m *Message) Mem(lenBuffers int) {
-    acc(m) &&
-    len(m.Buffers) == lenBuffers &&
-    (forall i int :: { &m.Buffers[i] } 0 <= i && i < len(m.Buffers) ==> acc(&m.Buffers[i])) &&
-    (m.IsActive ==>
-        (forall i int :: { &m.Buffers[i] } 0 <= i && i < len(m.Buffers) ==>
-            sl.AbsSlice_Bytes(m.Buffers[i], 0, len(m.Buffers[i])))) &&
-    sl.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) &&
-    // typeOf(m.Addr) == type[*net.UDPAddr] &&
-    ((m.Addr != nil && m.IsActive && !m.WildcardPerm) ==> m.Addr.Mem()) &&
-    ((m.Addr != nil && m.IsActive && m.WildcardPerm) ==> acc(m.Addr.Mem(), _)) &&
-    0 <= m.N
-}
-
-pred (m *Message) MemWithoutHalf(lenBuffers int) {
-    acc(m, 1/2) &&
-    len(m.Buffers) == lenBuffers &&
-    (forall i int :: { &m.Buffers[i] } 0 <= i && i < len(m.Buffers) ==> acc(&m.Buffers[i])) &&
-    (m.IsActive ==>
-        (forall i int :: { &m.Buffers[i] } 0 <= i && i < len(m.Buffers) ==>
-            sl.AbsSlice_Bytes(m.Buffers[i], 0, len(m.Buffers[i])))) &&
-    sl.AbsSlice_Bytes(m.OOB, 0, len(m.OOB)) &&
-    // typeOf(m.Addr) == type[*net.UDPAddr] &&
-    ((m.Addr != nil && m.IsActive && !m.WildcardPerm) ==> m.Addr.Mem()) &&
-    ((m.Addr != nil && m.IsActive && m.WildcardPerm) ==> acc(m.Addr.Mem(), _)) &&
-    0 <= m.N
-}
-
-ghost
-requires acc(m.Mem(lenBuffers), _)
-decreases
-pure func (m *Message) HasWildcardPermAddr(lenBuffers int) bool {
-    return unfolding acc(m.Mem(lenBuffers), _) in m.WildcardPerm
-}
-
-ghost
-requires acc(m.Mem(lenBuffers), _)
-decreases
-pure func (m *Message) HasActiveBuffers(lenBuffers int) bool {
-    return unfolding acc(m.Mem(lenBuffers), _) in m.IsActive
-}
-
-ghost
-requires acc(m.Mem(lenBuffers), _)
-decreases
-pure func (m *Message) GetAddr(lenBuffers int) net.Addr {
-    return unfolding acc(m.Mem(lenBuffers), _) in m.Addr
-}
-
-ghost
-requires acc(m.Mem(lenBuffers), _)
-decreases
-pure func (m *Message) GetMessage(lenBuffers int) Message {
-    return unfolding acc(m.Mem(lenBuffers), _) in *m
-}
-
-ghost
-requires acc(m.MemWithoutHalf(lenBuffers), _)
-decreases
-pure func (m *Message) GetAddrWithoutHalf(lenBuffers int) net.Addr {
-    return unfolding acc(m.MemWithoutHalf(lenBuffers), _) in m.Addr
-}
-
-ghost
-requires acc(m.Mem(lenBuffers), _)
-requires 0 <= i && i < lenBuffers
-decreases
-pure func (m *Message) GetBuffer(lenBuffers int, i int) []byte {
-    return unfolding acc(m.Mem(lenBuffers), _) in m.Buffers[i]
-}
-
-// Only defined for the case where lenBuffers == 1
-ghost
-requires acc(m.Mem(1), _)
-decreases
-pure func (m *Message) GetFstBuffer() []byte {
-    return unfolding acc(m.Mem(1), _) in m.Buffers[0]
-}
-
-// Only defined for the case where lenBuffers == 1
-ghost
-requires acc(m.Mem(1), _)
-decreases
-pure func (m *Message) GetN() int {
-    return unfolding acc(m.Mem(1), _) in m.N
-}
-
-ghost
-requires m.Mem(1)
-ensures acc(m, 1/2) && m.MemWithoutHalf(1)
-ensures old(m.GetAddr(1)) === m.GetAddrWithoutHalf(1)
-ensures m.N == old(unfolding m.Mem(1) in m.N)
-ensures m.Buffers === old(unfolding m.Mem(1) in m.Buffers)
-ensures old(m.GetFstBuffer()) === unfolding m.MemWithoutHalf(1) in m.Buffers[0]
-ensures old(m.GetN()) == m.N
-ensures old(m.HasWildcardPermAddr(1)) == m.WildcardPerm
-ensures old(m.HasActiveBuffers(1)) == m.IsActive
-ensures old(m.GetMessage(1)) === *m
-decreases
-func (m *Message) SplitPerm() {
-    unfold m.Mem(1)
-    fold m.MemWithoutHalf(1)
-}
-
-ghost
-requires acc(m, 1/2) && m.MemWithoutHalf(1)
-ensures m.Mem(1)
-ensures m.GetAddr(1) === old(m.GetAddrWithoutHalf(1))
-ensures old(m.N) == unfolding m.Mem(1) in m.N
-ensures m.GetFstBuffer() === old(unfolding m.MemWithoutHalf(1) in m.Buffers[0])
-ensures unfolding m.Mem(1) in m.Buffers === old(m.Buffers)
-ensures m.GetN() == old(m.N)
-ensures m.HasWildcardPermAddr(1) == old(m.WildcardPerm)
-ensures m.HasActiveBuffers(1) == old(m.IsActive)
-ensures m.GetMessage(1) === old(*m)
-decreases
-func (m *Message) CombinePerm() {
-    unfold m.MemWithoutHalf(1)
-    fold m.Mem(1)
-}
-
-ghost
-requires forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> msgs[j].Mem(1)
-ensures forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> acc(&msgs[j], 1/2) && msgs[j].MemWithoutHalf(1)
-ensures forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> old(msgs[j].GetMessage(1)) === msgs[j]
-ensures forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> msgs[j].WildcardPerm == old(msgs[j].HasWildcardPermAddr(1))
-ensures forall j int :: { &msgs[j] } 0 <= j && j < len(msgs) ==> msgs[j].IsActive == old(msgs[j].HasActiveBuffers(1))
-decreases
-func SplitPermMsgs(msgs []Message) {
-    invariant 0 <= i && i <= len(msgs)
-    invariant forall j int :: { &msgs[j] } i <= j && j < len(msgs) ==> msgs[j].Mem(1)
-    invariant forall j int :: { &msgs[j] } 0 <= j && j < i ==> acc(&msgs[j], 1/2) && msgs[j].MemWithoutHalf(1)
-    invariant forall j int :: { &msgs[j] } 0 <= j && j < i ==> msgs[j].WildcardPerm == old(msgs[j].HasWildcardPermAddr(1))
-    invariant forall j int :: { &msgs[j] } 0 <= j && j < i ==> msgs[j].IsActive == old(msgs[j].HasActiveBuffers(1))
-    invariant forall j int :: { &msgs[j] } 0 <= j && j < i ==> msgs[j] === old(msgs[j].GetMessage(1))
-    invariant forall j int :: { &msgs[j] } i <= j && j < len(msgs) ==> msgs[j].GetMessage(1) === old(msgs[j].GetMessage(1))
-    decreases len(msgs) - i
-    for i := 0; i < len(msgs); i++ {
-        assert forall j int :: { &msgs[j] }{ &msgs[j].WildcardPerm } 0 <= j && j < i ==> msgs[j].WildcardPerm == old(msgs[j].HasWildcardPermAddr(1))
-        msgs[i].SplitPerm()
-        assert forall j int :: { &msgs[j] } 0 <= j && j < i ==> msgs[j].WildcardPerm == old(msgs[j].HasWildcardPermAddr(1))
-        assert msgs[i].WildcardPerm == old(msgs[i].HasWildcardPermAddr(1))
-    }
-}
\ No newline at end of file
diff --git a/verification/dependencies/golang.org/x/net/internal/socket/socket_test.gobra b/verification/dependencies/golang.org/x/net/internal/socket/socket_test.gobra
index 21b652166..9008c38e0 100644
--- a/verification/dependencies/golang.org/x/net/internal/socket/socket_test.gobra
+++ b/verification/dependencies/golang.org/x/net/internal/socket/socket_test.gobra
@@ -16,12 +16,12 @@
 package socket
-import "github.com/scionproto/scion/verification/utils/slices"
+import sl "github.com/scionproto/scion/verification/utils/slices"
 func foldMem_test() {
     var m@ Message
-    fold slices.AbsSlice_Bytes(m.OOB, 0, len(m.OOB))
+    fold sl.Bytes(m.OOB, 0, len(m.OOB))
     m.Buffers = make([][]byte, 1)
-    fold slices.AbsSlice_Bytes(m.Buffers[0], 0, len(m.Buffers[0]))
+    fold sl.Bytes(m.Buffers[0], 0, len(m.Buffers[0]))
     fold m.Mem()
 }
\ No newline at end of file
diff --git a/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra b/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra
index 38c0cf5e7..b32881ddc 100644
--- a/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra
+++ b/verification/dependencies/golang.org/x/net/ipv4/endpoint.gobra
@@ -46,74 +46,41 @@ type PacketConn struct {
 // SetDeadline sets the read and write deadlines associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 decreases _
-func (c *PacketConn) SetDeadline(t time.Time) (e error) {
-    if !c.payloadHandler.ok() {
-        return errInvalidConn
-    }
-    return c.payloadHandler.PacketConn.SetDeadline(t)
-}
+func (c *PacketConn) SetDeadline(t time.Time) (e error)
 // SetReadDeadline sets the read deadline associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 decreases _
-func (c *PacketConn) SetReadDeadline(t time.Time) (e error) {
-    if !c.payloadHandler.ok() {
-        return errInvalidConn
-    }
-    return c.payloadHandler.PacketConn.SetReadDeadline(t)
-}
+func (c *PacketConn) SetReadDeadline(t time.Time) (e error)
 // SetWriteDeadline sets the write deadline associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 decreases _
-func (c *PacketConn) SetWriteDeadline(t time.Time) (e error) {
-    if !c.payloadHandler.ok() {
-        return errInvalidConn
-    }
-    return c.payloadHandler.PacketConn.SetWriteDeadline(t)
-}
+func (c *PacketConn) SetWriteDeadline(t time.Time) (e error)
 // Close closes the endpoint.
-trusted
 requires c.Mem()
 ensures e != nil ==> e.ErrorMem()
 decreases _
-func (c *PacketConn) Close() (e error) {
-    if !c.payloadHandler.ok() {
-        return errInvalidConn
-    }
-    return c.payloadHandler.PacketConn.Close()
-}
+func (c *PacketConn) Close() (e error)
 // NewPacketConn returns a new PacketConn using c as its underlying
 // transport.
-trusted
 requires c != nil && c.Mem()
 ensures p != nil && p.Mem()
 ensures p.GetUnderlyingConn() === c
 decreases _
-func NewPacketConn(c net.PacketConn) (p *PacketConn) {
-    cc, _ := socket.NewConn(c.(net.Conn))
-    p := &PacketConn{
-        genericOpt: genericOpt{Conn: cc},
-        dgramOpt: dgramOpt{Conn: cc},
-        payloadHandler: payloadHandler{PacketConn: c, Conn: cc},
-    }
-    return p
-}
+func NewPacketConn(c net.PacketConn) (p *PacketConn)
 ghost
 requires acc(p.Mem(), _)
@@ -146,107 +113,58 @@ type RawConn struct {
 // SetDeadline sets the read and write deadlines associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 decreases _
-func (c *RawConn) SetDeadline(t time.Time) (e error) {
-    if !c.packetHandler.ok() {
-        return errInvalidConn
-    }
-    return c.packetHandler.IPConn.SetDeadline(t)
-}
+func (c *RawConn) SetDeadline(t time.Time) (e error)
 // SetReadDeadline sets the read deadline associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 decreases _
-func (c *RawConn) SetReadDeadline(t time.Time) (e error) {
-    if !c.packetHandler.ok() {
-        return errInvalidConn
-    }
-    return c.packetHandler.IPConn.SetReadDeadline(t)
-}
+func (c *RawConn) SetReadDeadline(t time.Time) (e error)
 // SetWriteDeadline sets the write deadline associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 decreases _
-func (c *RawConn) SetWriteDeadline(t time.Time) (e error) {
-    if !c.packetHandler.ok() {
-        return errInvalidConn
-    }
-    return c.packetHandler.IPConn.SetWriteDeadline(t)
-}
+func (c *RawConn) SetWriteDeadline(t time.Time) (e error)
 // Close closes the endpoint.
-trusted
 requires c.Mem()
 ensures e != nil ==> e.ErrorMem()
 decreases _
-func (c *RawConn) Close() (e error) {
-    if !c.packetHandler.ok() {
-        return errInvalidConn
-    }
-    return c.packetHandler.IPConn.Close()
-}
+func (c *RawConn) Close() (e error)
 // NewRawConn returns a new RawConn using c as its underlying
 // transport.
-trusted
 requires c.Mem()
 ensures err == nil ==> r.Mem()
 ensures err != nil ==> err.ErrorMem()
 decreases _
-func NewRawConn(c net.PacketConn) (r *RawConn, err error) {
-    cc, err := socket.NewConn(c.(net.Conn))
-    if err != nil {
-        return nil, err
-    }
-    r := &RawConn{
-        genericOpt: genericOpt{Conn: cc},
-        dgramOpt: dgramOpt{Conn: cc},
-        packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc},
-    }
-    so, ok := sockOpts[ssoHeaderPrepend]
-    if !ok {
-        return nil, errNotImplemented
-    }
-    if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil {
-        return nil, err
-    }
-    return r, nil
-}
+func NewRawConn(c net.PacketConn) (r *RawConn, err error)
 // (VerifiedSCION) Promote method from payloadHandler to PacketConn.
 // ReadBatch reads a batch of messages.
 // On a successful read it returns the number of messages received, up
 // to len(ms).
-trusted
 preserves acc(c.Mem(), _)
 preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> (&ms[i]).Mem()
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 ensures err == nil ==> (0 <= n && n <= len(ms))
 ensures err != nil ==> err.ErrorMem()
-func (c *PacketConn) ReadBatch(ms []socket.Message, flags int) (n int, err error) {
-    return c.payloadHandler.ReadBatch(ms, flags)
-}
+func (c *PacketConn) ReadBatch(ms []socket.Message, flags int) (n int, err error)
 // (VerifiedSCION) Promote method from payloadHandler to PacketConn.
 // WriteBatch writes a batch of messages.
 // It returns the number of messages written on a successful write.
-trusted
 preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> acc((&ms[i]).Mem(), R10)
 preserves acc(c.Mem(), _)
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 ensures err == nil ==> 0 <= n && n <= len(ms)
 ensures err != nil ==> err.ErrorMem()
-func (c *PacketConn) WriteBatch(ms []socket.Message, flags int) (n int, err error) {
-    return c.payloadHandler.WriteBatch(ms, flags)
-}
\ No newline at end of file
+func (c *PacketConn) WriteBatch(ms []socket.Message, flags int) (n int, err error)
\ No newline at end of file
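In this file (and its IPv6 twin below), the `trusted` bodies are dropped in favor of abstract declarations: Gobra then treats these as spec-only stubs, so nothing is checked about the implementations and clients reason purely from the contracts. A sketch of such a client for the promoted `ReadBatch` (hypothetical `readOnce`, assuming the contract shown above):

```gobra
package ipv4

import "golang.org/x/net/internal/socket"

requires acc(c.Mem(), _)
preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> (&ms[i]).Mem()
func readOnce(c *PacketConn, ms []socket.Message) {
    n, err := c.ReadBatch(ms, 0)
    if err == nil {
        assert 0 <= n && n <= len(ms) // from ReadBatch's postcondition
    }
}
```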
diff --git a/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra b/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra
index aee3a6c80..85061a9e9 100644
--- a/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra
+++ b/verification/dependencies/golang.org/x/net/ipv6/endpoint.gobra
@@ -27,36 +27,16 @@ type Conn struct {
 // PathMTU returns a path MTU value for the destination associated
 // with the endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 decreases _
-func (c *Conn) PathMTU() (n int, e error) {
-    if !c.ok() {
-        return 0, errInvalidConn
-    }
-    so, ok := sockOpts[ssoPathMTU]
-    if !ok {
-        return 0, errNotImplemented
-    }
-    _, mtu, err := so.getMTUInfo(c.Conn)
-    if err != nil {
-        return 0, err
-    }
-    return mtu, nil
-}
+func (c *Conn) PathMTU() (n int, e error)
 // NewConn returns a new Conn.
-trusted
 requires c.Mem()
 ensures conn.Mem()
 decreases _
-func NewConn(c net.Conn) (conn *Conn) {
-    cc, _ := socket.NewConn(c)
-    return &Conn{
-        genericOpt: genericOpt{Conn: cc},
-    }
-}
+func NewConn(c net.Conn) (conn *Conn)
 // A PacketConn represents a packet network endpoint that uses IPv6
 // transport. It is used to control several IP-level socket options
@@ -69,73 +49,41 @@ type PacketConn struct {
 // SetDeadline sets the read and write deadlines associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 decreases _
-func (c *PacketConn) SetDeadline(t time.Time) (e error) {
-    if !c.payloadHandler.ok() {
-        return errInvalidConn
-    }
-    return c.payloadHandler.SetDeadline(t)
-}
+func (c *PacketConn) SetDeadline(t time.Time) (e error)
 // SetReadDeadline sets the read deadline associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 decreases _
-func (c *PacketConn) SetReadDeadline(t time.Time) (e error) {
-    if !c.payloadHandler.ok() {
-        return errInvalidConn
-    }
-    return c.payloadHandler.SetReadDeadline(t)
-}
+func (c *PacketConn) SetReadDeadline(t time.Time) (e error)
 // SetWriteDeadline sets the write deadline associated with the
 // endpoint.
-trusted
 preserves c.Mem()
 ensures e != nil ==> e.ErrorMem()
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 decreases _
-func (c *PacketConn) SetWriteDeadline(t time.Time) (e error) {
-    if !c.payloadHandler.ok() {
-        return errInvalidConn
-    }
-    return c.payloadHandler.SetWriteDeadline(t)
-}
+func (c *PacketConn) SetWriteDeadline(t time.Time) (e error)
 // Close closes the endpoint.
-trusted
 requires c.Mem()
 ensures e != nil ==> e.ErrorMem()
 decreases _
-func (c *PacketConn) Close() (e error) {
-    if !c.payloadHandler.ok() {
-        return errInvalidConn
-    }
-    return c.payloadHandler.Close()
-}
+func (c *PacketConn) Close() (e error)
 // NewPacketConn returns a new PacketConn using c as its underlying
 // transport.
-trusted
 requires c != nil && c.Mem()
 ensures p != nil && p.Mem()
 ensures p.GetUnderlyingConn() === c
 decreases _
-func NewPacketConn(c net.PacketConn) (p *PacketConn) {
-    cc, _ := socket.NewConn(c.(net.Conn))
-    return &PacketConn{
-        genericOpt: genericOpt{Conn: cc},
-        dgramOpt: dgramOpt{Conn: cc},
-        payloadHandler: payloadHandler{PacketConn: c, Conn: cc},
-    }
-}
+func NewPacketConn(c net.PacketConn) (p *PacketConn)
 ghost
 requires acc(p.Mem(), _)
@@ -161,26 +109,20 @@ func (p *PacketConn) ExchangeWildcardPerm() (c net.PacketConn)
 // ReadBatch reads a batch of messages.
 // On a successful read it returns the number of messages received, up
 // to len(ms).
-trusted
 preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> (&ms[i]).Mem()
 preserves acc(c.Mem(), _)
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 ensures err == nil ==> (0 <= n && n <= len(ms))
 ensures err != nil ==> err.ErrorMem()
-func (c *PacketConn) ReadBatch(ms []socket.Message, flags int) (n int, err error) {
-    return c.payloadHandler.ReadBatch(ms, flags)
-}
+func (c *PacketConn) ReadBatch(ms []socket.Message, flags int) (n int, err error)
 // (VerifiedSCION) Promote method from payloadHandler to PacketConn.
 // WriteBatch writes a batch of messages.
 // It returns the number of messages written on a successful write.
-trusted
 preserves forall i int :: { &ms[i] } 0 <= i && i < len(ms) ==> acc((&ms[i]).Mem(), R10)
 preserves acc(c.Mem(), _)
 ensures c.GetUnderlyingConn() === old(c.GetUnderlyingConn())
 ensures err == nil ==> 0 <= n && n <= len(ms)
 ensures err != nil ==> err.ErrorMem()
-func (c *PacketConn) WriteBatch(ms []socket.Message, flags int) (n int, err error) {
-    return c.payloadHandler.WriteBatch(ms, flags)
-}
\ No newline at end of file
+func (c *PacketConn) WriteBatch(ms []socket.Message, flags int) (n int, err error)
\ No newline at end of file
diff --git a/verification/dependencies/net/ip.gobra b/verification/dependencies/net/ip.gobra
index 2619530d9..5f552f876 100644
--- a/verification/dependencies/net/ip.gobra
+++ b/verification/dependencies/net/ip.gobra
@@ -10,7 +10,7 @@
 package net
 import . "github.com/scionproto/scion/verification/utils/definitions"
-import "github.com/scionproto/scion/verification/utils/slices"
+import sl "github.com/scionproto/scion/verification/utils/slices"
 // IP address lengths (bytes).
 const (
@@ -133,8 +133,8 @@ func (ip *IP) UnmarshalText(text []byte) error
 // considered to be equal.
 // (VerifiedSCION) we consider this function to be morally pure
 pure
-requires acc(slices.AbsSlice_Bytes(ip, 0, len(ip)), _)
-requires acc(slices.AbsSlice_Bytes(x, 0, len(x)), _)
+requires acc(sl.Bytes(ip, 0, len(ip)), _)
+requires acc(sl.Bytes(x, 0, len(x)), _)
 decreases _
 func (ip IP) Equal(x IP) bool
diff --git a/verification/dependencies/net/net.gobra b/verification/dependencies/net/net.gobra
index e2ff126be..9b3003c20 100644
--- a/verification/dependencies/net/net.gobra
+++ b/verification/dependencies/net/net.gobra
@@ -13,7 +13,7 @@ import (
     "time"
     . "github.com/scionproto/scion/verification/utils/definitions"
-    "github.com/scionproto/scion/verification/utils/slices"
+    sl "github.com/scionproto/scion/verification/utils/slices"
 )
 // Addr represents a network end point address.
@@ -36,14 +36,14 @@ type Conn interface {
     // Read reads data from the connection.
     requires acc(Mem(), _)
-    preserves slices.AbsSlice_Bytes(b, 0, len(b))
+    preserves sl.Bytes(b, 0, len(b))
     ensures err == nil ==> 0 <= n && n <= len(b)
     ensures err != nil ==> err.ErrorMem()
     Read(b []byte) (n int, err error)
     // Write writes data to the connection.
     preserves acc(Mem(), _)
-    preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15)
+    preserves acc(sl.Bytes(b, 0, len(b)), R15)
     ensures err == nil ==> 0 <= n && n <= len(b)
     ensures err != nil ==> err.ErrorMem()
     Write(b []byte) (n int, err error)
@@ -93,7 +93,7 @@ type PacketConn interface {
     pred Mem()
     preserves acc(Mem(), _)
-    preserves slices.AbsSlice_Bytes(p, 0, len(p))
+    preserves sl.Bytes(p, 0, len(p))
     ensures err == nil ==> 0 <= n && n <= len(p)
     ensures err == nil ==> acc(addr.Mem(), _)
     ensures err != nil ==> err.ErrorMem()
@@ -101,7 +101,7 @@
     requires acc(addr.Mem(), _)
     preserves acc(Mem(), _)
-    preserves acc(slices.AbsSlice_Bytes(p, 0, len(p)), R15)
+    preserves acc(sl.Bytes(p, 0, len(p)), R15)
     ensures err == nil ==> 0 <= n && n <= len(p)
     ensures err != nil ==> err.ErrorMem()
     WriteTo(p []byte, addr Addr) (n int, err error)
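The `net` contracts above make the read/write asymmetry explicit: `Read` needs the whole buffer permission because it mutates `b`, while `Write` only borrows the read fraction `R15`, so callers keep most of their access to `b` across the call. A hypothetical client sketch:

```gobra
package net

import sl "github.com/scionproto/scion/verification/utils/slices"

requires acc(c.Mem(), _)
preserves sl.Bytes(b, 0, len(b))
func echoOnce(c Conn, b []byte) {
    n, err := c.Read(b)
    if err == nil && 0 < n {
        c.Write(b) // only acc(sl.Bytes(b, 0, len(b)), R15) is borrowed here
    }
}
```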
"github.com/scionproto/scion/verification/utils/definitions" -import "github.com/scionproto/scion/verification/utils/slices" +import sl "github.com/scionproto/scion/verification/utils/slices" // UDPAddr represents the address of a UDP end point. type UDPAddr struct { @@ -24,7 +24,7 @@ type UDPAddr struct { pred (a *UDPAddr) Mem() { // The second conjunct should be eventually replaced by a.IP.Mem(). // However, doing this at the moment requires changes in the VerifiedSCION codebase. - acc(a, R5) && slices.AbsSlice_Bytes(a.IP, 0, len(a.IP)) + acc(a, R5) && sl.Bytes(a.IP, 0, len(a.IP)) } (*UDPAddr) implements Addr { @@ -58,7 +58,7 @@ pred (u *UDPConn) Mem() { // ReadFromUDP acts like ReadFrom but returns a UDPAddr. preserves acc(c.Mem(), _) -preserves slices.AbsSlice_Bytes(b, 0, len(b)) +preserves sl.Bytes(b, 0, len(b)) ensures err == nil ==> 0 <= n && n <= len(b) ensures err == nil ==> acc(addr.Mem(), _) ensures err != nil ==> err.ErrorMem() @@ -66,7 +66,7 @@ func (c *UDPConn) ReadFromUDP(b []byte) (n int, addr *UDPAddr, err error) // ReadFrom implements the PacketConn ReadFrom method. preserves acc(c.Mem(), _) -preserves slices.AbsSlice_Bytes(b, 0, len(b)) +preserves sl.Bytes(b, 0, len(b)) ensures err == nil ==> 0 <= n && n <= len(b) ensures err == nil ==> acc(addr.Mem(), _) ensures err != nil ==> err.ErrorMem() @@ -82,7 +82,7 @@ func (c *UDPConn) WriteToUDP(b []byte, addr *UDPAddr) (int, error) // WriteTo implements the PacketConn WriteTo method. requires acc(addr.Mem(), _) preserves acc(c.Mem(), _) -preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) +preserves acc(sl.Bytes(b, 0, len(b)), R15) ensures err == nil ==> 0 <= n && n <= len(b) ensures err != nil ==> err.ErrorMem() func (c *UDPConn) WriteTo(b []byte, addr Addr) (n int, err error) @@ -136,7 +136,7 @@ decreases _ func (c *UDPConn) SetReadBuffer(bytes int) (err error) preserves acc(c.Mem(), _) -preserves acc(slices.AbsSlice_Bytes(b, 0, len(b)), R15) +preserves acc(sl.Bytes(b, 0, len(b)), R15) ensures err == nil ==> 0 <= n && n <= len(b) ensures err != nil ==> err.ErrorMem() func (c *UDPConn) Write(b []byte) (n int, err error) \ No newline at end of file diff --git a/verification/dependencies/strconv/itoa.gobra.verified_backup b/verification/dependencies/strconv/itoa.gobra.verified_backup deleted file mode 100644 index b42f462ad..000000000 --- a/verification/dependencies/strconv/itoa.gobra.verified_backup +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in https://golang.org/LICENSE - -// Signatures for the public declarations in file -// https://github.com/golang/go/blob/master/src/strconv/itoa.gobra - -package strconv - -// import "math/bits" - -const fastSmalls = true // enable fast path for small integers - -// FormatUint returns the string representation of i in the given base, -// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z' -// for digit values >= 10. -requires i >= 0 -requires 2 <= base && base <= 36 -decreases -func FormatUint(i uint64, base int) string { - if fastSmalls && i < nSmalls && base == 10 { - return small(int(i)) - } - _, s := formatBits(nil, i, base, false, false) - return s -} - -// FormatInt returns the string representation of i in the given base, -// for 2 <= base <= 36. The result uses the lower-case letters 'a' to 'z' -// for digit values >= 10. 
-requires 2 <= base && base <= 36 -decreases -func FormatInt(i int64, base int) string { - if fastSmalls && 0 <= i && i < nSmalls && base == 10 { - return small(int(i)) - } - _, s := formatBits(nil, uint64(i), base, i < 0, false) - return s -} - -// Itoa is equivalent to FormatInt(int64(i), 10). -decreases -func Itoa(i int) string { - return FormatInt(int64(i), 10) -} - -// AppendInt appends the string form of the integer i, -// as generated by FormatInt, to dst and returns the extended buffer. -requires 2 <= base && base <= len(digits) -requires forall i int :: 0 <= i && i < len(dst) ==> acc(&dst[i]) -ensures forall i int :: 0 <= i && i < len(res) ==> acc(&res[i]) -decreases -func AppendInt(dst []byte, i int64, base int) (res []byte) { - if fastSmalls && 0 <= i && i < nSmalls && base == 10 { - // (Gobra) unpacking strings is not yet supported. The conversion - // to []byte was introduced to overcome that - return append(perm(1/2), dst, []byte(small(int(i)))...) - } - dst, _ = formatBits(dst, uint64(i), base, i < 0, true) - return dst -} - -// AppendUint appends the string form of the unsigned integer i, -// as generated by FormatUint, to dst and returns the extended buffer. -requires 2 <= base && base <= len(digits) -requires forall i int :: 0 <= i && i < len(dst) ==> acc(&dst[i]) -ensures forall i int :: 0 <= i && i < len(res) ==> acc(&res[i]) -decreases -func AppendUint(dst []byte, i uint64, base int) (res []byte) { - // valid assumption, i is of type uint64. Currently, Gobra - // does not prove this. - assume i >= 0 - if fastSmalls && i < nSmalls && base == 10 { - // (Gobra) unpacking strings is not yet supported. The conversion - // to []byte was introduced to overcome that - return append(perm(1/2), dst, []byte(small(int(i)))...) - } - dst, _ = formatBits(dst, i, base, false, true) - return dst -} - -// small returns the string for an i with 0 <= i < nSmalls. -requires 0 <= i && i < nSmalls -decreases -func small(i int) string { - if i < 10 { - return digits[i : i+1] - } - return smallsString[i*2 : i*2+2] -} - -const nSmalls = 100 - -const smallsString = "00010203040506070809" + - "10111213141516171819" + - "20212223242526272829" + - "30313233343536373839" + - "40414243444546474849" + - "50515253545556575859" + - "60616263646566676869" + - "70717273747576777879" + - "80818283848586878889" + - "90919293949596979899" - -const host32bit = ^uint(0)>>32 == 0 - -const digits = "0123456789abcdefghijklmnopqrstuvwxyz" - -// formatBits computes the string representation of u in the given base. -// If neg is set, u is treated as negative int64 value. If append_ is -// set, the string is appended to dst and the resulting byte slice is -// returned as the first result value; otherwise the string is returned -// as the second result value. -// -requires append_ ==> forall i int :: 0 <= i && i < len(dst) ==> acc(&dst[i]) -requires 2 <= base && base <= len(digits) -ensures append_ ==> forall i int :: 0 <= i && i < len(d) ==> acc(&d[i]) -decreases _ -func formatBits(dst []byte, u uint64, base int, neg, append_ bool) (d []byte, s string) // { -// if base < 2 || base > len(digits) { -// panic("strconv: illegal AppendInt/FormatInt base") -// } -// // 2 <= base && base <= len(digits) -// -// var a [64 + 1]byte // +1 for sign of 64bit value in base 2 -// i := len(a) -// -// if neg { -// u = -u -// } -// -// // convert bits -// // We use uint values where we can because those will -// // fit into a single register even on a 32bit machine. 
-// if base == 10 { -// // common case: use constants for / because -// // the compiler can optimize it into a multiply+shift -// -// if host32bit { -// // convert the lower digits using 32bit operations -// for u >= 1e9 { -// // Avoid using r = a%b in addition to q = a/b -// // since 64bit division and modulo operations -// // are calculated by runtime functions on 32bit machines. -// q := u / 1e9 -// us := uint(u - q*1e9) // u % 1e9 fits into a uint -// for j := 4; j > 0; j-- { -// is := us % 100 * 2 -// us /= 100 -// i -= 2 -// a[i+1] = smallsString[is+1] -// a[i+0] = smallsString[is+0] -// } -// -// // us < 10, since it contains the last digit -// // from the initial 9-digit us. -// i-- -// a[i] = smallsString[us*2+1] -// -// u = q -// } -// // u < 1e9 -// } -// -// // u guaranteed to fit into a uint -// us := uint(u) -// for us >= 100 { -// is := us % 100 * 2 -// us /= 100 -// i -= 2 -// a[i+1] = smallsString[is+1] -// a[i+0] = smallsString[is+0] -// } -// -// // us < 100 -// is := us * 2 -// i-- -// a[i] = smallsString[is+1] -// if us >= 10 { -// i-- -// a[i] = smallsString[is] -// } -// -// } else if isPowerOfTwo(base) { -// // Use shifts and masks instead of / and %. -// // Base is a power of 2 and 2 <= base <= len(digits) where len(digits) is 36. -// // The largest power of 2 below or equal to 36 is 32, which is 1 << 5; -// // i.e., the largest possible shift count is 5. By &-ind that value with -// // the constant 7 we tell the compiler that the shift count is always -// // less than 8 which is smaller than any register width. This allows -// // the compiler to generate better code for the shift operation. -// shift := uint(bits.TrailingZeros(uint(base))) & 7 -// b := uint64(base) -// m := uint(base) - 1 // == 1<= b { -// i-- -// a[i] = digits[uint(u)&m] -// u >>= shift -// } -// // u < base -// i-- -// a[i] = digits[uint(u)] -// } else { -// // general case -// b := uint64(base) -// for u >= b { -// i-- -// // Avoid using r = a%b in addition to q = a/b -// // since 64bit division and modulo operations -// // are calculated by runtime functions on 32bit machines. -// q := u / b -// a[i] = digits[uint(u-q*b)] -// u = q -// } -// // u < base -// i-- -// a[i] = digits[uint(u)] -// } -// -// // add sign, if any -// if neg { -// i-- -// a[i] = '-' -// } -// -// if append_ { -// d = append(dst, a[i:]...) 
-// return -// } -// s = string(a[i:]) -// return -//} - -decreases -func isPowerOfTwo(x int) bool { - return x&(x-1) == 0 -} diff --git a/verification/dependencies/strings/strings.gobra b/verification/dependencies/strings/strings.gobra index 4af6da2ff..13fb98747 100644 --- a/verification/dependencies/strings/strings.gobra +++ b/verification/dependencies/strings/strings.gobra @@ -89,7 +89,7 @@ func Fields(s string) (res []string) requires forall i int :: { &elems[i] } 0 <= i && i < len(elems) ==> acc(&elems[i], _) ensures len(elems) == 0 ==> res == "" ensures len(elems) == 1 ==> res == elems[0] -// (joao) Leads to precondition of call might not hold (permission to elems[i] might not suffice) +// (VerifiedSCION) Leads to precondition of call might not hold (permission to elems[i] might not suffice) // ensures len(elems) > 1 ==> res == elems[0] + sep + Join(elems[1:], sep) decreases _ pure func Join(elems []string, sep string) (res string) diff --git a/verification/dependencies/sync/mutex.gobra b/verification/dependencies/sync/mutex.gobra index feb75100e..7b18c6566 100644 --- a/verification/dependencies/sync/mutex.gobra +++ b/verification/dependencies/sync/mutex.gobra @@ -25,11 +25,16 @@ ensures m.LockP() && m.LockInv() == inv decreases func (m *Mutex) SetInv(ghost inv pred()) +ghost +decreases _ +pure func IgnoreBlockingForTermination() bool + requires acc(m.LockP(), _) -ensures m.LockP() && m.UnlockP() && m.LockInv()() +ensures m.LockP() && m.UnlockP() && m.LockInv()() +decreases _ if IgnoreBlockingForTermination() func (m *Mutex) Lock() requires acc(m.LockP(), _) && m.UnlockP() && m.LockInv()() ensures m.LockP() -decreases +decreases _ func (m *Mutex) Unlock() diff --git a/verification/dependencies/syscall/syscall_unix.gobra b/verification/dependencies/syscall/syscall_unix.gobra index c0b53a89a..9e46ace9a 100644 --- a/verification/dependencies/syscall/syscall_unix.gobra +++ b/verification/dependencies/syscall/syscall_unix.gobra @@ -54,7 +54,6 @@ pred (s *Errno) Mem() { acc(s) } -ghost preserves s.Mem() && s.CanSet(e) ensures s.Get() === e decreases diff --git a/verification/dependencies/time/time.gobra b/verification/dependencies/time/time.gobra index 055354185..141537282 100644 --- a/verification/dependencies/time/time.gobra +++ b/verification/dependencies/time/time.gobra @@ -65,7 +65,7 @@ func (m Month) String() (res string) type Weekday int const ( - // (joao) this used to be defined in terms of iota + // (VerifiedSCION) this used to be defined in terms of iota Sunday Weekday = 0 Monday Weekday = 1 Tuesday Weekday = 2 @@ -176,17 +176,6 @@ func (d Duration) Microseconds() int64 { return int64(d) / 1000 } decreases func (d Duration) Milliseconds() int64 { return int64(d) / 1000000 } -// Seconds returns the duration as a floating point number of seconds. -/* (joao) no support for float -func (d Duration) Seconds() float64 - -// Minutes returns the duration as a floating point number of minutes. -func (d Duration) Minutes() float64 - -// Hours returns the duration as a floating point number of hours. -func (d Duration) Hours() float64 -*/ - // Truncate returns the result of rounding d toward zero to a multiple of m. // If m <= 0, Truncate returns d unchanged. 
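On the sync/mutex.gobra change a few hunks above: `Lock` can block forever, so its termination measure is now conditional on the uninterpreted ghost function `IgnoreBlockingForTermination()`. Callers that must themselves be proved terminating can propagate the same condition, as in this hypothetical sketch:

```gobra
package sync

requires acc(m.LockP(), _)
decreases _ if IgnoreBlockingForTermination()
func withLock(m *Mutex) {
    m.Lock()
    // critical section: m.UnlockP() and m.LockInv()() are held here
    m.Unlock()
}
```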
ensures m <= 0 ==> res == d diff --git a/verification/io/bios.gobra b/verification/io/bios.gobra index c2edeada0..190e6d5c5 100644 --- a/verification/io/bios.gobra +++ b/verification/io/bios.gobra @@ -20,8 +20,7 @@ package io type IO_bio3sIN adt { IO_bio3s_enter{} - IO_bio3s_xover_up2down{} - IO_bio3s_xover_core{} + IO_bio3s_xover{} IO_bio3s_exit{} } diff --git a/verification/io/dataplane_abstract.gobra b/verification/io/dataplane_abstract.gobra index 346f9532d..e78c901e9 100644 --- a/verification/io/dataplane_abstract.gobra +++ b/verification/io/dataplane_abstract.gobra @@ -16,30 +16,21 @@ package io -type DataPlaneSpec adt { - DataPlaneSpec_{ - linkTypes dict[IO_ifs]IO_Link - neighborIAs dict[IO_ifs]IO_as - localIA IO_as - topology TopologySpec - } -} - -// TopologySpec describes the entire network topology. -// coreAS: IDs of the core Autonomous Systems // links: representation of the network topology as a graph. // `links[(a1,x)] == (a2,y)` means that the interface x of AS a1 is connected // to the interface y of AS a2. -type TopologySpec adt { - TopologySpec_{ - coreAS set[IO_as] - links dict[AsIfsPair]AsIfsPair +type DataPlaneSpec adt { + DataPlaneSpec_{ + linkTypes dict[IO_ifs]IO_Link + neighborIAs dict[IO_ifs]IO_as + localIA IO_as + links dict[AsIfsPair]AsIfsPair } } type AsIfsPair struct { - asid IO_as - ifs IO_ifs + asid IO_as + ifs IO_ifs } ghost @@ -47,15 +38,15 @@ opaque decreases pure func (dp DataPlaneSpec) Valid() bool { return (forall ifs IO_ifs :: {ifs in domain(dp.neighborIAs)} ifs in domain(dp.neighborIAs) ==> - (AsIfsPair{dp.localIA, ifs} in domain(dp.topology.links) && + (AsIfsPair{dp.localIA, ifs} in domain(dp.links) && dp.Lookup(AsIfsPair{dp.localIA, ifs}).asid == dp.neighborIAs[ifs])) && - (forall ifs IO_ifs :: {ifs in domain(dp.neighborIAs)} AsIfsPair{dp.localIA, ifs} in domain(dp.topology.links) ==> + (forall ifs IO_ifs :: {ifs in domain(dp.neighborIAs)} AsIfsPair{dp.localIA, ifs} in domain(dp.links) ==> ifs in domain(dp.neighborIAs)) && - (forall pairs AsIfsPair :: {dp.Lookup(pairs)} pairs in domain(dp.topology.links) ==> + (forall pairs AsIfsPair :: {dp.Lookup(pairs)} pairs in domain(dp.links) ==> let next_pair := dp.Lookup(pairs) in - (next_pair in domain(dp.topology.links)) && - dp.Lookup(next_pair) == pairs) - // && domain(dp.linkTypes) == domain(dp.neighborIAs) + (next_pair in domain(dp.links)) && + dp.Lookup(next_pair) == pairs) && + domain(dp.linkTypes) == domain(dp.neighborIAs) } ghost @@ -90,21 +81,15 @@ pure func (dp DataPlaneSpec) Asid() IO_as { return dp.localIA } -ghost -decreases -pure func (dp DataPlaneSpec) Core() set[IO_as] { - return dp.topology.coreAS -} - ghost decreases pure func (dp DataPlaneSpec) GetLinks() dict[AsIfsPair]AsIfsPair { - return dp.topology.links + return dp.links } ghost -requires pair in domain(dp.topology.links) +requires pair in domain(dp.links) decreases pure func(dp DataPlaneSpec) Lookup(pair AsIfsPair) AsIfsPair { - return dp.topology.links[pair] + return dp.links[pair] } \ No newline at end of file diff --git a/verification/io/hopfields.gobra b/verification/io/hopfields.gobra index 735da0e78..d2f71864b 100644 --- a/verification/io/hopfields.gobra +++ b/verification/io/hopfields.gobra @@ -27,4 +27,4 @@ type IO_HF adt { EgIF2 option[IO_ifs] HVF IO_msgterm } -} +} \ No newline at end of file diff --git a/verification/io/io-spec.gobra b/verification/io/io-spec.gobra index b0cc58d78..c7e98c92d 100644 --- a/verification/io/io-spec.gobra +++ b/verification/io/io-spec.gobra @@ -18,15 +18,17 @@ package io -// Unlike the 
original IO-spec from Isabelle, we need additional information about the network topology. -// To ensure the well-formedness of all map accesses we require an additional conjunction +// This type is called BogusTrigger instead of Unit because the name Unit is already in use. +type BogusTrigger struct{} + +// Unlike the original IO-spec from Isabelle, we need additional information about the network topology. +// To ensure the well-formedness of all map accesses we require an additional conjunction // for all the events (dp.Valid()) // This is the main IO Specification. pred (dp DataPlaneSpec) dp3s_iospec_ordered(s IO_dp3s_state_local, t Place) { dp.dp3s_iospec_bio3s_enter(s, t) && - dp.dp3s_iospec_bio3s_xover_up2down(s, t) && - dp.dp3s_iospec_bio3s_xover_core(s, t) && + dp.dp3s_iospec_bio3s_xover(s, t) && dp.dp3s_iospec_bio3s_exit(s, t) && dp.dp3s_iospec_bio3s_send(s, t) && dp.dp3s_iospec_bio3s_recv(s, t) && @@ -49,44 +51,6 @@ requires CBio_IN_bio3s_enter(t, v) decreases pure func CBio_IN_bio3s_enter_T(t Place, v IO_val) Place -/*** Helper functions, not in Isabelle ***/ -// Establishes the traversed segment for packets which are not incremented (internal). -ghost -requires len(currseg.Future) > 0 -decreases -pure func establishGuardTraversedseg(currseg IO_seg3, direction bool) IO_seg3 { - return let uinfo := direction ? - upd_uinfo(currseg.UInfo, currseg.Future[0]) : - currseg.UInfo in - IO_seg3_ { - AInfo: currseg.AInfo, - UInfo: uinfo, - ConsDir: currseg.ConsDir, - Peer: currseg.Peer, - Past: currseg.Past, - Future: currseg.Future, - History: currseg.History} -} - -// Establishes the traversed segment for packets that are incremented (external). -ghost -requires len(currseg.Future) > 0 -decreases -pure func establishGuardTraversedsegInc(currseg IO_seg3, direction bool) IO_seg3 { - return let uinfo := direction ? - upd_uinfo(currseg.UInfo, currseg.Future[0]) : - currseg.UInfo in - IO_seg3_ { - AInfo: currseg.AInfo, - UInfo: uinfo, - ConsDir: currseg.ConsDir, - Peer: currseg.Peer, - Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, - Future: currseg.Future[1:], - History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History} -} -/*** End of helper functions, not in Isabelle ***/ - // This corresponds to the condition of the if statement in the io-spec case for enter ghost requires v.isIO_Internal_val1 @@ -108,142 +72,101 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_enter_guard(s IO_dp3s_state_local v.IO_Internal_val1_2, fut) && dp.dp3s_forward( - IO_pkt2( - IO_Packet2{ - traversedseg, - v.IO_Internal_val1_1.LeftSeg, - v.IO_Internal_val1_1.MidSeg, - v.IO_Internal_val1_1.RightSeg}), + IO_Packet2 { + traversedseg, + v.IO_Internal_val1_1.LeftSeg, + v.IO_Internal_val1_1.MidSeg, + v.IO_Internal_val1_1.RightSeg, + }, v.IO_Internal_val1_3, v.IO_Internal_val1_4) } pred (dp DataPlaneSpec) dp3s_iospec_bio3s_enter(s IO_dp3s_state_local, t Place) { - // TODO: we may need more triggering terms here - forall v IO_val :: { dp.dp3s_iospec_bio3s_enter_guard(s, t, v) } ( + forall v IO_val :: { TriggerBodyIoEnter(v) } ( match v { - case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + // Gobra requires the triggering term to occur inside the quantifier body, + // otherwise we get an error in the call to CBio_IN_bio3s_enter_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error.
+ let _ignored := TriggerBodyIoEnter(v) in (dp.Valid() && dp.dp3s_iospec_bio3s_enter_guard(s, t, v) ==> - (CBio_IN_bio3s_enter(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, nextif, newpkt), - CBio_IN_bio3s_enter_T(t, v)))) - default: + (CBio_IN_bio3s_enter(t, v) && + dp.dp3s_iospec_ordered( + dp3s_add_obuf(s, nextif, newpkt), + CBio_IN_bio3s_enter_T(t, v)))) + default: true }) } -pred CBio_IN_bio3s_xover_up2down(t Place, v IO_val) +ghost +decreases +pure func TriggerBodyIoEnter(v IO_val) BogusTrigger { return BogusTrigger{} } + +pred CBio_IN_bio3s_xover(t Place, v IO_val) ghost -requires CBio_IN_bio3s_xover_up2down(t, v) +requires CBio_IN_bio3s_xover(t, v) decreases -pure func dp3s_iospec_bio3s_xover_up2down_T(t Place, v IO_val) Place +pure func dp3s_iospec_bio3s_xover_T(t Place, v IO_val) Place -// This corresponds to the condition of the if statement in the io-spec case for xover_up2down +// This corresponds to the condition of the if statement in the io-spec case for xover ghost requires v.isIO_Internal_val1 requires dp.Valid() decreases -pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down_guard(s IO_dp3s_state_local, t Place, v IO_val) bool { - return let currseg := v.IO_Internal_val1_1.CurrSeg in +pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_guard(s IO_dp3s_state_local, t Place, v IO_val) bool { + return let currseg := v.IO_Internal_val1_1.CurrSeg in match v.IO_Internal_val1_1.LeftSeg{ - case none[IO_seg2]: + case none[IO_seg2]: false - default: + default: let nextseg := get(v.IO_Internal_val1_1.LeftSeg) in - (!currseg.ConsDir && - nextseg.ConsDir && - len(nextseg.Future) > 0 && + (len(nextseg.Future) > 0 && len(currseg.Future) > 0 && - len(v.IO_Internal_val1_3.CurrSeg.Future) > 0 && let hf1, hf2 := currseg.Future[0], nextseg.Future[0] in let traversedseg := establishGuardTraversedsegInc(currseg, !currseg.ConsDir) in - (dp.xover_up2down2_link_type(dp.Asid(), hf1, hf2) && - dp.dp3s_xover_common( - s, - v.IO_Internal_val1_1, - currseg, - nextseg, - traversedseg, - IO_pkt2(IO_Packet2{nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg)}), - hf1, - hf2, - v.IO_Internal_val1_2, - v.IO_Internal_val1_3, - v.IO_Internal_val1_4,))) + let nextfut := nextseg.Future[1:] in + dp.dp3s_xover_guard( + s, + v.IO_Internal_val1_1, + currseg, + nextseg, + traversedseg, + IO_Packet2 { nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg) }, + hf1, + hf2, + nextfut, + v.IO_Internal_val1_2, + v.IO_Internal_val1_3, + v.IO_Internal_val1_4,)) } } -pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_up2down(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { dp.dp3s_iospec_bio3s_xover_up2down_guard(s, t, v) }{ CBio_IN_bio3s_xover_up2down(t, v) } { dp.dp3s_iospec_ordered(dp3s_add_obuf(s, v.IO_Internal_val1_4, v.IO_Internal_val1_3), dp3s_iospec_bio3s_xover_up2down_T(t, v)) } ( +pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover(s IO_dp3s_state_local, t Place) { + forall v IO_val :: { TriggerBodyIoXover(v) } ( match v { - case IO_Internal_val1{_, _, ?newpkt, ?nextif}: - (dp.Valid() && dp.dp3s_iospec_bio3s_xover_up2down_guard(s, t, v) ==> - (CBio_IN_bio3s_xover_up2down(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, nextif, newpkt), - dp3s_iospec_bio3s_xover_up2down_T(t, v)))) - default: + case IO_Internal_val1{_, _, ?newpkt, ?nextif}: + // Gobra requires the triggering term to occur inside the quantifier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_xover_T.
+ // We named the variable `_ignored` because using `_` here leads to a strange + // type error. + let _ignored := TriggerBodyIoXover(v) in + (dp.Valid() && dp.dp3s_iospec_bio3s_xover_guard(s, t, v) ==> + (CBio_IN_bio3s_xover(t, v) && + dp.dp3s_iospec_ordered( + dp3s_add_obuf(s, nextif, newpkt), + dp3s_iospec_bio3s_xover_T(t, v)))) + default: true }) } -pred CBio_IN_bio3s_xover_core(t Place, v IO_val) - -ghost -requires CBio_IN_bio3s_xover_core(t, v) -decreases -pure func dp3s_iospec_bio3s_xover_core_T(t Place, v IO_val) Place - -// This corresponds to the condition of the if statement in the io-spec case for xover_core ghost -requires v.isIO_Internal_val1 -requires dp.Valid() decreases -pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_core_guard(s IO_dp3s_state_local, t Place, v IO_val) bool { - return (dp.Asid() in dp.Core() && - let currseg := v.IO_Internal_val1_1.CurrSeg in - match v.IO_Internal_val1_1.LeftSeg { - case none[IO_seg2]: - false - default: - let nextseg := get(v.IO_Internal_val1_1.LeftSeg) in - currseg.ConsDir == nextseg.ConsDir && - len(nextseg.Future) > 0 && - len(currseg.Future) > 0 && - len(v.IO_Internal_val1_3.CurrSeg.Future) > 0 && - let hf1, hf2 := currseg.Future[0], nextseg.Future[0] in - let traversedseg := establishGuardTraversedsegInc(currseg, !currseg.ConsDir) in - (dp.xover_core2_link_type(hf1, hf2, dp.Asid(), currseg.ConsDir) && - dp.dp3s_xover_common( - s, - v.IO_Internal_val1_1, - currseg, - nextseg, - traversedseg, - IO_pkt2(IO_Packet2{nextseg, v.IO_Internal_val1_1.MidSeg, v.IO_Internal_val1_1.RightSeg, some(traversedseg)}), - hf1, - hf2, - v.IO_Internal_val1_2, - v.IO_Internal_val1_3, - v.IO_Internal_val1_4)) - }) -} - -pred (dp DataPlaneSpec) dp3s_iospec_bio3s_xover_core(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { dp.dp3s_iospec_bio3s_xover_core_guard(s, t, v) }{ CBio_IN_bio3s_xover_core(t, v) }{ dp.dp3s_iospec_ordered(dp3s_add_obuf(s, v.IO_Internal_val1_4, v.IO_Internal_val1_3), dp3s_iospec_bio3s_xover_core_T(t, v)) } ( - match v { - case IO_Internal_val1{_, _, ?newpkt, ?nextif}: - (dp.Valid() && dp.dp3s_iospec_bio3s_xover_core_guard(s, t, v) ==> - (CBio_IN_bio3s_xover_core(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, nextif, newpkt), - dp3s_iospec_bio3s_xover_core_T(t, v)))) - default: - true - }) -} +pure func TriggerBodyIoXover(v IO_val) BogusTrigger { return BogusTrigger{} } pred CBio_IN_bio3s_exit(t Place, v IO_val) @@ -265,19 +188,27 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_exit_guard(s IO_dp3s_state_local, } pred (dp DataPlaneSpec) dp3s_iospec_bio3s_exit(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { dp.dp3s_iospec_bio3s_exit_guard(s, t, v) }{ CBio_IN_bio3s_exit(t, v) }{ dp.dp3s_iospec_ordered(dp3s_add_obuf(s, some(v.IO_Internal_val2_3), v.IO_Internal_val2_2), dp3s_iospec_bio3s_exit_T(t, v)) } ( + forall v IO_val :: { TriggerBodyIoExit(v) } ( match v { - case IO_Internal_val2{_, ?newpkt, ?nextif}: + case IO_Internal_val2{_, ?newpkt, ?nextif}: + // Gobra requires the triggering term to occur inside the quantifier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_exit_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error.
+ let _ignored := TriggerBodyIoExit(v) in (dp.Valid() && dp.dp3s_iospec_bio3s_exit_guard(s, t, v) ==> - (CBio_IN_bio3s_exit(t, v) && - dp.dp3s_iospec_ordered( - dp3s_add_obuf(s, some(nextif), newpkt), - dp3s_iospec_bio3s_exit_T(t, v)))) - default: + (CBio_IN_bio3s_exit(t, v) && + dp.dp3s_iospec_ordered( + dp3s_add_obuf(s, some(nextif), newpkt), + dp3s_iospec_bio3s_exit_T(t, v)))) + default: true - }) + }) } +ghost +decreases +pure func TriggerBodyIoExit(v IO_val) BogusTrigger { return BogusTrigger{} } pred CBioIO_bio3s_send(t Place, v IO_val) @@ -296,22 +227,35 @@ pure func (dp DataPlaneSpec) dp3s_iospec_bio3s_send_guard(s IO_dp3s_state_local, (let obuf_set := s.obuf[v.IO_val_Pkt2_1] in (v.IO_val_Pkt2_2 in obuf_set)) } -// TODO: annotate WriteBatch, skipped for now pred (dp DataPlaneSpec) dp3s_iospec_bio3s_send(s IO_dp3s_state_local, t Place) { - forall v IO_val :: { dp.dp3s_iospec_bio3s_send_guard(s, t, v) }{ CBioIO_bio3s_send(t, v) }{ dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v)) }{ CBioIO_bio3s_send(t, v) }{ dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v)) } ( + forall v IO_val :: { TriggerBodyIoSend(v) } ( match v { - case IO_val_Pkt2{_, _}: + case IO_val_Pkt2{_, _}: + // Gobra requires the triggering term to occur inside the quantifier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_send_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error. + let _ignored := TriggerBodyIoSend(v) in (dp.Valid() && dp.dp3s_iospec_bio3s_send_guard(s, t, v) ==> - CBioIO_bio3s_send(t, v) && - dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v))) - case IO_val_Unsupported{_, _}: + CBioIO_bio3s_send(t, v) && + dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v))) + case IO_val_Unsupported{_, _}: + // Gobra requires the triggering term to occur inside the quantifier body, + // otherwise we get an error in the call to dp3s_iospec_bio3s_send_T. + // We named the variable `_ignored` because using `_` here leads to a strange + // type error. + let _ignored := TriggerBodyIoSend(v) in (CBioIO_bio3s_send(t, v) && dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_send_T(t, v))) - default: + default: true }) } +ghost +decreases +pure func TriggerBodyIoSend(v IO_val) BogusTrigger { return BogusTrigger{} } + pred CBioIO_bio3s_recv(t Place) ghost @@ -319,7 +263,7 @@ requires CBioIO_bio3s_recv(t) decreases pure func dp3s_iospec_bio3s_recv_T(t Place) Place -// We can safely make this assumption as Isabelle's IO-spec never +// We can safely make this assumption as Isabelle's IO-spec never // receives the other IO values (Unit and Internal).
ghost requires CBioIO_bio3s_recv(t) decreases @@ -330,12 +274,12 @@ pure func dp3s_iospec_bio3s_recv_R(t Place) (val IO_val) pred (dp DataPlaneSpec) dp3s_iospec_bio3s_recv(s IO_dp3s_state_local, t Place) { CBioIO_bio3s_recv(t) && (match dp3s_iospec_bio3s_recv_R(t) { - case IO_val_Pkt2{?recvif, ?pkt}: + case IO_val_Pkt2{?recvif, ?pkt}: dp.dp3s_iospec_ordered( dp3s_add_ibuf(s, recvif, pkt), dp3s_iospec_bio3s_recv_T(t)) - case IO_val_Unsupported{_, _}: + case IO_val_Unsupported{_, _}: dp.dp3s_iospec_ordered(s, dp3s_iospec_bio3s_recv_T(t)) - default: + default: dp.dp3s_iospec_ordered(undefined(), dp3s_iospec_bio3s_recv_T(t)) }) } @@ -359,31 +303,19 @@ pred (dp DataPlaneSpec) dp3s_iospec_stop(s IO_dp3s_state_local, t Place) { ghost decreases requires token(t) && CBio_IN_bio3s_enter(t, v) -ensures token(old(CBio_IN_bio3s_enter_T(t, v))) +ensures token(old(CBio_IN_bio3s_enter_T(t, v))) func Enter(ghost t Place, ghost v IO_val) ghost decreases -requires token(t) && CBio_IN_bio3s_xover_core(t, v) -ensures token(old(dp3s_iospec_bio3s_xover_core_T(t, v))) -func Xover_core(ghost t Place, ghost v IO_val) - -ghost -decreases -requires token(t) && CBio_IN_bio3s_xover_up2down(t, v) -ensures token(old(dp3s_iospec_bio3s_xover_up2down_T(t, v))) -func Xover_up2down(ghost t Place, ghost v IO_val) +requires token(t) && CBio_IN_bio3s_xover(t, v) +ensures token(old(dp3s_iospec_bio3s_xover_T(t, v))) +func Xover(ghost t Place, ghost v IO_val) ghost decreases requires token(t) && CBio_IN_bio3s_exit(t, v) -ensures token(old(dp3s_iospec_bio3s_exit_T(t, v))) +ensures token(old(dp3s_iospec_bio3s_exit_T(t, v))) func Exit(ghost t Place, ghost v IO_val) -ghost -decreases -requires token(t) && CBioIO_bio3s_send(t, v) -ensures token(old(dp3s_iospec_bio3s_send_T(t, v))) -func Send(ghost t Place, ghost v IO_val) - -/** End of helper functions to perfrom BIO operations **/ \ No newline at end of file +/** End of helper functions to perform BIO operations **/ diff --git a/verification/io/io_spec_definitions.gobra b/verification/io/io_spec_definitions.gobra new file mode 100644 index 000000000..e7dc18e96 --- /dev/null +++ b/verification/io/io_spec_definitions.gobra @@ -0,0 +1,173 @@ +// Copyright 2024 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package io + +/*** This file contains helpful definitions that do not have a counterpart in the Isabelle formalization. ***/ + +// Establishes the traversed segment for packets which are not incremented (internal). +ghost +requires len(currseg.Future) > 0 +decreases +pure func establishGuardTraversedseg(currseg IO_seg3, direction bool) IO_seg3 { + return let uinfo := direction ? + upd_uinfo(currseg.UInfo, currseg.Future[0]) : + currseg.UInfo in + IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: uinfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: currseg.Past, + Future: currseg.Future, + History: currseg.History, + } +} + +// Establishes the traversed segment for packets that are incremented (external).
+ghost +requires len(currseg.Future) > 0 +decreases +pure func establishGuardTraversedsegInc(currseg IO_seg3, direction bool) IO_seg3 { + return let uinfo := direction ? + upd_uinfo(currseg.UInfo, currseg.Future[0]) : + currseg.UInfo in + IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: uinfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, + Future: currseg.Future[1:], + History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, + } +} + +ghost +requires len(seg.Future) > 0 +decreases +pure func (seg IO_seg3) UpdateCurrHf(hf IO_HF) IO_seg3 { + return IO_seg3_ { + seg.AInfo, + seg.UInfo, + seg.ConsDir, + seg.Peer, + seg.Past, + seq[IO_HF]{hf} ++ seg.Future[1:], + seg.History, + } +} + +ghost +requires pkt.PathNotFullyTraversed() +decreases +pure func (pkt IO_pkt2) UpdateHopField(hf IO_HF) IO_pkt2 { + return let newCurrSeg := pkt.CurrSeg.UpdateCurrHf(hf) in + IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} +} + +ghost +decreases +pure func (seg IO_seg3) UpdateCurrSeg(info AbsInfoField) IO_seg3 { + return IO_seg3_ { + info.AInfo, + info.UInfo, + info.ConsDir, + info.Peer, + seg.Past, + seg.Future, + seg.History, + } +} + +ghost +decreases +pure func (pkt IO_pkt2) UpdateInfoField(info AbsInfoField) IO_pkt2 { + return let newCurrSeg := pkt.CurrSeg.UpdateCurrSeg(info) in + IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} +} + +// This type simplifies the infoField, making it easier +// to use than the IO_seg3 from the IO-spec. +type AbsInfoField adt { + AbsInfoField_ { + AInfo IO_ainfo + UInfo set[IO_msgterm] + ConsDir bool + Peer bool + } +} + +// The segment lengths of a packet are frequently used together. +// This type combines them into a single structure to simplify +// their specification. +type SegLens adt { + SegLens_ { + Seg1Len int + Seg2Len int + Seg3Len int + } +} + +ghost +decreases +pure func (s SegLens) Valid() bool { + return s.Seg1Len > 0 && + s.Seg2Len >= 0 && + s.Seg3Len >= 0 +} + +ghost +decreases +pure func CombineSegLens(seg1Len int, seg2Len int, seg3Len int) SegLens { + return SegLens_ { + seg1Len, + seg2Len, + seg3Len, + } +} + +ghost +decreases +pure func (s SegLens) NumInfoFields() int { + return s.Seg3Len > 0 ? 3 : (s.Seg2Len > 0 ? 2 : (s.Seg1Len > 0 ? 1 : 0)) +} + +ghost +decreases +pure func (s SegLens) TotalHops() int { + return s.Seg1Len + s.Seg2Len + s.Seg3Len +} + +ghost +decreases +pure func (s SegLens) LengthOfCurrSeg(currHF int) int { + return s.Seg1Len > currHF ? s.Seg1Len : ((s.Seg1Len + s.Seg2Len) > currHF ? s.Seg2Len : s.Seg3Len) +} + +ghost +requires 0 <= currHF +ensures res <= currHF +decreases +pure func (s SegLens) LengthOfPrevSeg(currHF int) (res int) { + return s.Seg1Len > currHF ? 0 : ((s.Seg1Len + s.Seg2Len) > currHF ? 
s.Seg1Len : s.Seg1Len + s.Seg2Len) +} + +ghost +decreases +pure func (pkt IO_pkt2) PathNotFullyTraversed() bool { + return len(pkt.CurrSeg.Future) > 0 +} diff --git a/verification/io/other_defs.gobra b/verification/io/other_defs.gobra index b65e70abc..7f600a457 100644 --- a/verification/io/other_defs.gobra +++ b/verification/io/other_defs.gobra @@ -18,9 +18,7 @@ package io -type Unit adt { - Unit_{} -} +type Unit struct{} // interface IDs type IO_ifs uint16 @@ -108,7 +106,7 @@ func (h IO_HF) Toab() IO_ahi { type IO_Link adt { IO_CustProv{} IO_ProvCust{} - IO_PeerOrCore{} + IO_Core{} IO_NoLink{} } @@ -137,7 +135,7 @@ requires dp.Valid() requires asid == dp.Asid() decreases pure func (dp DataPlaneSpec) egif_core2(hf1 IO_HF, asid IO_as) bool{ - return dp.egif2_type(hf1, asid, IO_Link(IO_PeerOrCore{})) + return dp.egif2_type(hf1, asid, IO_Link(IO_Core{})) } ghost @@ -161,7 +159,7 @@ requires dp.Valid() requires asid == dp.Asid() decreases pure func (dp DataPlaneSpec) inif_core2(hf1 IO_HF, asid IO_as) bool{ - return dp.inif2_type(hf1, asid, IO_Link(IO_PeerOrCore{})) + return dp.inif2_type(hf1, asid, IO_Link(IO_Core{})) } ghost @@ -178,9 +176,9 @@ requires ifs != none[IO_ifs] ==> asid == dp.Asid() decreases pure func (dp DataPlaneSpec) if_type(asid IO_as, ifs option[IO_ifs], link IO_Link) bool{ return match ifs { - case none[IO_ifs]: + case none[IO_ifs]: false - default: + default: dp.link_type(asid, get(ifs)) == link } } @@ -202,6 +200,7 @@ type IO_dp2_state adt { } ghost +opaque decreases pure func (m IO_msgterm) extract_asid() IO_as { return m.MsgTerm_Hash_.MsgTerm_MPair_1.MsgTerm_Key_.Key_macK_ diff --git a/verification/io/packets.gobra b/verification/io/packets.gobra index 4944fc21c..8047357bd 100644 --- a/verification/io/packets.gobra +++ b/verification/io/packets.gobra @@ -19,8 +19,9 @@ package io // pkt2 +// Here, we already instantiated the type params, instead of +// leaving them generic as done in Isabelle. 
type IO_pkt2 adt { - // Here, we already instantiated the type params IO_Packet2 { CurrSeg IO_seg3 LeftSeg option[IO_seg2] diff --git a/verification/io/router.gobra b/verification/io/router.gobra index 4574c48f0..533bdee8d 100644 --- a/verification/io/router.gobra +++ b/verification/io/router.gobra @@ -27,52 +27,77 @@ pure func if2term(ifs option[IO_ifs]) IO_msgterm { case none[IO_ifs]: MsgTerm_Empty{} default: - IO_msgterm(MsgTerm_AS{IO_as(get(ifs))}) + MsgTerm_AS{IO_as(get(ifs))} } } ghost decreases pure func (dp DataPlaneSpec) hf_valid(d bool, ts uint, uinfo set[IO_msgterm], hf IO_HF) bool { + return hf_valid_impl(dp.Asid(), ts, uinfo, hf) +} + +ghost +decreases +pure func hf_valid_impl(asid IO_as, ts uint, uinfo set[IO_msgterm], hf IO_HF) bool { return let inif := hf.InIF2 in let egif := hf.EgIF2 in - let x := hf.HVF in - let l := IO_msgterm(MsgTerm_L{ - seq[IO_msgterm]{ - IO_msgterm(MsgTerm_Num{ts}), - if2term(inif), - if2term(egif), - IO_msgterm(MsgTerm_FS{uinfo})}}) in - x == mac(macKey(asidToKey(dp.Asid())), l) + let hvf := hf.HVF in + let next := nextMsgtermSpec(asid, inif, egif, ts, uinfo) in + hvf == next +} + +ghost +opaque +ensures result.extract_asid() == asid +decreases +pure func nextMsgtermSpec(asid IO_as, inif option[IO_ifs], egif option[IO_ifs], ts uint, uinfo set[IO_msgterm]) (result IO_msgterm) { + return let l := plaintextToMac(inif, egif, ts, uinfo) in + let res := mac(macKey(asidToKey(asid)), l) in + let _ := reveal res.extract_asid() in + res +} + +ghost +decreases +pure func plaintextToMac(inif option[IO_ifs], egif option[IO_ifs], ts uint, uinfo set[IO_msgterm]) IO_msgterm { + return MsgTerm_L { + seq[IO_msgterm]{ + MsgTerm_Num{ts}, + if2term(inif), + if2term(egif), + MsgTerm_FS{uinfo}, + }, + } } ghost decreases pure func macKey(key IO_key) IO_msgterm { - return IO_msgterm(MsgTerm_Key{key}) + return MsgTerm_Key{key} } ghost decreases pure func mac(fst IO_msgterm, snd IO_msgterm) IO_msgterm { - return IO_msgterm( MsgTerm_Hash { - MsgTerm_Hash_ : IO_msgterm( MsgTerm_MPair{ - MsgTerm_MPair_1 : fst, - MsgTerm_MPair_2 : snd, - }), - }) + return MsgTerm_Hash { + MsgTerm_Hash_: MsgTerm_MPair { + MsgTerm_MPair_1: fst, + MsgTerm_MPair_2: snd, + }, + } } // helper function, not defined in IO spec ghost decreases -pure func asidToKey(asid IO_as) IO_key{ - return IO_key(Key_macK{asid}) +pure func asidToKey(asid IO_as) IO_key { + return Key_macK{asid} } ghost decreases -pure func upd_uinfo(segid set[IO_msgterm], hf IO_HF) set[IO_msgterm]{ +pure func upd_uinfo(segid set[IO_msgterm], hf IO_HF) set[IO_msgterm] { return let setHVF := set[IO_msgterm]{hf.HVF} in (segid union setHVF) setminus (segid intersection setHVF) } @@ -96,7 +121,7 @@ pure func (dp DataPlaneSpec) is_target(asid IO_as, nextif IO_ifs, a2 IO_as, i2 I ghost decreases pure func dp3s_add_ibuf(s IO_dp3s_state_local, i option[IO_ifs], pkt IO_pkt3) IO_dp3s_state_local { - return IO_dp3s_state_local_{ + return IO_dp3s_state_local_ { ibuf: insert(s.ibuf, i, pkt), obuf: s.obuf, } @@ -105,7 +130,7 @@ pure func dp3s_add_ibuf(s IO_dp3s_state_local, i option[IO_ifs], pkt IO_pkt3) IO ghost decreases pure func dp3s_add_obuf(s IO_dp3s_state_local, i option[IO_ifs], pkt IO_pkt3) IO_dp3s_state_local { - return IO_dp3s_state_local_{ + return IO_dp3s_state_local_ { ibuf: s.ibuf, obuf: insert(s.obuf, i, pkt), } @@ -129,7 +154,6 @@ pure func (dp DataPlaneSpec) dp3s_forward_ext(m IO_pkt3, newpkt IO_pkt3, nextif let hf1, fut := currseg.Future[0], currseg.Future[1:] in let traversedseg := newpkt.CurrSeg in 
dp.dp2_forward_ext_guard(dp.Asid(), m, nextif, currseg, traversedseg, newpkt, fut, hf1) && - dp.dp2_check_interface_top(currseg.ConsDir, dp.Asid(), hf1) && (nextif in domain(dp.GetNeighborIAs())) && let a2 := dp.GetNeighborIA(nextif) in let i2 := dp.Lookup(AsIfsPair{dp.Asid(), nextif}).ifs in @@ -152,8 +176,6 @@ pure func (dp DataPlaneSpec) dp3s_forward_ext_xover(m IO_pkt3, newpkt IO_pkt3, n dp.is_target(dp.Asid(), nextif, a2, i2) } - -// TODO: should we change IO_ifs to being implemented as an option type? ghost requires len(m.CurrSeg.Future) > 0 requires dp.Valid() @@ -184,7 +206,7 @@ ghost requires len(intermediatepkt.CurrSeg.Future) > 0 requires dp.Valid() decreases -pure func (dp DataPlaneSpec) dp3s_xover_common( +pure func (dp DataPlaneSpec) dp3s_xover_guard( s IO_dp3s_state_local, m IO_pkt3, currseg IO_seg3, @@ -193,6 +215,7 @@ pure func (dp DataPlaneSpec) dp3s_xover_common( intermediatepkt IO_pkt3, hf1 IO_HF, hf2 IO_HF, + nextfut seq[IO_HF], recvif IO_ifs, newpkt IO_pkt3, nextif option[IO_ifs], @@ -201,6 +224,6 @@ pure func (dp DataPlaneSpec) dp3s_xover_common( // this is because of the way math. maps are implemented, we can only obtain a key that is in the map before. return some(recvif) in domain(s.ibuf) && (let lookupRes := s.ibuf[some(recvif)] in (m in lookupRes)) && - dp.dp2_xover_common_guard(m, currseg, nextseg, traversedseg, intermediatepkt, hf1, hf2, dp.Asid(), recvif) && + dp.dp2_xover_guard(m, currseg, nextseg, traversedseg, intermediatepkt, hf1, hf2, nextfut, dp.Asid(), recvif) && dp.dp3s_forward_xover(intermediatepkt, newpkt, nextif) } diff --git a/verification/io/router_events.gobra b/verification/io/router_events.gobra index 4ba87d715..54822ddbe 100644 --- a/verification/io/router_events.gobra +++ b/verification/io/router_events.gobra @@ -18,16 +18,6 @@ package io -ghost -requires dp.Valid() -requires asid == dp.Asid() -decreases -pure func (dp DataPlaneSpec) dp2_check_recvif(d bool, asid IO_as, recvif IO_ifs) bool { - return d? 
- (dp.link_type(asid, recvif) == IO_CustProv{} || dp.link_type(asid, recvif) == IO_PeerOrCore{}) : - (dp.link_type(asid, recvif) == IO_ProvCust{} || dp.link_type(asid, recvif) == IO_PeerOrCore{}) -} - /* Abbreviations */ ghost requires dp.Valid() @@ -52,7 +42,7 @@ pure func (dp DataPlaneSpec) valid_link_types_in2(hf1 IO_HF, a IO_as) bool { ghost decreases -pure func (dp DataPlaneSpec) dp2_check_interface(d bool, asid IO_as, hf1 IO_HF, recvif IO_ifs) bool { +pure func (dp DataPlaneSpec) dp2_enter_interface(d bool, asid IO_as, hf1 IO_HF, recvif IO_ifs) bool { return (d && hf1.InIF2 === some(recvif)) || (!d && hf1.EgIF2 === some(recvif)) } @@ -77,7 +67,7 @@ requires dp.Asid() == asid decreases pure func (dp DataPlaneSpec) dp2_forward_ext_guard(asid IO_as, m IO_pkt2, nextif IO_ifs, currseg, traversedseg IO_seg2, newpkt IO_pkt2, fut seq[IO_HF], hf1 IO_HF) bool { return m.CurrSeg == currseg && - newpkt == IO_pkt2(IO_Packet2{traversedseg, m.LeftSeg, m.MidSeg, m.RightSeg}) && + newpkt == IO_Packet2{traversedseg, m.LeftSeg, m.MidSeg, m.RightSeg} && // The outgoing interface is correct: dp2_exit_interface(currseg.ConsDir, asid, hf1, nextif) && // Next validate the current hop field with the *original* UInfo field): @@ -100,7 +90,8 @@ decreases pure func (dp DataPlaneSpec) dp2_enter_guard(m IO_pkt2, currseg IO_seg2, traversedseg IO_seg2, asid IO_as, hf1 IO_HF, recvif IO_ifs, fut seq[IO_HF]) bool { return m.CurrSeg == currseg && currseg.Future == seq[IO_HF]{hf1} ++ fut && - dp.dp2_check_interface(currseg.ConsDir, asid, hf1, recvif) && + dp.dp2_enter_interface(currseg.ConsDir, asid, hf1, recvif) && + (dp.dp2_check_interface_top(currseg.ConsDir, asid, hf1) || fut == seq[IO_HF]{}) && update_uinfo(!currseg.ConsDir, currseg, traversedseg, hf1) && same_segment2(currseg, traversedseg) && same_other2(currseg, traversedseg) && diff --git a/verification/io/segments.gobra b/verification/io/segments.gobra index acaca602d..9b3248c1a 100644 --- a/verification/io/segments.gobra +++ b/verification/io/segments.gobra @@ -20,8 +20,9 @@ package io type IO_ainfo = uint +// Here, we already instantiated the type params, contrary to what +// is done in Isabelle, where they are left generic. type IO_seg2 adt { - // Here, we already instantiated the type params IO_seg3_ { AInfo IO_ainfo // nat in Isabelle UInfo set[IO_msgterm] diff --git a/verification/io/xover.gobra b/verification/io/xover.gobra index e9cda55ad..8f2d91c69 100644 --- a/verification/io/xover.gobra +++ b/verification/io/xover.gobra @@ -18,44 +18,40 @@ package io +// Switching between segments (xover) // Xover events are similar to the enter event in that a packet is received from an external
-// Common guard between"dp2_xover_up2down"and "dp2_xover_core": -// Check if we are at the end of one segment and that there is a non empty -// Future segment. There are three different segments in this definition: -// currseg, the 'old segment' with exactly one hop field remaining in the -// Future path, traversedseg, which is currseg after we push its -// remaining hop field into the Past path, and nextseg, which is -// the new segment that we are xovering over to. +// Guard: +// Check if we are at the end of one segment and that there is a non-empty Future segment. +// There are three different segments in this definition: currseg, the 'old segment' with +// exactly one hop field remaining in the Future path, traversedseg, which is currseg after we +// push its remaining hop field into the Past path, and nextseg, which is the new segment that we +// are xovering over to. ghost requires dp.Valid() requires asid == dp.Asid() decreases -pure func (dp DataPlaneSpec) dp2_xover_common_guard(m IO_pkt2, +pure func (dp DataPlaneSpec) dp2_xover_guard(m IO_pkt2, currseg IO_seg2, nextseg IO_seg2, traversedseg IO_seg2, newpkt IO_pkt2, hf1 IO_HF, hf2 IO_HF, + nextfut seq[IO_HF], asid IO_as, recvif IO_ifs) bool { return m.CurrSeg == currseg && m.LeftSeg == some(nextseg) && nextseg.History == seq[IO_ahi]{} && - newpkt == IO_pkt2(IO_Packet2{nextseg, m.MidSeg, m.RightSeg, some(traversedseg)}) && + newpkt == IO_Packet2{nextseg, m.MidSeg, m.RightSeg, some(traversedseg)} && currseg.Future == seq[IO_HF]{hf1} && - len(nextseg.Future) > 0 && - nextseg.Future[0] == hf2 && - dp.dp2_check_interface(currseg.ConsDir, asid, hf1, recvif) && - dp.dp2_check_recvif(currseg.ConsDir, asid, recvif) && + nextseg.Future == seq[IO_HF]{hf2} ++ nextfut && + dp.dp2_enter_interface(currseg.ConsDir, asid, hf1, recvif) && + dp.xover2_link_type_dir(dp.Asid(), currseg.ConsDir, hf1, nextseg.ConsDir, hf2) && update_uinfo(!currseg.ConsDir, currseg, traversedseg, hf1) && inc_seg2(currseg, traversedseg, hf1, seq[IO_HF]{}) && dp.hf_valid(currseg.ConsDir, currseg.AInfo, traversedseg.UInfo, hf1) && @@ -85,17 +81,28 @@ ghost requires dp.Valid() requires asid == dp.Asid() decreases -pure func (dp DataPlaneSpec) xover_up2down2_link_type(asid IO_as, hf1 IO_HF, hf2 IO_HF) bool { - return (dp.egif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) || - (dp.egif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_PeerOrCore{})) || - (dp.egif2_type(hf1, asid, IO_PeerOrCore{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) +pure func (dp DataPlaneSpec) xover2_link_type(asid IO_as, hf1 IO_HF, hf2 IO_HF) bool { + return (dp.inif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) || + (dp.inif2_type(hf1, asid, IO_ProvCust{}) && dp.egif2_type(hf2, asid, IO_Core{})) || + (dp.inif2_type(hf1, asid, IO_Core{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) +} + +ghost +decreases +pure func swap_if_dir2(hf IO_HF, d bool) IO_HF { + return IO_HF_ { + InIF2: d ? hf.InIF2 : hf.EgIF2, + EgIF2: d ?
hf.EgIF2 : hf.InIF2, + HVF: hf.HVF, + } } ghost requires dp.Valid() requires asid == dp.Asid() decreases -pure func (dp DataPlaneSpec) xover_core2_link_type(hf1 IO_HF, hf2 IO_HF, asid IO_as, d bool) bool { - return (!d && dp.egif2_type(hf1, asid, IO_ProvCust{}) && dp.inif2_type(hf2, asid, IO_PeerOrCore{})) || - (d && dp.inif2_type(hf1, asid, IO_PeerOrCore{}) && dp.egif2_type(hf2, asid, IO_ProvCust{})) +pure func (dp DataPlaneSpec) xover2_link_type_dir(asid IO_as, d1 bool, hf1 IO_HF, d2 bool, hf2 IO_HF) bool { + return dp.xover2_link_type(asid, swap_if_dir2(hf1, d1), swap_if_dir2(hf2, d2)) } + + diff --git a/verification/utils/bitwise/bitwise-eqs.gobra b/verification/utils/bitwise/bitwise-eqs.gobra index 605e754b5..205841a92 100644 --- a/verification/utils/bitwise/bitwise-eqs.gobra +++ b/verification/utils/bitwise/bitwise-eqs.gobra @@ -52,4 +52,25 @@ ghost ensures res == b & 0x3F ensures 0 <= res && res < 64 decreases -pure func And3fAtMost64(b uint8) (res uint8) \ No newline at end of file +pure func And3fAtMost64(b uint8) (res uint8) + +ghost +ensures 0 | 1 == 1 +ensures 0 | 2 == 2 +ensures 1 | 2 == 3 +ensures 0 & 1 == 0 +ensures 0 & 2 == 0 +ensures 1 & 1 == 1 +ensures 1 & 2 == 0 +ensures 2 & 1 == 0 +ensures 2 & 2 == 2 +ensures 3 & 1 == 1 +ensures 3 & 2 == 2 +decreases +pure func InfoFieldFirstByteSerializationLemmas() bool + +ensures csum > 0xffff ==> + let newCsum := (csum >> 16) + (csum & 0xffff) in + newCsum < csum +decreases +pure func FoldChecksumLemma(csum uint32) struct{} \ No newline at end of file diff --git a/verification/utils/bitwise/proofs.dfy b/verification/utils/bitwise/proofs.dfy index d788a165f..dc0286d4f 100644 --- a/verification/utils/bitwise/proofs.dfy +++ b/verification/utils/bitwise/proofs.dfy @@ -101,3 +101,71 @@ lemma SerializeAndDeserializeLemma(m: MetaHdr, b0: bv8, b1: bv8, b2: bv8, b3: bv ensures var line := SerializedToLine(m); PutUint32Spec(b0, b1, b2, b3, line) ==> (DecodedFrom(Uint32Spec(b0, b1, b2, b3)) == m) {} + +lemma SerializeAndDeserializeMetaHdrLemma(m: MetaHdr) + requires InBounds(m) + ensures DecodedFrom(SerializedToLine(m)) == m +{} + +lemma InfoFieldFirstByteSerializationLemmas() + // or + ensures 0 as bv8 | 1 == 1 + ensures 0 as bv8 | 2 == 2 + ensures 1 as bv8 | 2 == 3 + // and + ensures 0 as bv8 & 1 == 0 + ensures 0 as bv8 & 2 == 0 + ensures 1 as bv8 & 1 == 1 + ensures 1 as bv8 & 2 == 0 + ensures 2 as bv8 & 1 == 0 + ensures 2 as bv8 & 2 == 2 + ensures 3 as bv8 & 1 == 1 + ensures 3 as bv8 & 2 == 2 +{} + + +// Functional specs for encoding/binary (BigEndian) +function FUint16Spec(b0: bv8, b1: bv8): bv16 { + (b1 as bv16) | ((b0 as bv16) << 8) +} + +function FPutUint16Spec(v: bv16): (bv8, bv8) { + ((v >> 8) as bv8, (v & 0xFF) as bv8) +} + +lemma FUint16AfterFPutUint16(v: bv16) + ensures var (b0, b1) := FPutUint16Spec(v); + FUint16Spec(b0, b1) == v +{} + +lemma FPutUint16AfterFUint16(b0: bv8, b1: bv8) + ensures var v := FUint16Spec(b0, b1); + FPutUint16Spec(v) == (b0, b1) +{} + +function FUint32Spec(b0: bv8, b1: bv8, b2: bv8, b3: bv8): bv32 { + (b3 as bv32) | ((b2 as bv32) << 8) | ((b1 as bv32) << 16) | ((b0 as bv32) << 24) +} + +function FPutUint32Spec(v: bv32): (bv8, bv8, bv8, bv8) { + (((v >> 24) & 0xFF) as bv8, + ((v >> 16) & 0xFF) as bv8, + ((v >> 8) & 0xFF) as bv8, + (v & 0xFF) as bv8) +} + +lemma FUint32AfterFPutUint32(v: bv32) + ensures var (b0, b1, b2, b3) := FPutUint32Spec(v); + FUint32Spec(b0, b1, b2, b3) == v +{} + +lemma FPutUint32AfterFUint32(b0: bv8, b1: bv8, b2: bv8, b3: bv8) + ensures var v := FUint32Spec(b0, b1, b2, b3); + 
FPutUint32Spec(v) == (b0, b1, b2, b3) +{} + +lemma FoldChecksumLemma(csum: bv32) + ensures csum > 0xffff ==> + var newCsum := (csum >> 16) + (csum & 0xffff); + newCsum < csum +{} \ No newline at end of file diff --git a/verification/utils/definitions/definitions.gobra b/verification/utils/definitions/definitions.gobra index b7086de2a..a9ac46f6b 100644 --- a/verification/utils/definitions/definitions.gobra +++ b/verification/utils/definitions/definitions.gobra @@ -75,6 +75,7 @@ const ( R53 R54 R55 + R56 ) // To be used as a precondition of functions and methods that can never be called @@ -91,20 +92,23 @@ pure func Uncallable() (res bool) { return false } -ghost -ensures false -decreases _ -func IgnoreBranch() +/**** Functions to introduce temporary assumptions **/ + +// Kills the branches that reach this point. ghost ensures false decreases _ -func IgnoreFromHere() +func TODO() +// Does the same as TODO, but should be used when it kills a branch +// that cannot be verified until an issue in SCION is fixed and ported +// to our branch of SCION. ghost ensures false decreases _ -func TODO() +func ToDoAfterScionFix(url string) + +/**** End of functions to introduce temporary assumptions **/ // type to be used as a stub for sets of private fields in formalizations of // third party libs @@ -116,4 +120,17 @@ requires b decreases func Asserting(ghost b bool) bool { return true -} \ No newline at end of file +} + +type Lemma struct{} + +// Assumption for IO-Specification +ghost +ensures b +decreases +func AssumeForIO(b bool) + +ghost +ensures b +decreases +func TemporaryAssumeForIO(b bool) diff --git a/verification/utils/ghost_sync/ghost-mutex.gobra b/verification/utils/ghost_sync/ghost-mutex.gobra new file mode 100644 index 000000000..e0295f43d --- /dev/null +++ b/verification/utils/ghost_sync/ghost-mutex.gobra @@ -0,0 +1,60 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package ghost_sync + +import "sync" +import . "verification/utils/definitions" + +// This package contains the definition of a Ghost Mutex (a.k.a. Ghost Lock), as described +// in https://arxiv.org/pdf/2311.14452. These mutexes are used to provide a ghost-like +// interface to an invariant, for the duration of an atomic operation. To use ghost +// mutexes soundly, we must ensure two properties: +// (1) All calls to Lock() must be accompanied by a call to Unlock(). +// (2) All operations performed between a call to Lock() and the corresponding call to +// Unlock() must be atomic. +// Currently, Gobra does not check either of these two properties. Property (1) could be checked +// by using obligations.
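+//
+// A minimal usage sketch (hypothetical client code, not part of this file;
+// `criticalInv` stands for an arbitrary invariant predicate chosen by the client):
+//
+//   fold criticalInv()        // establish the invariant instance
+//   m.SetInv(criticalInv)     // requires acc(m) && *m == GhostMutex{}
+//   m.Lock()                  // obtains m.UnlockP() && criticalInv()
+//   // ... a single atomic operation may rely on criticalInv() here ...
+//   m.Unlock()                // gives back m.UnlockP() and criticalInv()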
+ +type GhostMutex struct { + privateField PrivateField +} + +pred (m gpointer[GhostMutex]) LockP() +pred (m gpointer[GhostMutex]) UnlockP() + +ghost +requires acc(m.LockP(), _) +decreases _ +pure func (m gpointer[GhostMutex]) LockInv() pred() + +ghost +requires inv() && acc(m) && *m == GhostMutex{} +ensures m.LockP() && m.LockInv() == inv +decreases +func (m gpointer[GhostMutex]) SetInv(inv pred()) + +ghost +requires acc(m.LockP(), _) +ensures m.LockP() && m.UnlockP() && m.LockInv()() +decreases _ if sync.IgnoreBlockingForTermination() +func (m gpointer[GhostMutex]) Lock() + +ghost +requires acc(m.LockP(), _) && m.UnlockP() && m.LockInv()() +ensures m.LockP() +decreases _ +func (m gpointer[GhostMutex]) Unlock() diff --git a/verification/utils/seqs/seqs.gobra b/verification/utils/seqs/seqs.gobra index 6860a3762..af788fbe8 100644 --- a/verification/utils/seqs/seqs.gobra +++ b/verification/utils/seqs/seqs.gobra @@ -16,6 +16,8 @@ package seqs +import sl "verification/utils/slices" + ghost pure requires 0 <= n @@ -36,4 +38,12 @@ requires size >= 0 ensures len(res) == size ensures forall i int :: { res[i] } 0 <= i && i < size ==> res[i] == nil decreases _ -pure func NewSeqByteSlice(size int) (res seq[[]byte]) \ No newline at end of file +pure func NewSeqByteSlice(size int) (res seq[[]byte]) + +ghost +requires acc(sl.Bytes(ub, 0, len(ub)), _) +ensures len(res) == len(ub) +ensures forall i int :: { res[i] } 0 <= i && i < len(ub) ==> + res[i] == sl.GetByte(ub, 0, len(ub), i) +decreases _ +pure func ToSeqByte(ub []byte) (res seq[byte]) \ No newline at end of file diff --git a/verification/utils/slices/slices.gobra b/verification/utils/slices/slices.gobra index 4ca932905..d44e840b0 100644 --- a/verification/utils/slices/slices.gobra +++ b/verification/utils/slices/slices.gobra @@ -23,7 +23,7 @@ package slices // - For each type, there might be two different types of operations: those that keep track // of contents (the name of the operation ends in "C"), and those who do not. 
-pred AbsSlice_Bytes(s []byte, start int, end int) { +pred Bytes(s []byte, start int, end int) { // start inclusive 0 <= start && start <= end && @@ -33,71 +33,61 @@ pred AbsSlice_Bytes(s []byte, start int, end int) { } pure -requires acc(AbsSlice_Bytes(s, start, end), _) +requires acc(Bytes(s, start, end), _) requires start <= i && i < end decreases func GetByte(s []byte, start int, end int, i int) byte { - return unfolding acc(AbsSlice_Bytes(s, start, end), _) in s[i] + return unfolding acc(Bytes(s, start, end), _) in s[i] } ghost requires 0 < p -requires acc(AbsSliceC_Bytes(s, 0, len(s), contents), p) -ensures acc(AbsSlice_Bytes(s, 0, len(s)), p) -decreases -func GetAbsSlice_Bytes(s []byte, contents seq[byte], p perm) { - unfold acc(AbsSliceC_Bytes(s, 0, len(s), contents), p) - fold acc(AbsSlice_Bytes(s, 0, len(s)), p) -} - -ghost -requires 0 < p -requires acc(AbsSlice_Bytes(s, start, end), p) +requires acc(Bytes(s, start, end), p) requires start <= idx && idx <= end -ensures acc(AbsSlice_Bytes(s, start, idx), p) -ensures acc(AbsSlice_Bytes(s, idx, end), p) +ensures acc(Bytes(s, start, idx), p) +ensures acc(Bytes(s, idx, end), p) decreases func SplitByIndex_Bytes(s []byte, start int, end int, idx int, p perm) { - unfold acc(AbsSlice_Bytes(s, start, end), p) - fold acc(AbsSlice_Bytes(s, start, idx), p) - fold acc(AbsSlice_Bytes(s, idx, end), p) + unfold acc(Bytes(s, start, end), p) + fold acc(Bytes(s, start, idx), p) + fold acc(Bytes(s, idx, end), p) } ghost requires 0 < p -requires acc(AbsSlice_Bytes(s, start, idx), p) -requires acc(AbsSlice_Bytes(s, idx, end), p) -ensures acc(AbsSlice_Bytes(s, start, end), p) +requires acc(Bytes(s, start, idx), p) +requires acc(Bytes(s, idx, end), p) +ensures acc(Bytes(s, start, end), p) decreases func CombineAtIndex_Bytes(s []byte, start int, end int, idx int, p perm) { - unfold acc(AbsSlice_Bytes(s, start, idx), p) - unfold acc(AbsSlice_Bytes(s, idx, end), p) - fold acc(AbsSlice_Bytes(s, start, end), p) + unfold acc(Bytes(s, start, idx), p) + unfold acc(Bytes(s, idx, end), p) + fold acc(Bytes(s, start, end), p) } ghost requires 0 < p -requires acc(AbsSlice_Bytes(s, start, end), p) +requires acc(Bytes(s, start, end), p) // the following precondition convinces Gobra that // the slice operation is well-formed -requires unfolding acc(AbsSlice_Bytes(s, start, end), p) in true -ensures acc(AbsSlice_Bytes(s[start:end], 0, len(s[start:end])), p) +requires unfolding acc(Bytes(s, start, end), p) in true +ensures acc(Bytes(s[start:end], 0, len(s[start:end])), p) decreases func Reslice_Bytes(s []byte, start int, end int, p perm) { - unfold acc(AbsSlice_Bytes(s, start, end), p) + unfold acc(Bytes(s, start, end), p) assert forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < (end-start) ==> &s[start:end][i] == &s[start + i] - fold acc(AbsSlice_Bytes(s[start:end], 0, len(s[start:end])), p) + fold acc(Bytes(s[start:end], 0, len(s[start:end])), p) } ghost requires 0 < p requires 0 <= start && start <= end && end <= cap(s) requires len(s[start:end]) <= cap(s) -requires acc(AbsSlice_Bytes(s[start:end], 0, len(s[start:end])), p) -ensures acc(AbsSlice_Bytes(s, start, end), p) +requires acc(Bytes(s[start:end], 0, len(s[start:end])), p) +ensures acc(Bytes(s, start, end), p) decreases func Unslice_Bytes(s []byte, start int, end int, p perm) { - unfold acc(AbsSlice_Bytes(s[start:end], 0, len(s[start:end])), p) + unfold acc(Bytes(s[start:end], 0, len(s[start:end])), p) assert 0 <= start && start <= end && end <= cap(s) assert forall i int :: { &s[start:end][i] 
} 0 <= i && i < len(s[start:end]) ==> acc(&s[start:end][i], p) assert forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < len(s[start:end]) ==> &s[start:end][i] == &s[start + i] @@ -113,16 +103,16 @@ func Unslice_Bytes(s []byte, start int, end int, p perm) { assert acc(&s[start + j], p) assert forall i int :: { &s[i] } start <= i && i <= start+j ==> acc(&s[i], p) } - fold acc(AbsSlice_Bytes(s, start, end), p) + fold acc(Bytes(s, start, end), p) } ghost requires 0 < p requires 0 <= start && start <= end && end <= len(s) -requires acc(AbsSlice_Bytes(s, 0, len(s)), p) -ensures acc(AbsSlice_Bytes(s[start:end], 0, end-start), p) -ensures acc(AbsSlice_Bytes(s, 0, start), p) -ensures acc(AbsSlice_Bytes(s, end, len(s)), p) +requires acc(Bytes(s, 0, len(s)), p) +ensures acc(Bytes(s[start:end], 0, end-start), p) +ensures acc(Bytes(s, 0, start), p) +ensures acc(Bytes(s, end, len(s)), p) decreases func SplitRange_Bytes(s []byte, start int, end int, p perm) { SplitByIndex_Bytes(s, 0, len(s), start, p) @@ -133,10 +123,10 @@ func SplitRange_Bytes(s []byte, start int, end int, p perm) { ghost requires 0 < p requires 0 <= start && start <= end && end <= len(s) -requires acc(AbsSlice_Bytes(s[start:end], 0, end-start), p) -requires acc(AbsSlice_Bytes(s, 0, start), p) -requires acc(AbsSlice_Bytes(s, end, len(s)), p) -ensures acc(AbsSlice_Bytes(s, 0, len(s)), p) +requires acc(Bytes(s[start:end], 0, end-start), p) +requires acc(Bytes(s, 0, start), p) +requires acc(Bytes(s, end, len(s)), p) +ensures acc(Bytes(s, 0, len(s)), p) decreases func CombineRange_Bytes(s []byte, start int, end int, p perm) { Unslice_Bytes(s, start, end, p) @@ -145,10 +135,10 @@ func CombineRange_Bytes(s []byte, start int, end int, p perm) { } ghost -ensures AbsSlice_Bytes(nil, 0, 0) +ensures Bytes(nil, 0, 0) decreases func NilAcc_Bytes() { - fold AbsSlice_Bytes(nil, 0, 0) + fold Bytes(nil, 0, 0) } /** Auxiliar definitions Any **/ @@ -164,81 +154,3 @@ pure func NewSeq_Any(size int) (res seq[any]) // ResliceC_Any /** End of Auxiliar definitions Any **/ - -/** Slices of Any without Contents **/ -pred AbsSlice_Any(s []any, start int, end int) { - // start inclusive - 0 <= start && - start <= end && - // end exclusive - end <= cap(s) && - forall i int :: { &s[i] } start <= i && i < end ==> acc(&s[i]) -} - -ghost -requires 0 < p -requires acc(AbsSlice_Any(s, start, end), p) -requires start <= idx && idx <= end -ensures acc(AbsSlice_Any(s, start, idx), p) -ensures acc(AbsSlice_Any(s, idx, end), p) -decreases -func SplitByIndex_Any(s []any, start int, end int, idx int, p perm) { - unfold acc(AbsSlice_Any(s, start, end), p) - fold acc(AbsSlice_Any(s, start, idx), p) - fold acc(AbsSlice_Any(s, idx, end), p) -} - -ghost -requires 0 < p -requires acc(AbsSlice_Any(s, start, idx), p) -requires acc(AbsSlice_Any(s, idx, end), p) -ensures acc(AbsSlice_Any(s, start, end), p) -decreases -func CombineAtIndex_Any(s []any, start int, end int, idx int, p perm) { - unfold acc(AbsSlice_Any(s, start, idx), p) - unfold acc(AbsSlice_Any(s, idx, end), p) - fold acc(AbsSlice_Any(s, start, end), p) -} - -ghost -requires 0 < p -requires acc(AbsSlice_Any(s, start, end), p) -// the following precondition convinces Gobra that -// the slice operation is well-formed -requires unfolding acc(AbsSlice_Any(s, start, end), p) in true -ensures acc(AbsSlice_Any(s[start:end], 0, len(s[start:end])), p) -decreases -func Reslice_Any(s []any, start int, end int, p perm) { - unfold acc(AbsSlice_Any(s, start, end), p) - assert forall i int :: { &s[start:end][i] }{ 
&s[start + i] } 0 <= i && i < (end-start) ==> &s[start:end][i] == &s[start + i] - fold acc(AbsSlice_Any(s[start:end], 0, len(s[start:end])), p) -} - -ghost -requires 0 < p -requires 0 <= start && start <= end && end <= cap(s) -requires len(s[start:end]) <= cap(s) -requires acc(AbsSlice_Any(s[start:end], 0, len(s[start:end])), p) -ensures acc(AbsSlice_Any(s, start, end), p) -decreases -func Unslice_Any(s []any, start int, end int, p perm) { - unfold acc(AbsSlice_Any(s[start:end], 0, len(s[start:end])), p) - assert 0 <= start && start <= end && end <= cap(s) - assert forall i int :: { &s[start:end][i] } 0 <= i && i < len(s[start:end]) ==> acc(&s[start:end][i], p) - assert forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < len(s[start:end]) ==> &s[start:end][i] == &s[start + i] - - invariant 0 <= j && j <= len(s[start:end]) - invariant forall i int :: { &s[start:end][i] } j <= i && i < len(s[start:end]) ==> acc(&s[start:end][i], p) - invariant forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < len(s[start:end]) ==> &s[start:end][i] == &s[start + i] - invariant forall i int :: { &s[i] } start <= i && i < start+j ==> acc(&s[i], p) - decreases len(s[start:end]) - j - for j := 0; j < len(s[start:end]); j++ { - assert forall i int :: { &s[i] } start <= i && i < start+j ==> acc(&s[i], p) - assert &s[start:end][j] == &s[start + j] - assert acc(&s[start + j], p) - assert forall i int :: { &s[i] } start <= i && i <= start+j ==> acc(&s[i], p) - } - fold acc(AbsSlice_Any(s, start, end), p) -} - -/** End of slices of Any without Contents **/ diff --git a/verification/utils/slices/slices_contents.gobra b/verification/utils/slices/slices_contents.gobra deleted file mode 100644 index 2cdf3a2c1..000000000 --- a/verification/utils/slices/slices_contents.gobra +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2023 ETH Zurich -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +gobra - -// TODO!! -// When there is support for closed functions, we should drop this -// file and describe the contents of the slice in terms of closed -// heap-dependent functions. - -package slices - -// This file contains predicates for describing the contents of slices and -// functions that manipulate the knowledge in those predicates. - -// How to extend this file: -// - if we need to support slices of non-supported types, we must repeat all definitions -// for that type. For this, we should be careful to avoid introducing cylical dependencies. -// The suffix of the predicate/function should be the type of the elems of the slices. -// - For each type, there might be two different types of operations: those that keep track -// of contents (the name of the operation ends in "C"), and those who do not. 
- -import "github.com/scionproto/scion/verification/utils/seqs" - -pred AbsSliceC_Bytes(s []byte, start int, end int, ghost contents seq[byte]) { - // start inclusive - 0 <= start && - start <= end && - // end exclusive - end <= cap(s) && - len(contents) == end - start && - // Maybe, it is worth having different versions with - // different triggers, or using a different trigger - // than the one that is inferred. - forall i int :: { &s[i] } start <= i && i < end ==> - (acc(&s[i]) && s[i] == contents[i - start]) -} - -ghost -requires 0 < p -requires 0 <= start && start <= end && end <= len(s) -requires acc(AbsSlice_Bytes(s, start, end), p) -ensures acc(AbsSliceC_Bytes(s, start, end, contents), p) -decreases -func GetAbsSliceC_Bytes(s []byte, start int, end int, p perm) (contents seq[byte]) { - contents = seqs.NewSeqByte(end - start) - unfold acc(AbsSlice_Bytes(s, start, end), p) - invariant forall k int :: { &s[k] } start <= k && k < end ==> acc(&s[k], p) - invariant start <= i && i <= end - invariant len(contents) == end - start - invariant forall k int :: { contents[k] } start <= k && k < i ==> contents[k - start] == s[k] - decreases end - i - for i := start; i < end; i += 1 { - contents[i - start] = s[i] - } - fold acc(AbsSliceC_Bytes(s, start, end, contents), p) - return contents -} - -ghost -requires 0 < p -requires AbsSliceC_Bytes(s, start, end, contents) -requires start < idx && idx < end -ensures AbsSliceC_Bytes(s, start, idx, contents[:idx-start]) && AbsSliceC_Bytes(s, idx, end, contents[idx-start:]) -decreases -func SplitByIndexC_Bytes(s []byte, start int, end int, idx int, contents seq[byte], p perm) { - unfold AbsSliceC_Bytes(s, start, end, contents) - assert len(contents) == end - start - fold AbsSliceC_Bytes(s, start, idx, contents[:idx-start]) - fold AbsSliceC_Bytes(s, idx, end, contents[idx-start:]) -} - -ghost -requires 0 < p -requires acc(AbsSliceC_Bytes(s, start, idx, contents1), p) -requires acc(AbsSliceC_Bytes(s, idx, end, contents2), p) -ensures acc(AbsSliceC_Bytes(s, start, end, contents1 ++ contents2), p) -decreases -func CombineAtIndexC_Bytes(s []byte, start int, end int, idx int, contents1 seq[byte], contents2 seq[byte], p perm) { - unfold acc(AbsSliceC_Bytes(s, start, idx, contents1), p) - unfold acc(AbsSliceC_Bytes(s, idx, end, contents2), p) - fold acc(AbsSliceC_Bytes(s, start, end, contents1 ++ contents2), p) -} - -ghost -requires AbsSliceC_Bytes(s, start, end, contents) -// the following precondition convinces Gobra that -// the slice operation is well-formed -requires unfolding AbsSliceC_Bytes(s, start, end, contents) in true -ensures AbsSliceC_Bytes(s[start:end], 0, len(s[start:end]), contents) -decreases -func ResliceC_Bytes(s []byte, start int, end int, contents seq[byte]) { - unfold AbsSliceC_Bytes(s, start, end, contents) - assert forall i int :: { &s[start:end][i] }{ &s[start + i] } 0 <= i && i < (end-start) ==> &s[start:end][i] == &s[start + i] - fold AbsSliceC_Bytes(s[start:end], 0, len(s[start:end]), contents) -} - -/** Slices of Any with Contents **/ - -// The elements in contents are required to be comparable -pred AbsSliceC_Any(s []any, start int, end int, ghost contents seq[any]) { - // start inclusive - 0 <= start && - start < end && - // end exclusive - end <= cap(s) && - len(contents) == end - start && - // Maybe, it is worth having different versions with - // different triggers, or using a different trigger - // than the one that is inferred. 
- forall i int :: { &s[i] } start <= i && i < end ==> (acc(&s[i]) && s[i] === contents[i - start]) -} - -ghost -requires AbsSliceC_Any(s, start, end, contents) -requires start < idx && idx < end -ensures AbsSliceC_Any(s, start, idx, contents[:idx-start]) && AbsSliceC_Any(s, idx, end, contents[idx-start:]) -decreases -func SplitByIndexC_Any(s []any, start int, end int, idx int, contents seq[any]) { - unfold AbsSliceC_Any(s, start, end, contents) - assert len(contents) == end - start - fold AbsSliceC_Any(s, start, idx, contents[:idx-start]) - fold AbsSliceC_Any(s, idx, end, contents[idx-start:]) -} - -/** End of slices of Any with Contents **/ diff --git a/verification/utils/slices/slices_test.gobra b/verification/utils/slices/slices_test.gobra index f78f42147..399161928 100644 --- a/verification/utils/slices/slices_test.gobra +++ b/verification/utils/slices/slices_test.gobra @@ -16,34 +16,10 @@ package slices -import "github.com/scionproto/scion/verification/utils/seqs" - /** Bytes **/ -func AbsSliceC_Bytes_test() { - s := make([]byte, 10) - ghost contents := seqs.NewSeqByte(10) - fold AbsSliceC_Bytes(s, 0, 10, contents) - // assert false // fails -} -func AbsSlice_Bytes_test() { +func Bytes_test() { s := make([]byte, 10) - fold AbsSlice_Bytes(s, 0, 10) - // assert false // fails -} - -/** Any **/ -func AbsSliceC_Any_test() { - s := make([]any, 1) - var elem interface{} = int(1) - ghost contents := seq[any]{elem} - s[0] = elem - fold AbsSliceC_Any(s, 0, 1, contents) - // assert false // fails -} - -func AbsSlice_Any_test() { - s := make([]any, 10) - fold AbsSlice_Any(s, 0, 10) + fold Bytes(s, 0, 10) // assert false // fails -} +} \ No newline at end of file
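For illustration (not part of the diff itself): the renamed Bytes predicate composes with the range helpers above roughly as follows in client code. The function name readPayloadByte, the permission amount 1/2, and the index choices are hypothetical.

ghost
requires len(s) >= 8
preserves acc(Bytes(s, 0, len(s)), 1/2)
decreases
func readPayloadByte(s []byte) {
	SplitRange_Bytes(s, 2, 8, 1/2)   // yields acc(Bytes(s[2:8], 0, 6), 1/2) plus both remainders
	b := GetByte(s[2:8], 0, 6, 3)    // pure read under the subrange predicate
	assert b == GetByte(s[2:8], 0, 6, 3)
	CombineRange_Bytes(s, 2, 8, 1/2) // restores acc(Bytes(s, 0, len(s)), 1/2)
}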