From 97c98ffcd85c7c68e7be926e0564ede894fd8c0a Mon Sep 17 00:00:00 2001 From: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> Date: Fri, 16 Aug 2024 15:45:08 +0200 Subject: [PATCH 1/2] Proof of SetHopField (#361) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * proof of sethopfield and io-assumptions * fix syntax errors * fix termination measure * fix verification errors * simplifications and comments * fix syntax error * feedback * fix verification error * renaming * space between arithmetic operands * increase timeout of path/scion * fix verification error * test: parallelizeBranches for dependencies * test: increase timeout for scion package to 20 min * test: increase timeout for scion package to 1h * use parallelizeBranches only for scion package * stronger precondition for setHopfield * Revert "stronger precondition for setHopfield" * test: scion pkg without parallelizeBranches * Revert "test: scion pkg without parallelizeBranches" * fix merging mistake * fix merge mistakes * fix verification error * Drop `TemporaryAssumeForIO` * Apply suggestions from code review * Update .github/workflows/gobra.yml --------- Co-authored-by: João Pereira --- .github/workflows/gobra.yml | 2 +- pkg/slayers/path/scion/raw.go | 78 ++++++++++++++----- .../utils/definitions/definitions.gobra | 5 -- 3 files changed, 60 insertions(+), 25 deletions(-) diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index d88022b09..7071a8fb1 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -265,7 +265,7 @@ jobs: uses: viperproject/gobra-action@main with: packages: 'pkg/slayers/path/scion' - timeout: 5m + timeout: 30m headerOnly: ${{ env.headerOnly }} module: ${{ env.module }} includePaths: ${{ env.includePaths }} diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 4ee0008c6..259258cd9 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -557,32 +557,72 @@ func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) // https://github.com/viperproject/gobra/issues/192 //@ assume 0 <= tmpHopField.ConsIngress && 0 <= tmpHopField.ConsEgress //@ fold acc(tmpHopField.Mem(), R9) - //@ unfold acc(s.Mem(ubuf), R20) - //@ unfold acc(s.Base.Mem(), R20) + //@ reveal validPktMetaHdr(ubuf) + //@ unfold acc(s.Mem(ubuf), R50) + //@ unfold acc(s.Base.Mem(), R50) + //@ ghost currInfIdx := int(s.PathMeta.CurrINF) + //@ ghost currHfIdx := int(s.PathMeta.CurrHF) + //@ ghost seg1Len := int(s.PathMeta.SegLen[0]) + //@ ghost seg2Len := int(s.PathMeta.SegLen[1]) + //@ ghost seg3Len := int(s.PathMeta.SegLen[2]) + //@ ghost segLens := io.CombineSegLens(seg1Len, seg2Len, seg3Len) + //@ ghost segLen := segLens.LengthOfCurrSeg(idx) + //@ ghost prevSegLen := segLens.LengthOfPrevSeg(idx) + //@ ghost offset := HopFieldOffset(s.Base.NumINF, prevSegLen, MetaLen) + //@ ghost hopfieldOffset := MetaLen + s.NumINF*path.InfoLen if idx >= s.NumHops { // (VerifiedSCION) introduced `err` err := serrors.New("HopField index out of bounds", "max", s.NumHops-1, "actual", idx) - //@ fold acc(s.Base.Mem(), R20) - //@ fold acc(s.Mem(ubuf), R20) + //@ fold acc(s.Base.Mem(), R50) + //@ fold acc(s.Mem(ubuf), R50) return err } hopOffset := MetaLen + s.NumINF*path.InfoLen + idx*path.HopLen - //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) - //@ sl.SplitRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) + + //@ 
SliceBytesIntoSegments(ubuf, segLens, HalfPerm) + //@ SliceBytesIntoInfoFields(ubuf[:hopfieldOffset], s.NumINF, segLens, R40) + + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ ghost inf := path.BytesToAbsInfoField(InfofieldByteSlice(ubuf, currInfIdx), 0) + //@ ghost hfIdxSeg := idx-prevSegLen + //@ ghost currHopfields := HopfieldsByteSlice(ubuf, currInfIdx, segLens) + //@ ghost if idx == currHfIdx { + //@ CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen) + //@ LeftSegEquality(ubuf, currInfIdx+1, segLens) + //@ MidSegEquality(ubuf, currInfIdx+2, segLens) + //@ RightSegEquality(ubuf, currInfIdx-1, segLens) + //@ reveal s.absPkt(ubuf) + //@ SplitHopfields(currHopfields, hfIdxSeg, segLen, R0) + //@ EstablishBytesStoreCurrSeg(currHopfields, hfIdxSeg, segLen, inf) + //@ SplitHopfields(currHopfields, hfIdxSeg, segLen, R0) + //@ } else { + //@ sl.SplitRange_Bytes(ubuf[offset:offset+segLen*path.HopLen], hfIdxSeg*path.HopLen, + //@ (hfIdxSeg+1)*path.HopLen, HalfPerm) + //@ } + //@ sl.SplitRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, HalfPerm) ret := tmpHopField.SerializeTo(s.Raw[hopOffset : hopOffset+path.HopLen]) - //@ sl.CombineRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) - //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm) - //@ fold acc(s.Base.Mem(), R20) - //@ fold acc(s.Mem(ubuf), R20) - // (VerifiedSCION) The proof for these assumptions is provided in PR #361 - // (https://github.com/viperproject/VerifiedSCION/pull/361), which will - // be merged once the performance issues are resolved. - //@ TemporaryAssumeForIO(validPktMetaHdr(ubuf) && s.GetBase(ubuf).EqAbsHeader(ubuf)) - //@ TemporaryAssumeForIO(idx == int(old(s.GetCurrHF(ubuf))) ==> - //@ let oldPkt := old(s.absPkt(ubuf)) in - //@ let newPkt := oldPkt.UpdateHopField(hop.ToIO_HF()) in - //@ s.absPkt(ubuf) == newPkt) + //@ sl.CombineRange_Bytes(ubuf, hopOffset, hopOffset+path.HopLen, HalfPerm) + //@ ValidPktMetaHdrSublice(ubuf, MetaLen) + //@ assert reveal validPktMetaHdr(ubuf) + //@ ghost if idx == currHfIdx { + //@ CombineHopfields(currHopfields, hfIdxSeg, segLen, R0) + //@ EstablishBytesStoreCurrSeg(currHopfields, hfIdxSeg, segLen, inf) + //@ CombineHopfields(currHopfields, hfIdxSeg, segLen, R0) + //@ CurrSegEquality(ubuf, offset, currInfIdx, hfIdxSeg, segLen) + //@ LeftSegEquality(ubuf, currInfIdx+1, segLens) + //@ MidSegEquality(ubuf, currInfIdx+2, segLens) + //@ RightSegEquality(ubuf, currInfIdx-1, segLens) + //@ reveal s.absPkt(ubuf) + //@ assert s.absPkt(ubuf).CurrSeg.Future == + //@ seq[io.IO_HF]{tmpHopField.ToIO_HF()} ++ old(s.absPkt(ubuf).CurrSeg.Future[1:]) + //@ } else { + //@ sl.CombineRange_Bytes(ubuf[offset:offset+segLen*path.HopLen], hfIdxSeg*path.HopLen, + //@ (hfIdxSeg+1)*path.HopLen, HalfPerm) + //@ } + //@ CombineBytesFromInfoFields(ubuf[:hopfieldOffset], s.NumINF, segLens, R40) + //@ CombineBytesFromSegments(ubuf, segLens, HalfPerm) + //@ fold acc(s.Base.Mem(), R50) + //@ fold acc(s.Mem(ubuf), R50) return ret } diff --git a/verification/utils/definitions/definitions.gobra b/verification/utils/definitions/definitions.gobra index a9ac46f6b..fe9e35ab3 100644 --- a/verification/utils/definitions/definitions.gobra +++ b/verification/utils/definitions/definitions.gobra @@ -129,8 +129,3 @@ ghost ensures b decreases func AssumeForIO(b bool) - -ghost -ensures b -decreases -func TemporaryAssumeForIO(b bool) From 445bbadaecc9da6c7bc9b702b5a2581957015a9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Pereira?= Date: Tue, 27 Aug 2024 20:28:04 +0200 Subject: [PATCH 2/2] Stabilize 
`path/scion` (#370) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test * replace hopFieldsBytePositions and by recursive functions * renaming and public lemmas without perm param * fix mistake * Apply suggestions from code review Co-authored-by: João Pereira * space around arith. operators --------- Co-authored-by: mlimbeck Co-authored-by: Markus Limbeck <92801626+mlimbeck@users.noreply.github.com> --- .../path/scion/info_hop_setter_lemmas.gobra | 59 +----- pkg/slayers/path/scion/raw_spec.gobra | 179 +++++++++++------- 2 files changed, 119 insertions(+), 119 deletions(-) diff --git a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra index 04aa00308..f8c436c68 100644 --- a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra +++ b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra @@ -593,61 +593,14 @@ func EstablishBytesStoreCurrSeg(hopfields []byte, currHfIdx int, segLen int, inf currseg := reveal CurrSegWithInfo(hopfields, currHfIdx, segLen, inf) currHfStart := currHfIdx * path.HopLen currHfEnd := currHfStart + path.HopLen - unfold acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) - unfold acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R56) - hf := hopFields(hopfields, 0, 0, segLen) - hopFieldsBytePositionsLemma(hopfields, 0, 0, segLen, R54) - reveal hopFieldsBytePositions(hopfields, 0, 0, segLen, hf) - assert len(currseg.Future) > 0 - assert currseg.Future[0] == path.BytesToIO_HF(hopfields[currHfStart:currHfEnd], 0, 0, path.HopLen) - splitHopFieldsInPastAndFuture(hopfields, currHfIdx, segLen) - assert currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) - assert currseg.Future[0] == hf[currHfIdx] - assert hf[currHfIdx:][1:] == hf[currHfIdx + 1:] - assert currseg.Future == hf[currHfIdx:] - assert currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx- 1)) - assert currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) - fold acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R56) - fold acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) -} - -// `splitHopFieldsInPastAndFuture` shows that the raw bytes containing all hopfields -// can be split into two slices, one that exclusively contains all past hopfields and another -// that exclusively contains all future ones. This helps in proving that the future and past -// hopfields remain unchanged when the current hopfield is modified. 
-ghost -requires 0 < segLen -requires 0 <= currHfIdx && currHfIdx < segLen -requires segLen * path.HopLen == len(hopfields) -preserves acc(sl.Bytes(hopfields, 0, len(hopfields)), R50) -preserves let currHfStart := currHfIdx * path.HopLen in - let currHfEnd := currHfStart + path.HopLen in - acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R50) && - acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R50) -ensures let currHfStart := currHfIdx * path.HopLen in - let currHfEnd := currHfStart + path.HopLen in - hopFields(hopfields, 0, 0, segLen)[:currHfIdx] == - hopFields(hopfields[:currHfStart], 0, 0, currHfIdx) && - hopFields(hopfields, 0, 0, segLen)[currHfIdx + 1:] == - hopFields(hopfields[currHfEnd:], 0, 0, segLen - currHfIdx - 1) -decreases -func splitHopFieldsInPastAndFuture(hopfields []byte, currHfIdx int, segLen int) { - currHfStart := currHfIdx * path.HopLen - currHfEnd := currHfStart + path.HopLen - hf := hopFields(hopfields, 0, 0, segLen) - hopFieldsBytePositionsLemma(hopfields, 0, 0, segLen, R54) - reveal hopFieldsBytePositions(hopfields, 0, 0, segLen, hf) - hfPast := hopFields(hopfields, 0, 0, currHfIdx) - hopFieldsBytePositionsLemma(hopfields, 0, 0, currHfIdx, R54) - reveal hopFieldsBytePositions(hopfields, 0, 0, currHfIdx, hfPast) + HopsFromSuffixOfRawMatchSuffixOfHops(hopfields, 0, 0, segLen, currHfIdx) + AlignHopsOfRawWithOffsetAndIndex(hopfields, 0, currHfIdx + 1, segLen, currHfIdx + 1) + HopsFromPrefixOfRawMatchPrefixOfHops(hopfields, 0, 0, segLen, currHfIdx + 1) + HopsFromPrefixOfRawMatchPrefixOfHops(hopfields, 0, 0, segLen, currHfIdx) widenHopFields(hopfields, 0, 0, currHfIdx, 0, currHfStart, R52) - - hfFuture := hopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1) - hopFieldsBytePositionsLemma(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, R54) - reveal hopFieldsBytePositions(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, hfFuture) - widenHopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, - currHfEnd, segLen * path.HopLen, R52) + widenHopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, currHfEnd, segLen * path.HopLen, R52) + widenHopFields(hopfields, currHfStart, 0, 1, currHfStart, currHfEnd, R52) } // `SplitHopfields` splits the permission to the raw bytes of a segment into the permission diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 9bf4537db..72f2ea63b 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -237,8 +237,8 @@ ensures len(res) == len(hopfields) decreases len(hopfields) pure func segPast(hopfields seq[io.IO_HF]) (res seq[io.IO_HF]) { return len(hopfields) == 0 ? seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[len(hopfields)-1]} ++ segPast( - hopfields[:len(hopfields)-1]) + seq[io.IO_HF]{hopfields[len(hopfields) - 1]} ++ segPast( + hopfields[:len(hopfields) - 1]) } ghost @@ -246,8 +246,8 @@ ensures len(res) == len(hopfields) decreases len(hopfields) pure func segHistory(hopfields seq[io.IO_HF]) (res seq[io.IO_ahi]) { return len(hopfields) == 0 ? 
seq[io.IO_ahi]{} : - seq[io.IO_ahi]{hopfields[len(hopfields)-1].Toab()} ++ segHistory( - hopfields[:len(hopfields)-1]) + seq[io.IO_ahi]{hopfields[len(hopfields) - 1].Toab()} ++ segHistory( + hopfields[:len(hopfields) - 1]) } ghost @@ -385,7 +385,7 @@ pure func (s *Raw) absPkt(raw []byte) (res io.IO_pkt2) { let numINF := segs.NumInfoFields() in let offset := HopFieldOffset(numINF, prevSegLen, MetaLen) in io.IO_Packet2 { - CurrSeg : CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen), + CurrSeg : CurrSeg(raw, offset, currInfIdx, currHfIdx - prevSegLen, segLen, MetaLen), LeftSeg : LeftSeg(raw, currInfIdx + 1, segs, MetaLen), MidSeg : MidSeg(raw, currInfIdx + 2, segs, MetaLen), RightSeg : RightSeg(raw, currInfIdx - 1, segs, MetaLen), @@ -536,7 +536,7 @@ pure func (s *Raw) CorrectlyDecodedInfWithIdx(ub []byte, idx int, info path.Info return unfolding acc(s.Mem(ub), _) in unfolding acc(s.Base.Mem(), _) in let infOffset := MetaLen + idx*path.InfoLen in - infOffset+path.InfoLen <= len(ub) && + infOffset + path.InfoLen <= len(ub) && info.ToAbsInfoField() == reveal path.BytesToAbsInfoField(ub, infOffset) } @@ -550,8 +550,8 @@ decreases pure func (s *Raw) CorrectlyDecodedInf(ub []byte, info path.InfoField) bool { return unfolding acc(s.Mem(ub), _) in unfolding acc(s.Base.Mem(), _) in - let infOffset := MetaLen + int(s.Base.PathMeta.CurrINF)*path.InfoLen in - infOffset+path.InfoLen <= len(ub) && + let infOffset := MetaLen + int(s.Base.PathMeta.CurrINF) * path.InfoLen in + infOffset + path.InfoLen <= len(ub) && info.ToAbsInfoField() == reveal path.BytesToAbsInfoField(ub, infOffset) } @@ -565,8 +565,8 @@ decreases pure func (s *Raw) CorrectlyDecodedHfWithIdx(ub []byte, idx int, hop path.HopField) bool { return unfolding acc(s.Mem(ub), _) in unfolding acc(s.Base.Mem(), _) in - let hopOffset := MetaLen + int(s.NumINF)*path.InfoLen + idx*path.HopLen in - hopOffset+path.HopLen <= len(ub) && + let hopOffset := MetaLen + int(s.NumINF) * path.InfoLen + idx * path.HopLen in + hopOffset + path.HopLen <= len(ub) && hop.ToIO_HF() == path.BytesToIO_HF(ub, 0, hopOffset, len(ub)) } @@ -579,9 +579,9 @@ decreases pure func (s *Raw) CorrectlyDecodedHf(ub []byte, hop path.HopField) bool { return unfolding acc(s.Mem(ub), _) in unfolding acc(s.Base.Mem(), _) in - let hopOffset := MetaLen + int(s.NumINF)*path.InfoLen + - int(s.Base.PathMeta.CurrHF)*path.HopLen in - hopOffset+path.HopLen <= len(ub) && + let hopOffset := MetaLen + int(s.NumINF) * path.InfoLen + + int(s.Base.PathMeta.CurrHF) * path.HopLen in + hopOffset + path.HopLen <= len(ub) && hop.ToIO_HF() == path.BytesToIO_HF(ub, 0, hopOffset, len(ub)) } @@ -607,7 +607,7 @@ func (s *Raw) LastHopLemma(ubuf []byte) { numINF := segs.NumInfoFields() offset := HopFieldOffset(numINF, prevSegLen, MetaLen) pkt := reveal s.absPkt(ubuf) - assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx - prevSegLen, segLen, MetaLen) assert len(pkt.CurrSeg.Future) == 1 } @@ -634,7 +634,7 @@ func (s *Raw) XoverLemma(ubuf []byte) { numINF := segs.NumInfoFields() offset := HopFieldOffset(numINF, prevSegLen, MetaLen) pkt := reveal s.absPkt(ubuf) - assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx-prevSegLen, segLen, MetaLen) + assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset, currInfIdx, currHfIdx - prevSegLen, segLen, MetaLen) assert pkt.LeftSeg == reveal LeftSeg(ubuf, currInfIdx + 1, segs, MetaLen) assert len(pkt.CurrSeg.Future) 
== 1 assert pkt.LeftSeg != none[io.IO_seg2] @@ -686,18 +686,12 @@ func (s *Raw) DecodingLemma(ubuf []byte, info path.InfoField, hop path.HopField) prevSegLen := segs.LengthOfPrevSeg(currHfIdx) numINF := segs.NumInfoFields() offset := HopFieldOffset(numINF, prevSegLen, MetaLen) - hfIdxSeg := currHfIdx-prevSegLen + hfIdxSeg := currHfIdx - prevSegLen reveal s.CorrectlyDecodedInf(ubuf, info) reveal s.CorrectlyDecodedHf(ubuf, hop) - pkt := reveal s.absPkt(ubuf) - currseg := reveal CurrSeg(ubuf, offset, currInfIdx, hfIdxSeg, segLen, MetaLen) - hopFields := hopFields(ubuf, offset, 0, segLen) - hopFieldsBytePositionsLemma(ubuf, offset, 0, segLen, R54) - reveal hopFieldsBytePositions(ubuf, offset, 0, segLen, hopFields) - assert currseg.Future[0] == hopFields[hfIdxSeg] - assert hopFields[hfIdxSeg] == - path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * hfIdxSeg, len(ubuf)) - assert currseg.Future[0] == path.BytesToIO_HF(ubuf, 0, offset + path.HopLen * hfIdxSeg, len(ubuf)) + reveal s.absPkt(ubuf) + reveal CurrSeg(ubuf, offset, currInfIdx, hfIdxSeg, segLen, MetaLen) + HopsFromPrefixOfRawMatchPrefixOfHops(ubuf, offset, 0, segLen, hfIdxSeg) assert reveal s.EqAbsInfoField(s.absPkt(ubuf), info.ToAbsInfoField()) assert reveal s.EqAbsHopField(s.absPkt(ubuf), hop.ToIO_HF()) } @@ -722,11 +716,11 @@ requires PktLen(segs, 0) <= len(raw) requires 0 <= currInfIdx && currInfIdx < 2 requires 1 <= currInfIdx ==> 0 < segs.Seg3Len preserves acc(sl.Bytes(raw, 0, len(raw)), R56) -ensures LeftSeg(raw, currInfIdx+1, segs, 0) != none[io.IO_seg3] +ensures LeftSeg(raw, currInfIdx + 1, segs, 0) != none[io.IO_seg3] ensures RightSeg(raw, currInfIdx, segs, 0) != none[io.IO_seg3] decreases func XoverSegNotNone(raw []byte, currInfIdx int, segs io.SegLens) { - reveal LeftSeg(raw, currInfIdx+1, segs, 0) + reveal LeftSeg(raw, currInfIdx + 1, segs, 0) reveal RightSeg(raw, currInfIdx, segs, 0) } @@ -738,15 +732,15 @@ requires 0 <= currHfIdx && currHfIdx < segLen requires 0 <= currInfIdx && currInfIdx < 3 preserves acc(sl.Bytes(raw, 0, len(raw)), R56) preserves len(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0).Future) > 0 -ensures CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) == +ensures CurrSeg(raw, offset, currInfIdx, currHfIdx + 1, segLen, 0) == absIncPathSeg(CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0)) decreases func IncCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { currseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0) - incseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) + incseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx + 1, segLen, 0) hf := hopFields(raw, offset, 0, segLen) - hfPast := hf[:currHfIdx+1] - assert hfPast[:len(hfPast)-1] == hf[:currHfIdx] + hfPast := hf[:currHfIdx + 1] + assert hfPast[:len(hfPast) - 1] == hf[:currHfIdx] assert currseg.AInfo == incseg.AInfo assert currseg.UInfo == incseg.UInfo assert currseg.ConsDir == incseg.ConsDir @@ -762,22 +756,22 @@ requires segs.Valid() requires 0 < segs.Seg2Len requires PktLen(segs, 0) <= len(raw) requires 1 <= currInfIdx && currInfIdx < 3 -requires 1 == currInfIdx ==> currHfIdx+1 == segs.Seg1Len -requires 2 == currInfIdx ==> 0 < segs.Seg3Len && currHfIdx+1 == segs.Seg1Len + segs.Seg2Len +requires 1 == currInfIdx ==> currHfIdx + 1 == segs.Seg1Len +requires 2 == currInfIdx ==> 0 < segs.Seg3Len && currHfIdx + 1 == segs.Seg1Len + segs.Seg2Len requires PktLen(segs, 0) <= len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), R56) preserves LeftSeg(raw, currInfIdx, segs, 0) 
!= none[io.IO_seg3] ensures - let prevSegLen := segs.LengthOfPrevSeg(currHfIdx+1) in - let segLen := segs.LengthOfCurrSeg(currHfIdx+1) in + let prevSegLen := segs.LengthOfPrevSeg(currHfIdx + 1) in + let segLen := segs.LengthOfCurrSeg(currHfIdx + 1) in let numInf := segs.NumInfoFields() in let offset := HopFieldOffset(numInf, prevSegLen, 0) in - CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen+1, segLen, 0) == + CurrSeg(raw, offset, currInfIdx, currHfIdx - prevSegLen + 1, segLen, 0) == get(LeftSeg(raw, currInfIdx, segs, 0)) decreases func XoverCurrSeg(raw []byte, currInfIdx int, currHfIdx int, segs io.SegLens) { - prevSegLen := segs.LengthOfPrevSeg(currHfIdx+1) - segLen := segs.LengthOfCurrSeg(currHfIdx+1) + prevSegLen := segs.LengthOfPrevSeg(currHfIdx + 1) + segLen := segs.LengthOfCurrSeg(currHfIdx + 1) numInf := segs.NumInfoFields() offset := HopFieldOffset(numInf, prevSegLen, 0) currseg := reveal CurrSeg(raw, offset, currInfIdx, 0, segLen, 0) @@ -806,11 +800,11 @@ requires PktLen(segs, 0) <= len(raw) requires -1 <= currInfIdx && currInfIdx < 1 requires 0 == currInfIdx ==> 0 < segs.Seg3Len preserves acc(sl.Bytes(raw, 0, len(raw)), R56) -ensures MidSeg(raw, currInfIdx+4, segs, 0) == +ensures MidSeg(raw, currInfIdx + 4, segs, 0) == RightSeg(raw, currInfIdx, segs, 0) decreases func XoverMidSeg(raw []byte, currInfIdx int, segs io.SegLens) { - midseg := reveal MidSeg(raw, currInfIdx+4, segs, 0) + midseg := reveal MidSeg(raw, currInfIdx + 4, segs, 0) rightseg := reveal RightSeg(raw, currInfIdx, segs, 0) assert midseg == rightseg } @@ -820,8 +814,8 @@ requires segs.Valid() requires 0 < segs.Seg2Len requires PktLen(segs, 0) <= len(raw) requires 0 <= currInfIdx && currInfIdx < 2 -requires 0 == currInfIdx ==> currHfIdx+1 == segs.Seg1Len -requires 1 == currInfIdx ==> 0 < segs.Seg3Len && currHfIdx+1 == segs.Seg1Len + segs.Seg2Len +requires 0 == currInfIdx ==> currHfIdx + 1 == segs.Seg1Len +requires 1 == currInfIdx ==> 0 < segs.Seg3Len && currHfIdx + 1 == segs.Seg1Len + segs.Seg2Len requires PktLen(segs, 0) <= len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), R56) preserves RightSeg(raw, currInfIdx, segs, 0) != none[io.IO_seg3] @@ -830,7 +824,7 @@ ensures let segLen := segs.LengthOfCurrSeg(currHfIdx) in let numInf := segs.NumInfoFields() in let offset := HopFieldOffset(numInf, prevSegLen, 0) in - let currseg := CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, 0) in + let currseg := CurrSeg(raw, offset, currInfIdx, currHfIdx - prevSegLen, segLen, 0) in len(currseg.Future) > 0 && get(RightSeg(raw, currInfIdx, segs, 0)) == absIncPathSeg(currseg) decreases @@ -850,38 +844,91 @@ func XoverRightSeg(raw []byte, currInfIdx int, currHfIdx int, segs io.SegLens) { } ghost -opaque requires 0 <= offset -requires 0 <= currHFIdx && currHFIdx <= segLen -requires len(hops) == segLen - currHFIdx +requires 0 <= currHfIdx && currHfIdx <= end +requires end <= segLen requires offset + path.HopLen * segLen <= len(raw) -requires acc(sl.Bytes(raw, 0, len(raw)), R56) +preserves acc(sl.Bytes(raw, 0, len(raw)), R54) +ensures hopFields(raw, offset, currHfIdx, segLen)[:end - currHfIdx] == + hopFields(raw, offset, currHfIdx, end) decreases -pure func hopFieldsBytePositions(raw []byte, offset int, currHFIdx int, segLen int, hops seq[io.IO_HF]) bool { - return forall i int :: { hops[i] } 0 <= i && i < len(hops) ==> - hops[i] == path.BytesToIO_HF(raw, 0, offset + path.HopLen * (currHFIdx + i), len(raw)) +func HopsFromSuffixOfRawMatchSuffixOfHops(raw []byte, offset int, currHfIdx int, segLen int, end int) { + 
hopsFromSuffixOfRawMatchSuffixOfHops(raw, offset, currHfIdx, segLen, end, R54) } ghost requires R55 < p requires 0 <= offset -requires 0 <= currHFIdx && currHFIdx <= segLen +requires 0 <= currHfIdx && currHfIdx <= end +requires end <= segLen requires offset + path.HopLen * segLen <= len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), p) -ensures hopFieldsBytePositions(raw, offset, currHFIdx, segLen, hopFields(raw, offset, currHFIdx, segLen)) -decreases segLen - currHFIdx -func hopFieldsBytePositionsLemma( - raw []byte, - offset int, - currHFIdx int, - segLen int, - p perm) { - newP := (p + R55)/2 - hopfields := hopFields(raw, offset, currHFIdx, segLen) - if (currHFIdx != segLen) { - hopFieldsBytePositionsLemma(raw, offset, currHFIdx + 1, segLen, newP) - hopfieldsInc := hopFields(raw, offset, currHFIdx + 1, segLen) - assert reveal hopFieldsBytePositions(raw, offset, currHFIdx + 1, segLen, hopfieldsInc) +ensures hopFields(raw, offset, currHfIdx, segLen)[:end - currHfIdx] == + hopFields(raw, offset, currHfIdx, end) +decreases end - currHfIdx +func hopsFromSuffixOfRawMatchSuffixOfHops(raw []byte, offset int, currHfIdx int, segLen int, end int, p perm) { + if (currHfIdx != end) { + newP := (p + R55)/2 + hopsFromSuffixOfRawMatchSuffixOfHops(raw, offset, currHfIdx + 1, segLen, end, newP) + } +} + +ghost +requires 0 <= offset +requires 0 <= start +requires 0 <= currHfIdx && currHfIdx <= segLen - start +requires offset + path.HopLen * segLen <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R54) +ensures hopFields(raw, offset, currHfIdx, segLen)[start:] == + hopFields(raw, offset, currHfIdx + start, segLen) +decreases +func HopsFromPrefixOfRawMatchPrefixOfHops(raw []byte, offset int, currHfIdx int, segLen int, start int) { + hopsFromPrefixOfRawMatchPrefixOfHops(raw, offset, currHfIdx, segLen, start, R54) +} + +ghost +requires R55 < p +requires 0 <= offset +requires 0 <= start +requires 0 <= currHfIdx && currHfIdx <= segLen - start +requires offset + path.HopLen * segLen <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), p) +ensures hopFields(raw, offset, currHfIdx, segLen)[start:] == + hopFields(raw, offset, currHfIdx + start, segLen) +decreases start +func hopsFromPrefixOfRawMatchPrefixOfHops(raw []byte, offset int, currHfIdx int, segLen int, start int, p perm) { + if (start != 0) { + newP := (p + R55)/2 + hopsFromPrefixOfRawMatchPrefixOfHops(raw, offset, currHfIdx, segLen, start - 1, newP) + } +} + +ghost +requires 0 <= offset +requires 0 <= start && start <= currHfIdx +requires 0 <= currHfIdx && currHfIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), R54) +ensures hopFields(raw, offset, currHfIdx, segLen) == + hopFields(raw, offset + start * path.HopLen, currHfIdx - start, segLen - start) +decreases +func AlignHopsOfRawWithOffsetAndIndex(raw []byte, offset int, currHfIdx int, segLen int, start int) { + alignHopsOfRawWithOffsetAndIndex(raw, offset, currHfIdx, segLen, start, R54) +} + +ghost +requires R55 < p +requires 0 <= offset +requires 0 <= start && start <= currHfIdx +requires 0 <= currHfIdx && currHfIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +preserves acc(sl.Bytes(raw, 0, len(raw)), p) +ensures hopFields(raw, offset, currHfIdx, segLen) == + hopFields(raw, offset + start * path.HopLen, currHfIdx - start, segLen - start) +decreases segLen - currHfIdx +func alignHopsOfRawWithOffsetAndIndex(raw []byte, offset int, currHfIdx int, segLen int, start int, p perm) { + if (currHfIdx != segLen) { + newP := (p + 
R55)/2 + alignHopsOfRawWithOffsetAndIndex(raw, offset, currHfIdx + 1, segLen, start, newP) } - assert reveal hopFieldsBytePositions(raw, offset, currHFIdx, segLen, hopfields) } \ No newline at end of file
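
For readers following the offset arithmetic above (HopFieldOffset, hopfieldOffset, hopOffset), the following minimal Go sketch spells out the raw-path layout that the SetHopField proof slices ubuf into. It is an illustration only, not part of the patch: it assumes the standard SCION sizes of a 4-byte path meta header, 8-byte info fields, and 12-byte hop fields, and all names in it are hypothetical.

package main

import "fmt"

// Assumed SCION sizes: MetaLen = 4, path.InfoLen = 8, path.HopLen = 12.
const (
	metaLen = 4  // path meta header
	infoLen = 8  // one info field
	hopLen  = 12 // one hop field
)

// hopFieldOffset mirrors the shape of HopFieldOffset(numINF, prevSegLen, MetaLen):
// hop fields start after the meta header and all info fields, and prevSegLen hop
// fields of earlier segments precede the current segment.
func hopFieldOffset(numINF, prevSegLen int) int {
	return metaLen + numINF*infoLen + prevSegLen*hopLen
}

func main() {
	numINF, idx := 2, 3 // e.g. two info fields, writing hop field index 3

	// hopOffset as computed in SetHopField before SerializeTo is called on
	// s.Raw[hopOffset : hopOffset+path.HopLen].
	hopOffset := metaLen + numINF*infoLen + idx*hopLen
	fmt.Printf("hop field %d occupies bytes [%d, %d) of the raw path\n",
		idx, hopOffset, hopOffset+hopLen)

	// Offset of the current segment's hop fields when 2 hop fields belong to
	// previous segments (prevSegLen = 2).
	fmt.Println("current segment's hop fields start at byte", hopFieldOffset(numINF, 2))
}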