diff --git a/dv/dv/advert_data.go b/dv/dv/advert_data.go
index ee69f92f..a7e90c39 100644
--- a/dv/dv/advert_data.go
+++ b/dv/dv/advert_data.go
@@ -34,16 +34,16 @@ func (dv *Router) advertGenerateNew() {
 	go dv.advertSyncSendInterest()
 }
 
-func (dv *Router) advertDataFetch(nodeId enc.Name, seqNo uint64) {
+func (dv *Router) advertDataFetch(nName enc.Name, seqNo uint64) {
 	// debounce; wait before fetching, then check if this is still the latest
 	// sequence number known for this neighbor
 	time.Sleep(10 * time.Millisecond)
-	if ns := dv.neighbors.Get(nodeId); ns == nil || ns.AdvertSeq != seqNo {
+	if ns := dv.neighbors.Get(nName); ns == nil || ns.AdvertSeq != seqNo {
 		return
 	}
 
 	// Fetch the advertisement
-	advName := enc.LOCALHOP.Append(nodeId.Append(
+	advName := enc.LOCALHOP.Append(nName.Append(
 		enc.NewStringComponent(enc.TypeKeywordNameComponent, "DV"),
 		enc.NewStringComponent(enc.TypeKeywordNameComponent, "ADV"),
 		enc.NewVersionComponent(seqNo),
@@ -59,12 +59,12 @@ func (dv *Router) advertDataFetch(nodeId enc.Name, seqNo uint64) {
 			if fetchErr != nil {
 				log.Warnf("advert-data: failed to fetch advertisement %s: %+v", state.Name(), fetchErr)
 				time.Sleep(1 * time.Second) // wait on error
-				dv.advertDataFetch(nodeId, seqNo)
+				dv.advertDataFetch(nName, seqNo)
 				return
 			}
 
 			// Process the advertisement
-			dv.advertDataHandler(nodeId, seqNo, state.Content())
+			dv.advertDataHandler(nName, seqNo, state.Content())
 		}()
 
 		return true
@@ -72,19 +72,19 @@ func (dv *Router) advertDataFetch(nodeId enc.Name, seqNo uint64) {
 }
 
 // Received advertisement Data
-func (dv *Router) advertDataHandler(nodeId enc.Name, seqNo uint64, data []byte) {
+func (dv *Router) advertDataHandler(nName enc.Name, seqNo uint64, data []byte) {
 	// Lock DV state
 	dv.mutex.Lock()
 	defer dv.mutex.Unlock()
 
 	// Check if this is the latest advertisement
-	ns := dv.neighbors.Get(nodeId)
+	ns := dv.neighbors.Get(nName)
 	if ns == nil {
-		log.Warnf("advert-handler: unknown advertisement %s", nodeId)
+		log.Warnf("advert-handler: unknown advertisement %s", nName)
 		return
 	}
 	if ns.AdvertSeq != seqNo {
-		log.Debugf("advert-handler: old advertisement for %s (%d != %d)", nodeId, ns.AdvertSeq, seqNo)
+		log.Debugf("advert-handler: old advertisement for %s (%d != %d)", nName, ns.AdvertSeq, seqNo)
 		return
 	}
 
diff --git a/dv/dv/advert_sync.go b/dv/dv/advert_sync.go
index ee98b491..11a06664 100644
--- a/dv/dv/advert_sync.go
+++ b/dv/dv/advert_sync.go
@@ -43,8 +43,8 @@ func (dv *Router) advertSyncSendInterestImpl(prefix enc.Name) (err error) {
 	sv := &svs_2024.StateVectorAppParam{
 		StateVector: &svs_2024.StateVector{
 			Entries: []*svs_2024.StateVectorEntry{{
-				NodeId: dv.config.RouterName(),
-				SeqNo:  dv.advertSyncSeq,
+				Name:  dv.config.RouterName(),
+				SeqNo: dv.advertSyncSeq,
 			}},
 		},
 	}
@@ -102,15 +102,15 @@ func (dv *Router) advertSyncOnInterest(args ndn.InterestHandlerArgs, active bool
 
 	// There should only be one entry in the StateVector, but check all anyway
 	for _, entry := range params.StateVector.Entries {
-		// Parse name from NodeId
-		nodeId := entry.NodeId
-		if nodeId == nil {
-			log.Warnf("advertSyncOnInterest: failed to parse NodeId: %+v", err)
+		// Parse name from entry
+		nName := entry.Name
+		if nName == nil {
+			log.Warnf("advertSyncOnInterest: failed to parse neighbor name: %+v", err)
 			continue
 		}
 
 		// Check if the entry is newer than what we know
-		ns := dv.neighbors.Get(nodeId)
+		ns := dv.neighbors.Get(nName)
 		if ns != nil {
 			if ns.AdvertSeq >= entry.SeqNo {
 				// Nothing has changed, skip
@@ -121,13 +121,13 @@ func (dv *Router) advertSyncOnInterest(args ndn.InterestHandlerArgs, active bool
 			// Create new neighbor entry cause none found
 			// This is the ONLY place where neighbors are created
 			// In all other places, quit if not found
-			ns = dv.neighbors.Add(nodeId)
+			ns = dv.neighbors.Add(nName)
 		}
 
 		markRecvPing(ns)
 		ns.AdvertSeq = entry.SeqNo
 
-		go dv.advertDataFetch(nodeId, entry.SeqNo)
+		go dv.advertDataFetch(nName, entry.SeqNo)
 	}
 
 	// Update FIB if needed
diff --git a/dv/dv/prefix_sync.go b/dv/dv/prefix_sync.go
index aed6eb95..e1521ed1 100644
--- a/dv/dv/prefix_sync.go
+++ b/dv/dv/prefix_sync.go
@@ -26,33 +26,33 @@ func (dv *Router) prefixDataFetchAll() {
 func (dv *Router) onPfxSyncUpdate(ssu ndn_sync.SvSyncUpdate) {
 	// Update the prefix table
 	dv.mutex.Lock()
-	dv.pfx.GetRouter(ssu.NodeId).Latest = ssu.High
+	dv.pfx.GetRouter(ssu.Name).Latest = ssu.High
 	dv.mutex.Unlock()
 
 	// Start a fetching thread (if needed)
-	dv.prefixDataFetch(ssu.NodeId)
+	dv.prefixDataFetch(ssu.Name)
 }
 
 // Fetch prefix data
-func (dv *Router) prefixDataFetch(nodeId enc.Name) {
+func (dv *Router) prefixDataFetch(nName enc.Name) {
 	dv.mutex.Lock()
 	defer dv.mutex.Unlock()
 
 	// Check if the RIB has this destination
-	if !dv.rib.Has(nodeId) {
+	if !dv.rib.Has(nName) {
 		return
 	}
 
 	// At any given time, there is only one thread fetching
 	// prefix data for a node. This thread recursively calls itself.
-	router := dv.pfx.GetRouter(nodeId)
+	router := dv.pfx.GetRouter(nName)
 	if router == nil || router.Fetching || router.Known >= router.Latest {
 		return
 	}
 	router.Fetching = true
 
 	// Fetch the prefix data object
-	log.Debugf("prefix-table: fetching object for %s [%d => %d]", nodeId, router.Known, router.Latest)
+	log.Debugf("prefix-table: fetching object for %s [%d => %d]", nName, router.Known, router.Latest)
 
 	name := router.GetNextDataName()
 	dv.client.Consume(name, func(state *object.ConsumeState) bool {
@@ -78,7 +78,7 @@ func (dv *Router) prefixDataFetch(nodeId enc.Name) {
 
 		// Done fetching, restart if needed
 		router.Fetching = false
-		go dv.prefixDataFetch(nodeId)
+		go dv.prefixDataFetch(nName)
 	}()
 
 	return true
diff --git a/std/examples/low-level/svs/main.go b/std/examples/low-level/svs/main.go
index 5a9a6ee7..5ac86b89 100644
--- a/std/examples/low-level/svs/main.go
+++ b/std/examples/low-level/svs/main.go
@@ -21,11 +21,11 @@ func main() {
 	logger := log.WithField("module", "main")
 
 	if len(os.Args) < 2 {
-		log.Fatalf("Usage: %s <nodeId>", os.Args[0])
+		log.Fatalf("Usage: %s <name>", os.Args[0])
 	}
 
 	// Parse command line arguments
-	nodeId, err := enc.NameFromStr(os.Args[1])
+	name, err := enc.NameFromStr(os.Args[1])
 	if err != nil {
 		log.Fatalf("Invalid node ID: %s", os.Args[1])
 	}
@@ -63,7 +63,7 @@ func main() {
 
 	ticker := time.NewTicker(3 * time.Second)
 	for range ticker.C {
-		new := svsync.IncrSeqNo(nodeId)
+		new := svsync.IncrSeqNo(name)
 		logger.Infof("Published new sequence number: %d", new)
 	}
 }
diff --git a/std/examples/schema-test/chat/main.go b/std/examples/schema-test/chat/main.go
index c765cbe9..b11acd51 100644
--- a/std/examples/schema-test/chat/main.go
+++ b/std/examples/schema-test/chat/main.go
@@ -41,7 +41,7 @@ const SchemaJson = `{
         "ChannelSize": 1000,
         "SyncInterval": 15000,
         "SuppressionInterval": 100,
-        "SelfNodeId": "$nodeId",
+        "SelfName": "$nodeId",
         "BaseMatching": {}
       }
     }
@@ -238,15 +238,15 @@ func main() {
 	select {
 	case missData := <-ch:
 		for i := missData.StartSeq; i < missData.EndSeq; i++ {
-			dataName := syncNode.Call("GetDataName", missData.NodeId, i).(enc.Name)
+			dataName := syncNode.Call("GetDataName", missData.Name, i).(enc.Name)
 			mLeafNode := tree.Match(dataName)
 			result := <-mLeafNode.Call("NeedChan").(chan schema.NeedResult)
 			if result.Status != ndn.InterestResultData {
-				fmt.Printf("Data fetching failed for (%s, %d): %+v\n", missData.NodeId.String(), i, result.Status)
+				fmt.Printf("Data fetching failed for (%s, %d): %+v\n", missData.Name, i, result.Status)
 			} else {
 				dataLock.Lock()
-				fmt.Printf("Fetched (%s, %d): %s\n", missData.NodeId.String(), i, string(result.Content.Join()))
-				msg := fmt.Sprintf("%s[%d]: %s", missData.NodeId.String(), i, string(result.Content.Join()))
+				fmt.Printf("Fetched (%s, %d): %s\n", missData.Name.String(), i, string(result.Content.Join()))
+				msg := fmt.Sprintf("%s[%d]: %s", missData.Name.String(), i, string(result.Content.Join()))
 				msgList = append(msgList, msg)
 				if wsConn != nil {
 					wsConn.WriteMessage(websocket.TextMessage, []byte(msg))
diff --git a/std/examples/schema-test/chat/schema.json b/std/examples/schema-test/chat/schema.json
index d9be7442..e3c16c5b 100644
--- a/std/examples/schema-test/chat/schema.json
+++ b/std/examples/schema-test/chat/schema.json
@@ -6,7 +6,7 @@
       "ChannelSize": 1000,
       "SyncInterval": 15000,
       "SuppressionInterval": 100,
-      "SelfNodeId": "$nodeId",
+      "SelfName": "$nodeId",
       "BaseMatching": {}
     }
   }
diff --git a/std/examples/schema-test/sync/main.go b/std/examples/schema-test/sync/main.go
index 3105963b..46231228 100644
--- a/std/examples/schema-test/sync/main.go
+++ b/std/examples/schema-test/sync/main.go
@@ -28,7 +28,7 @@ const SchemaJson = `{
       "ChannelSize": 1000,
      "SyncInterval": 2000,
       "SuppressionInterval": 100,
-      "SelfNodeId": "$nodeId",
+      "SelfName": "$nodeId",
       "BaseMatching": {}
     }
   }
@@ -132,13 +132,13 @@ func main() {
 	select {
 	case missData := <-ch:
 		for i := missData.StartSeq; i < missData.EndSeq; i++ {
-			dataName := mNode.Call("GetDataName", missData.NodeId, i).(enc.Name)
+			dataName := mNode.Call("GetDataName", missData.Name, i).(enc.Name)
 			mLeafNode := tree.Match(dataName)
 			result := <-mLeafNode.Call("NeedChan").(chan schema.NeedResult)
 			if result.Status != ndn.InterestResultData {
-				fmt.Printf("Data fetching failed for (%s, %d): %+v\n", missData.NodeId.String(), i, result.Status)
+				fmt.Printf("Data fetching failed for (%s, %d): %+v\n", missData.Name.String(), i, result.Status)
 			} else {
-				fmt.Printf("Fetched (%s, %d): %s", missData.NodeId.String(), i, string(result.Content.Join()))
+				fmt.Printf("Fetched (%s, %d): %s", missData.Name.String(), i, string(result.Content.Join()))
 			}
 		}
 	case <-ctx.Done():
diff --git a/std/examples/schema-test/sync/schema.json b/std/examples/schema-test/sync/schema.json
index 24f02e3e..364ebf96 100644
--- a/std/examples/schema-test/sync/schema.json
+++ b/std/examples/schema-test/sync/schema.json
@@ -6,7 +6,7 @@
       "ChannelSize": 1000,
       "SyncInterval": 2000,
       "SuppressionInterval": 100,
-      "SelfNodeId": "$nodeId",
+      "SelfName": "$nodeId",
       "BaseMatching": {}
     }
   }
diff --git a/std/ndn/svs_2024/definitions.go b/std/ndn/svs_2024/definitions.go
index b0eef66a..11692fd2 100644
--- a/std/ndn/svs_2024/definitions.go
+++ b/std/ndn/svs_2024/definitions.go
@@ -17,7 +17,7 @@ type StateVector struct {
 
 type StateVectorEntry struct {
 	//+field:name
-	NodeId enc.Name `tlv:"0x07"`
+	Name enc.Name `tlv:"0x07"`
 	//+field:natural
 	SeqNo uint64 `tlv:"0xcc"`
 }
diff --git a/std/ndn/svs_2024/zz_generated.go b/std/ndn/svs_2024/zz_generated.go
index 898d9b38..8fe04768 100644
--- a/std/ndn/svs_2024/zz_generated.go
+++ b/std/ndn/svs_2024/zz_generated.go
@@ -403,24 +403,24 @@ func ParseStateVector(reader enc.ParseReader, ignoreCritical bool) (*StateVector
 
 type StateVectorEntryEncoder struct {
 	length uint
-	NodeId_length uint
+	Name_length uint
 }
 
 type StateVectorEntryParsingContext struct {
 }
 
 func (encoder *StateVectorEntryEncoder) Init(value *StateVectorEntry) {
-	if value.NodeId != nil {
-		encoder.NodeId_length = 0
-		for _, c := range value.NodeId {
-			encoder.NodeId_length += uint(c.EncodingLength())
+	if value.Name != nil {
+		encoder.Name_length = 0
+		for _, c := range value.Name {
+			encoder.Name_length += uint(c.EncodingLength())
 		}
 	}
 
 	l := uint(0)
-	if value.NodeId != nil {
+	if value.Name != nil {
 		l += 1
-		switch x := encoder.NodeId_length; {
+		switch x := encoder.Name_length; {
 		case x <= 0xfc:
 			l += 1
 		case x <= 0xffff:
@@ -430,7 +430,7 @@ func (encoder *StateVectorEntryEncoder) Init(value *StateVectorEntry) {
 		default:
 			l += 9
 		}
-		l += encoder.NodeId_length
+		l += encoder.Name_length
 	}
 	l += 1
 	switch x := value.SeqNo; {
@@ -455,10 +455,10 @@ func (encoder *StateVectorEntryEncoder) EncodeInto(value *StateVectorEntry, buf
 
 	pos := uint(0)
 
-	if value.NodeId != nil {
+	if value.Name != nil {
 		buf[pos] = byte(7)
 		pos += 1
-		switch x := encoder.NodeId_length; {
+		switch x := encoder.Name_length; {
 		case x <= 0xfc:
 			buf[pos] = byte(x)
 			pos += 1
@@ -475,7 +475,7 @@ func (encoder *StateVectorEntryEncoder) EncodeInto(value *StateVectorEntry, buf
 			binary.BigEndian.PutUint64(buf[pos+1:], uint64(x))
 			pos += 9
 		}
-		for _, c := range value.NodeId {
+		for _, c := range value.Name {
 			pos += uint(c.EncodeInto(buf[pos:]))
 		}
 	}
@@ -516,7 +516,7 @@ func (context *StateVectorEntryParsingContext) Parse(reader enc.ParseReader, ign
 		return nil, enc.ErrBufferOverflow
 	}
 
-	var handled_NodeId bool = false
+	var handled_Name bool = false
 	var handled_SeqNo bool = false
 
 	progress := -1
@@ -547,19 +547,19 @@ func (context *StateVectorEntryParsingContext) Parse(reader enc.ParseReader, ign
 		case 7:
 			if true {
 				handled = true
-				handled_NodeId = true
-				value.NodeId = make(enc.Name, l/2+1)
+				handled_Name = true
+				value.Name = make(enc.Name, l/2+1)
 				startName := reader.Pos()
 				endName := startName + int(l)
-				for j := range value.NodeId {
+				for j := range value.Name {
 					if reader.Pos() >= endName {
-						value.NodeId = value.NodeId[:j]
+						value.Name = value.Name[:j]
 						break
 					}
 					var err1, err3 error
-					value.NodeId[j].Typ, err1 = enc.ReadTLNum(reader)
+					value.Name[j].Typ, err1 = enc.ReadTLNum(reader)
 					l, err2 := enc.ReadTLNum(reader)
-					value.NodeId[j].Val, err3 = reader.ReadBuf(int(l))
+					value.Name[j].Val, err3 = reader.ReadBuf(int(l))
 					if err1 != nil || err2 != nil || err3 != nil {
 						err = io.ErrUnexpectedEOF
 						break
@@ -606,8 +606,8 @@ func (context *StateVectorEntryParsingContext) Parse(reader enc.ParseReader, ign
 	startPos = reader.Pos()
 	err = nil
 
-	if !handled_NodeId && err == nil {
-		value.NodeId = nil
+	if !handled_Name && err == nil {
+		value.Name = nil
 	}
 	if !handled_SeqNo && err == nil {
 		err = enc.ErrSkipRequired{Name: "SeqNo", TypeNum: 204}
diff --git a/std/schema/svs/sync.go b/std/schema/svs/sync.go
index c9c2fe17..62d5f050 100644
--- a/std/schema/svs/sync.go
+++ b/std/schema/svs/sync.go
@@ -18,7 +18,7 @@ import (
 type SyncState int
 
 type MissingData struct {
-	NodeId   enc.Name
+	Name     enc.Name
 	StartSeq uint64
 	EndSeq   uint64
 }
@@ -43,7 +43,7 @@ type SvsNode struct {
 	SuppressionInterval time.Duration
 	BaseMatching        enc.Matching
 	ChannelSize         uint64
-	SelfNodeId          enc.Name
+	SelfName            enc.Name
 
 	dataLock sync.Mutex
 	timer    ndn.Timer
@@ -98,10 +98,10 @@ func CreateSvsNode(node *schema.Node) schema.NodeImpl {
 	return ret
 }
 
-func findSvsEntry(v *stlv.StateVector, nodeId enc.Name) int {
+func findSvsEntry(v *stlv.StateVector, name enc.Name) int {
 	// This is less efficient but enough for a demo.
 	for i, n := range v.Entries {
-		if nodeId.Equal(n.NodeId) {
+		if name.Equal(n.Name) {
 			return i
 		}
 	}
@@ -124,34 +124,34 @@ func (n *SvsNode) onSyncInt(event *schema.Event) any {
 	// needFetch := false
 	needNotif := false
 	for _, cur := range remoteSv.Entries {
-		li := findSvsEntry(&n.localSv, cur.NodeId)
+		li := findSvsEntry(&n.localSv, cur.Name)
 		if li == -1 {
 			n.localSv.Entries = append(n.localSv.Entries, &stlv.StateVectorEntry{
-				NodeId: cur.NodeId,
-				SeqNo:  cur.SeqNo,
+				Name:  cur.Name,
+				SeqNo: cur.SeqNo,
 			})
 			// needFetch = true
 			n.missChan <- MissingData{
-				NodeId:   cur.NodeId,
+				Name:     cur.Name,
 				StartSeq: 1,
 				EndSeq:   cur.SeqNo + 1,
 			}
 		} else if n.localSv.Entries[li].SeqNo < cur.SeqNo {
-			log.Debugf("Missing data for: [%d]: %d < %d", cur.NodeId, n.localSv.Entries[li].SeqNo, cur.SeqNo)
+			log.Debugf("Missing data for: [%d]: %d < %d", cur.Name, n.localSv.Entries[li].SeqNo, cur.SeqNo)
 			n.missChan <- MissingData{
-				NodeId:   cur.NodeId,
+				Name:     cur.Name,
 				StartSeq: n.localSv.Entries[li].SeqNo + 1,
 				EndSeq:   cur.SeqNo + 1,
 			}
 			n.localSv.Entries[li].SeqNo = cur.SeqNo
 			// needFetch = true
 		} else if n.localSv.Entries[li].SeqNo > cur.SeqNo {
-			log.Debugf("Outdated remote on: [%d]: %d < %d", cur.NodeId, cur.SeqNo, n.localSv.Entries[li].SeqNo)
+			log.Debugf("Outdated remote on: [%d]: %d < %d", cur.Name, cur.SeqNo, n.localSv.Entries[li].SeqNo)
 			needNotif = true
 		}
 	}
 	for _, cur := range n.localSv.Entries {
-		li := findSvsEntry(remoteSv, cur.NodeId)
+		li := findSvsEntry(remoteSv, cur.Name)
 		if li == -1 {
 			needNotif = true
 		}
@@ -213,11 +213,11 @@ func (n *SvsNode) MySequence() uint64 {
 
 func (n *SvsNode) aggregate(remoteSv *stlv.StateVector) {
 	for _, cur := range remoteSv.Entries {
-		li := findSvsEntry(&n.aggSv, cur.NodeId)
+		li := findSvsEntry(&n.aggSv, cur.Name)
 		if li == -1 {
 			n.aggSv.Entries = append(n.aggSv.Entries, &stlv.StateVectorEntry{
-				NodeId: cur.NodeId,
-				SeqNo:  cur.SeqNo,
+				Name:  cur.Name,
+				SeqNo: cur.SeqNo,
 			})
 		} else {
 			n.aggSv.Entries[li].SeqNo = utils.Max(n.aggSv.Entries[li].SeqNo, cur.SeqNo)
@@ -234,7 +234,7 @@ func (n *SvsNode) onSyncTimer() {
 		n.state = SyncSteady
 		notNecessary = true
 		for _, cur := range n.localSv.Entries {
-			li := findSvsEntry(&n.aggSv, cur.NodeId)
+			li := findSvsEntry(&n.aggSv, cur.Name)
 			if li == -1 || n.aggSv.Entries[li].SeqNo < cur.SeqNo {
 				notNecessary = false
 				break
@@ -276,7 +276,7 @@ func (n *SvsNode) NewData(mNode schema.MatchedNode, content enc.Wire) enc.Wire {
 	mLeafNode := mNode.Refine(newDataName)
 	ret := mLeafNode.Call("Provide", content).(enc.Wire)
 	if len(ret) > 0 {
-		li := findSvsEntry(&n.localSv, n.SelfNodeId)
+		li := findSvsEntry(&n.localSv, n.SelfName)
 		if li >= 0 {
 			n.localSv.Entries[li].SeqNo = n.selfSeq
 		}
@@ -291,7 +291,7 @@
 }
 
 func (n *SvsNode) onAttach(event *schema.Event) any {
-	if n.ChannelSize == 0 || len(n.SelfNodeId) == 0 ||
+	if n.ChannelSize == 0 || len(n.SelfName) == 0 ||
 		n.BaseMatching == nil || n.SyncInterval <= 0 || n.SuppressionInterval <= 0 {
 		panic(errors.New("SvsNode: not configured before Init"))
 	}
@@ -302,7 +302,7 @@ func (n *SvsNode) onAttach(event *schema.Event) any {
 	defer n.dataLock.Unlock()
 
 	n.ownPrefix = event.TargetNode.Apply(n.BaseMatching).Name
-	n.ownPrefix = append(n.ownPrefix, n.SelfNodeId...)
+	n.ownPrefix = append(n.ownPrefix, n.SelfName...)
 
 	// OnMissingData callback
 
@@ -322,8 +322,8 @@ func (n *SvsNode) onAttach(event *schema.Event) any {
 	// initialize localSv
 	// TODO: this demo does not consider recovery from off-line. Should be done via ENV and storage policy.
 	n.localSv.Entries = append(n.localSv.Entries, &stlv.StateVectorEntry{
-		NodeId: n.SelfNodeId,
-		SeqNo:  0,
+		Name:  n.SelfName,
+		SeqNo: 0,
 	})
 	n.selfSeq = 0
 	return nil
@@ -343,10 +343,10 @@ func (n *SvsNode) callbackRoutine() {
 	panic("TODO: TO BE DONE")
 }
 
-func (n *SvsNode) GetDataName(mNode schema.MatchedNode, nodeId []byte, seq uint64) enc.Name {
+func (n *SvsNode) GetDataName(mNode schema.MatchedNode, name []byte, seq uint64) enc.Name {
 	ret := make(enc.Name, len(mNode.Name)+2)
 	copy(ret, mNode.Name)
-	ret[len(mNode.Name)] = enc.Component{Typ: enc.TypeGenericNameComponent, Val: nodeId}
+	ret[len(mNode.Name)] = enc.Component{Typ: enc.TypeGenericNameComponent, Val: name}
 	ret[len(mNode.Name)+1] = enc.NewSequenceNumComponent(seq)
 	return ret
 }
@@ -372,7 +372,7 @@ func init() {
 			"SuppressionInterval": schema.TimePropertyDesc("SuppressionInterval"),
 			"BaseMatching":        schema.MatchingPropertyDesc("BaseMatching"),
 			"ChannelSize":         schema.DefaultPropertyDesc("ChannelSize"),
-			"SelfNodeId":          schema.DefaultPropertyDesc("SelfNodeId"),
+			"SelfName":            schema.DefaultPropertyDesc("SelfName"),
 			"ContentType":         schema.SubNodePropertyDesc("/<8=nodeId>/", "ContentType"),
 			"Lifetime":            schema.SubNodePropertyDesc("/<8=nodeId>/", "Lifetime"),
 			"Freshness":           schema.SubNodePropertyDesc("/<8=nodeId>/", "Freshness"),
diff --git a/std/sync/svs.go b/std/sync/svs.go
index e2a0aea7..81713348 100644
--- a/std/sync/svs.go
+++ b/std/sync/svs.go
@@ -40,9 +40,9 @@ type SvSync struct {
 }
 
 type SvSyncUpdate struct {
-	NodeId enc.Name
-	High   uint64
-	Low    uint64
+	Name enc.Name
+	High uint64
+	Low  uint64
 }
 
 func NewSvSync(
@@ -112,11 +112,11 @@ func (s *SvSync) Stop() {
 	close(s.stop)
 }
 
-func (s *SvSync) SetSeqNo(nodeId enc.Name, seqNo uint64) error {
+func (s *SvSync) SetSeqNo(name enc.Name, seqNo uint64) error {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
 
-	hash := s.hashName(nodeId)
+	hash := s.hashName(name)
 
 	prev := s.state[hash]
 	if seqNo <= prev {
@@ -131,18 +131,18 @@
 	return nil
 }
 
-func (s *SvSync) GetSeqNo(nodeId enc.Name) uint64 {
+func (s *SvSync) GetSeqNo(name enc.Name) uint64 {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
 
-	hash := s.hashName(nodeId)
+	hash := s.hashName(name)
 	return s.state[hash]
 }
 
-func (s *SvSync) IncrSeqNo(nodeId enc.Name) uint64 {
+func (s *SvSync) IncrSeqNo(name enc.Name) uint64 {
 	s.mutex.Lock()
 	defer s.mutex.Unlock()
 
-	hash := s.hashName(nodeId)
+	hash := s.hashName(name)
 	val := s.state[hash] + 1
 	s.state[hash] = val
@@ -153,10 +153,10 @@
 	return val
 }
 
-func (s *SvSync) hashName(nodeId enc.Name) uint64 {
-	hash := nodeId.Hash()
+func (s *SvSync) hashName(name enc.Name) uint64 {
+	hash := name.Hash()
 	if _, ok := s.names[hash]; !ok {
-		s.names[hash] = nodeId.Clone()
+		s.names[hash] = name.Clone()
 	}
 	return hash
 }
@@ -170,7 +170,7 @@ func (s *SvSync) onReceiveStateVector(sv *stlv.StateVector) {
 	recvSet := make(map[uint64]bool)
 
 	for _, entry := range sv.Entries {
-		hash := s.hashName(entry.NodeId)
+		hash := s.hashName(entry.Name)
 		recvSet[hash] = true
 
 		prev := s.state[hash]
@@ -185,9 +185,9 @@
 
 			// Notify the application of the update
 			s.onUpdate(SvSyncUpdate{
-				NodeId: entry.NodeId,
-				High:   entry.SeqNo,
-				Low:    prev + 1,
+				Name: entry.Name,
+				High: entry.SeqNo,
+				Low:  prev + 1,
 			})
 		} else if entry.SeqNo < prev {
 			isOutdated = true
@@ -213,8 +213,8 @@
 	// The above checks each node in the incoming state vector, but
 	// does not check if a node is missing from the incoming state vector.
 	if !isOutdated {
-		for nodeId := range s.state {
-			if _, ok := recvSet[nodeId]; !ok {
+		for nameHash := range s.state {
+			if _, ok := recvSet[nameHash]; !ok {
 				isOutdated = true
 				canDrop = false
 				break
@@ -250,8 +250,8 @@ func (s *SvSync) timerExpired() {
 		if s.suppress {
 			// [Spec] If MergedStateVector is up-to-date; no inconsistency.
 			send := false
-			for nodeId, seqNo := range s.state {
-				if seqNo > s.merge[nodeId] {
+			for nameHash, seqNo := range s.state {
+				if seqNo > s.merge[nameHash] {
 					send = true
 					break
 				}
@@ -336,16 +336,16 @@ func (s *SvSync) onSyncInterest(interest ndn.Interest) {
 // Call with mutex locked
 func (s *SvSync) encodeSv() enc.Wire {
 	entries := make([]*stlv.StateVectorEntry, 0, len(s.state))
-	for nodeId, seqNo := range s.state {
+	for nameHash, seqNo := range s.state {
 		entries = append(entries, &stlv.StateVectorEntry{
-			NodeId: s.names[nodeId],
-			SeqNo:  seqNo,
+			Name:  s.names[nameHash],
+			SeqNo: seqNo,
 		})
 	}
 
 	// Sort entries by in the NDN canonical order
 	sort.Slice(entries, func(i, j int) bool {
-		return entries[i].NodeId.Compare(entries[j].NodeId) < 0
+		return entries[i].Name.Compare(entries[j].Name) < 0
 	})
 
 	params := stlv.StateVectorAppParam{
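
Note (illustrative sketch, not part of the diff): after this rename, application code reads `Name` where it previously read `NodeId` on `SvSyncUpdate` and `MissingData`. A minimal update handler in the style of `onPfxSyncUpdate` above might look as follows; the `ndn_sync` alias mirrors dv/dv/prefix_sync.go, package imports are elided because the module path is not shown in this diff, and `fetchObject` is a hypothetical application callback:

	// onUpdate receives one SvSyncUpdate per producer whose sequence numbers advanced.
	// ssu.Name (formerly ssu.NodeId) identifies the producer; [ssu.Low, ssu.High] is
	// the newly learned sequence range.
	func onUpdate(ssu ndn_sync.SvSyncUpdate) {
		for seq := ssu.Low; seq <= ssu.High; seq++ {
			fetchObject(ssu.Name, seq) // hypothetical: fetch one sequence number for this producer
		}
	}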