yggdrasil-go/src/yggdrasil/search.go

package yggdrasil

// This thing manages search packets
// The basic idea is as follows:
// We may know a NodeID (with a mask) and want to connect
// We begin a search by initializing a list of all nodes in our DHT, sorted by closest to the destination
// We then iteratively ping nodes from the search, marking each pinged node as visited
// We add any unvisited nodes from ping responses to the search, truncating to some maximum search size
// This stops when we either run out of nodes to ping (we hit a dead end where we can't make progress without going back), or we reach the destination
// A new search packet is sent immediately after receiving a response
// A new search packet is also sent periodically, once per second, in case a packet was dropped. (If the search neither times out nor finishes within a second, this slowly causes it to become parallel.)
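//
// A minimal sketch of the expected call flow (hypothetical caller code, assuming `c` is the *Core
// that owns this searches struct; the real call sites live elsewhere in this package and run on
// the router goroutine):
//
//	sinfo := c.searches.newIterSearch(&destNodeID, &destMask) // seed toVisit from our own DHT
//	c.searches.continueSearch(sinfo)                          // send the first ping, schedule a retry
//	// ...then, for each dhtRes that comes back:
//	c.searches.handleDHTRes(res) // add new nodes, ping the next closest, or finish the search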
import (
	"fmt"
	"sort"
	"time"
)

// This defines the maximum number of dhtInfo that we keep track of for nodes to query in an ongoing search.
const search_MAX_SEARCH_SIZE = 16

// This defines the time after which we send a new search packet.
// Search packets are sent automatically immediately after a response is received.
// So this allows for timeouts and for long searches to become increasingly parallel.
const search_RETRY_TIME = time.Second

// Information about an ongoing search.
// Includes the target NodeID, the bitmask to match it to an IP, and the list of nodes to visit / already visited.
type searchInfo struct {
	dest    NodeID
	mask    NodeID
	time    time.Time
	packet  []byte
	toVisit []*dhtInfo
	visited map[NodeID]bool
}

// This stores a map of active searches.
type searches struct {
	core     *Core
	searches map[NodeID]*searchInfo
}

// Initializes the searches struct.
func (s *searches) init(core *Core) {
	s.core = core
	s.searches = make(map[NodeID]*searchInfo)
}

// Creates a new search info, adds it to the searches struct, and returns a pointer to the info.
func (s *searches) createSearch(dest *NodeID, mask *NodeID) *searchInfo {
	now := time.Now()
	for dest, sinfo := range s.searches {
		if now.Sub(sinfo.time) > time.Minute {
			delete(s.searches, dest)
		}
	}
	info := searchInfo{
		dest: *dest,
		mask: *mask,
		time: now.Add(-time.Second),
	}
	s.searches[*dest] = &info
	return &info
}

////////////////////////////////////////////////////////////////////////////////
// Checks if there's an ongoing search related to a dhtRes.
// If there is, it adds the response info to the search and triggers a new search step.
// If there's no ongoing search, or if the dhtRes finished the search (it was from the target node), then don't do anything more.
func (s *searches) handleDHTRes(res *dhtRes) {
	sinfo, isIn := s.searches[res.Dest]
	if !isIn || s.checkDHTRes(sinfo, res) {
		// Either we don't recognize this search, or we just finished it
		if isIn {
			fmt.Println("DEBUG: search finished, length:", len(sinfo.visited))
		}
		return
	} else {
		// Add to the search and continue
		s.addToSearch(sinfo, res)
		s.doSearchStep(sinfo)
	}
}

// Adds the information from a dhtRes to an ongoing search.
// Info about a node that has already been visited is not re-added to the search.
// Duplicate entries in the toVisit list are deduplicated (the newest information is kept).
// The toVisit list is sorted in ascending order of keyspace distance from the destination.
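//
// Illustrative example (not from the original comments): if toVisit currently holds info for nodes
// A and B, and a response lists B again plus a new node C, the merged list is deduplicated by
// NodeID so each node appears once (keeping the most recently received info), sorted so the node
// closest to dest in keyspace comes first, and truncated to at most search_MAX_SEARCH_SIZE entries.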
func (s *searches) addToSearch(sinfo *searchInfo, res *dhtRes) {
	// Add responses to toVisit if closer to dest than the res node
	from := dhtInfo{key: res.Key, coords: res.Coords}
	for _, info := range res.Infos {
		if *info.getNodeID() == s.core.dht.nodeID || sinfo.visited[*info.getNodeID()] {
			continue
		}
		if true || dht_ordered(from.getNodeID(), info.getNodeID(), &res.Dest) {
			sinfo.toVisit = append(sinfo.toVisit, info)
		}
	}
	// Deduplicate
	vMap := make(map[NodeID]*dhtInfo)
	for _, info := range sinfo.toVisit {
		vMap[*info.getNodeID()] = info
	}
	sinfo.toVisit = sinfo.toVisit[:0]
	for _, info := range vMap {
		sinfo.toVisit = append(sinfo.toVisit, info)
	}
	// Sort
	sort.SliceStable(sinfo.toVisit, func(i, j int) bool {
		// Should return true if i is closer to the destination than j
		// FIXME for some reason it works better backwards, why?!
		//return dht_ordered(sinfo.toVisit[j].getNodeID(), sinfo.toVisit[i].getNodeID(), &res.Dest)
		return dht_ordered(&res.Dest, sinfo.toVisit[i].getNodeID(), sinfo.toVisit[j].getNodeID())
	})
	// Truncate to some maximum size
	if len(sinfo.toVisit) > search_MAX_SEARCH_SIZE {
		sinfo.toVisit = sinfo.toVisit[:search_MAX_SEARCH_SIZE]
	}
}

// If there are no nodes left toVisit, then this cleans up the search.
// Otherwise, it pops the closest node to the destination (in keyspace) off of the toVisit list and sends a dht ping.
func (s *searches) doSearchStep(sinfo *searchInfo) {
	if len(sinfo.toVisit) == 0 {
		// Dead end, do cleanup
		delete(s.searches, sinfo.dest)
		fmt.Println("DEBUG: search abandoned, length:", len(sinfo.visited))
		return
	} else {
		// Send to the next search target
		var next *dhtInfo
		next, sinfo.toVisit = sinfo.toVisit[0], sinfo.toVisit[1:]
		s.core.dht.ping(next, &sinfo.dest)
		sinfo.visited[*next.getNodeID()] = true
	}
}

// If we've recently sent a ping for this search, do nothing.
// Otherwise, doSearchStep and schedule another continueSearch to happen after search_RETRY_TIME.
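//
// Rough timeline (illustrative): at t=0 a step is sent and a retry is queued for t=search_RETRY_TIME
// on the router goroutine; if that retry finds this same search still active, it sends another step
// and queues the next retry, so a search that stalls without dying sends roughly one extra ping per
// search_RETRY_TIME (the slow fan-out into a parallel search described at the top of this file).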
func (s *searches) continueSearch(sinfo *searchInfo) {
	if time.Since(sinfo.time) < search_RETRY_TIME {
		return
	}
	sinfo.time = time.Now()
	s.doSearchStep(sinfo)
	// In case the search dies, try to spawn another thread later
	// Note that this will spawn multiple parallel searches as time passes
	// Any that die aren't restarted, but a new one will start later
	retryLater := func() {
		newSearchInfo := s.searches[sinfo.dest]
		if newSearchInfo != sinfo {
			return
		}
		s.continueSearch(sinfo)
	}
	go func() {
		time.Sleep(search_RETRY_TIME)
		s.core.router.admin <- retryLater
	}()
}

// Calls createSearch, then initializes the iterative search parts of the struct before returning it.
func (s *searches) newIterSearch(dest *NodeID, mask *NodeID) *searchInfo {
	sinfo := s.createSearch(dest, mask)
	sinfo.toVisit = s.core.dht.lookup(dest, true)
	sinfo.visited = make(map[NodeID]bool)
	return sinfo
}

// Checks if a dhtRes is good (called by handleDHTRes).
// If the response is from the target, get/create a session, trigger a session ping, and return true.
// Otherwise return false.
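//
// Worked example of the masked comparison below (values are made up): if info.dest[0] = 0xAB,
// info.mask[0] = 0xF0, and the responder's NodeID has them[0] = 0xA7, then the masked bytes are
// 0xAB&0xF0 = 0xA0 and 0xA7&0xF0 = 0xA0, so this byte matches; the response is only treated as
// the search target if every byte matches under the mask.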
func (s *searches) checkDHTRes(info *searchInfo, res *dhtRes) bool {
	them := getNodeID(&res.Key)
	var destMasked NodeID
	var themMasked NodeID
	for idx := 0; idx < NodeIDLen; idx++ {
		destMasked[idx] = info.dest[idx] & info.mask[idx]
		themMasked[idx] = them[idx] & info.mask[idx]
	}
	if themMasked != destMasked {
		return false
	}
	// They match, so create a session and send a sessionRequest
	sinfo, isIn := s.core.sessions.getByTheirPerm(&res.Key)
	if !isIn {
		sinfo = s.core.sessions.createSession(&res.Key)
		if sinfo == nil {
			// nil if the DHT search finished but the session wasn't allowed
			return true
		}
		_, isIn := s.core.sessions.getByTheirPerm(&res.Key)
		if !isIn {
			panic("This should never happen")
		}
	}
	// FIXME (!) replay attacks could mess with coords? Give it a handle (tstamp)?
	sinfo.coords = res.Coords
	sinfo.packet = info.packet
	s.core.sessions.ping(sinfo)
	// Cleanup
	delete(s.searches, res.Dest)
	return true
}