keep peers separate from other nodes in dht

Arceliar 2018-03-10 13:58:48 -06:00
parent e04ab7cfe6
commit 9ce0b7fbea
5 changed files with 72 additions and 34 deletions

View File

@@ -290,7 +290,16 @@ func (a *admin) getData_getDHT() []admin_nodeInfo {
 	getDHT := func() {
 		for i := 0; i < a.core.dht.nBuckets(); i++ {
 			b := a.core.dht.getBucket(i)
-			for _, v := range b.infos {
+			for _, v := range b.other {
+				addr := *address_addrForNodeID(v.getNodeID())
+				info := admin_nodeInfo{
+					{"IP", net.IP(addr[:]).String()},
+					{"coords", fmt.Sprint(v.coords)},
+					{"bucket", fmt.Sprint(i)},
+				}
+				infos = append(infos, info)
+			}
+			for _, v := range b.peers {
 				addr := *address_addrForNodeID(v.getNodeID())
 				info := admin_nodeInfo{
 					{"IP", net.IP(addr[:]).String()},

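The admin dump now walks b.other and b.peers with the same loop body. A minimal, self-contained sketch of that pattern, using simplified stand-in types (nodeEntry and dumpBucket are hypothetical names, not the admin code above):

package main

import "fmt"

// nodeEntry is a simplified stand-in for the DHT entries reported above.
type nodeEntry struct {
	addr   string
	coords []byte
}

// dumpBucket formats the non-peer half and the peer half of one bucket with a
// single closure, so the formatting logic is only written once.
func dumpBucket(bucketIdx int, peers, other []nodeEntry) []string {
	var out []string
	emit := func(entries []nodeEntry) {
		for _, e := range entries {
			out = append(out, fmt.Sprintf("IP=%s coords=%v bucket=%d", e.addr, e.coords, bucketIdx))
		}
	}
	emit(other)
	emit(peers)
	return out
}

func main() {
	peers := []nodeEntry{{addr: "200::1", coords: []byte{1, 2}}}
	other := []nodeEntry{{addr: "200::2", coords: []byte{1, 3}}}
	for _, line := range dumpBucket(0, peers, other) {
		fmt.Println(line)
	}
}
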
View File

@@ -154,7 +154,8 @@ func (c *Core) DEBUG_getDHTSize() int {
 	total := 0
 	for bidx := 0; bidx < c.dht.nBuckets(); bidx++ {
 		b := c.dht.getBucket(bidx)
-		total += len(b.infos)
+		total += len(b.peers)
+		total += len(b.other)
 	}
 	return total
 }

View File

@@ -46,7 +46,8 @@ func (info *dhtInfo) getNodeID() *NodeID {
 }
 
 type bucket struct {
-	infos []*dhtInfo
+	peers []*dhtInfo
+	other []*dhtInfo
 }
 
 type dhtReq struct {
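
Each bucket now keeps entries that arrive over the dht.peers channel apart from nodes learned through lookups. A small self-contained sketch of the split layout and of the counting and emptiness checks that now span both halves, as DEBUG_getDHTSize and isEmpty do (dhtEntry, splitBucket and size are hypothetical names):

package main

import "fmt"

// dhtEntry is a simplified stand-in for *dhtInfo in the diff above.
type dhtEntry struct {
	key string
}

// splitBucket mirrors the new bucket layout: entries that arrived over the
// peers channel live in peers, everything learned from lookups lives in other.
type splitBucket struct {
	peers []*dhtEntry
	other []*dhtEntry
}

// size counts both halves.
func (b *splitBucket) size() int {
	return len(b.peers) + len(b.other)
}

// isEmpty is only true when both halves are empty.
func (b *splitBucket) isEmpty() bool {
	return b.size() == 0
}

func main() {
	b := splitBucket{
		peers: []*dhtEntry{{key: "peer-A"}},
		other: []*dhtEntry{{key: "node-B"}, {key: "node-C"}},
	}
	fmt.Println(b.size(), b.isEmpty()) // 3 false
}
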
@@ -100,7 +101,7 @@ func (t *dht) handleReq(req *dhtReq) {
 		key: req.key,
 		coords: req.coords,
 	}
-	t.insertIfNew(&info) // This seems DoSable (we just trust their coords...)
+	t.insertIfNew(&info, false) // This seems DoSable (we just trust their coords...)
 	//if req.dest != t.nodeID { t.ping(&info, info.getNodeID()) } // Or spam...
 }
 
@@ -125,13 +126,18 @@ func (t *dht) handleRes(res *dhtRes) {
 		return
 	}
 	b := t.getBucket(bidx)
-	for _, oldinfo := range b.infos {
+	for _, oldinfo := range b.peers {
+		if oldinfo.key == rinfo.key {
+			rinfo.send = oldinfo.send
+		}
+	}
+	for _, oldinfo := range b.other {
 		if oldinfo.key == rinfo.key {
 			rinfo.send = oldinfo.send
 		}
 	}
 	// Insert into table
-	t.insert(&rinfo)
+	t.insert(&rinfo, false)
 	if res.dest == *rinfo.getNodeID() {
 		return
 	} // No infinite recursions
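
When a response re-introduces a known node, the last-send timestamp is carried over from whichever half already holds it before the entry is re-inserted as a non-peer. A sketch of that carry-over under simplified types (entry and carrySendTime are hypothetical, not the code above):

package main

import (
	"fmt"
	"time"
)

// entry is a simplified stand-in for dhtInfo, keeping only the fields used here.
type entry struct {
	key  string
	send time.Time // when we last pinged this node
	recv time.Time // when we last heard from it
}

// carrySendTime copies the last-send timestamp from any existing entry with the
// same key, checking the peer half and then the learned half, so re-inserting a
// node does not reset its ping schedule.
func carrySendTime(fresh *entry, peers, other []*entry) {
	check := func(infos []*entry) {
		for _, old := range infos {
			if old.key == fresh.key {
				fresh.send = old.send
			}
		}
	}
	check(peers)
	check(other)
}

func main() {
	old := &entry{key: "A", send: time.Now().Add(-10 * time.Second)}
	fresh := &entry{key: "A", recv: time.Now()}
	carrySendTime(fresh, []*entry{old}, nil)
	fmt.Println(time.Since(fresh.send) >= 10*time.Second) // true
}
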
@@ -170,7 +176,8 @@ func (t *dht) lookup(nodeID *NodeID) []*dhtInfo {
 	var res []*dhtInfo
 	for bidx := 0; bidx < t.nBuckets(); bidx++ {
 		b := t.getBucket(bidx)
-		res = addInfos(res, b.infos)
+		res = addInfos(res, b.peers)
+		res = addInfos(res, b.other)
 	}
 	doSort := func(infos []*dhtInfo) {
 		less := func(i, j int) bool {
@@ -195,7 +202,7 @@ func (t *dht) nBuckets() int {
 	return len(t.buckets_hidden)
 }
 
-func (t *dht) insertIfNew(info *dhtInfo) {
+func (t *dht) insertIfNew(info *dhtInfo, isPeer bool) {
 	//fmt.Println("DEBUG: dht insertIfNew:", info.getNodeID(), info.coords)
 	// Insert a peer if and only if the bucket doesn't already contain it
 	nodeID := info.getNodeID()
@@ -210,11 +217,11 @@ func (t *dht) insertIfNew(info *dhtInfo) {
 		// (Is there another "natural" choice that bootstraps faster?)
 		info.send = time.Now()
 		info.recv = info.send
-		t.insert(info)
+		t.insert(info, isPeer)
 	}
 }
 
-func (t *dht) insert(info *dhtInfo) {
+func (t *dht) insert(info *dhtInfo, isPeer bool) {
 	//fmt.Println("DEBUG: dht insert:", info.getNodeID(), info.coords)
 	// First update the time on this info
 	info.recv = time.Now()
@@ -228,18 +235,23 @@
 	// First drop any existing entry from the bucket
 	b.drop(&info.key)
 	// Now add to the *end* of the bucket
-	b.infos = append(b.infos, info)
+	if isPeer {
+		// TODO make sure we don't duplicate peers in b.other too
+		b.peers = append(b.peers, info)
+		return
+	}
+	b.other = append(b.other, info)
 	// Check if the next bucket is non-full and return early if it is
 	if bidx+1 == t.nBuckets() {
 		return
 	}
 	bnext := t.getBucket(bidx + 1)
-	if len(bnext.infos) < dht_bucket_size {
+	if len(bnext.other) < dht_bucket_size {
 		return
 	}
 	// Shrink from the *front* to requied size
-	for len(b.infos) > dht_bucket_size {
-		b.infos = b.infos[1:]
+	for len(b.other) > dht_bucket_size {
+		b.other = b.other[1:]
 	}
 }
 
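
The isPeer flag decides which half an entry lands in: peer entries are appended and the function returns before any size management, while non-peer entries remain subject to the dht_bucket_size cap, trimmed from the front (and, in the code above, only once the next bucket is also full). A reduced, self-contained sketch of that behaviour under simplified names (cappedBucket and bucketSize are hypothetical):

package main

import "fmt"

const bucketSize = 2 // stand-in for dht_bucket_size

// node is a simplified stand-in for *dhtInfo.
type node struct{ key string }

// cappedBucket shows the insert logic in isolation: peer entries are appended
// and never trimmed, non-peer entries are trimmed from the front once they
// exceed the cap.
type cappedBucket struct {
	peers []*node
	other []*node
}

func (b *cappedBucket) insert(n *node, isPeer bool) {
	if isPeer {
		b.peers = append(b.peers, n)
		return
	}
	b.other = append(b.other, n)
	for len(b.other) > bucketSize {
		b.other = b.other[1:] // drop the oldest learned node
	}
}

func main() {
	b := &cappedBucket{}
	b.insert(&node{"peer-A"}, true)
	for _, k := range []string{"B", "C", "D"} {
		b.insert(&node{k}, false)
	}
	fmt.Println(len(b.peers), len(b.other)) // 1 2: the peer stays, "B" was evicted
}
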
@@ -256,23 +268,34 @@ func (t *dht) getBucketIndex(nodeID *NodeID) (int, bool) {
 
 func (b *bucket) contains(ninfo *dhtInfo) bool {
 	// Compares if key and coords match
-	for _, info := range b.infos {
-		if info == nil {
-			panic("Should never happen")
-		}
-		if info.key == ninfo.key {
-			if len(info.coords) != len(ninfo.coords) {
-				return false
+	var found bool
+	check := func(infos []*dhtInfo) {
+		for _, info := range infos {
+			if info == nil {
+				panic("Should never happen")
 			}
+			if info.key != info.key {
+				continue
+			}
+			if len(info.coords) != len(ninfo.coords) {
+				continue
+			}
+			match := true
 			for idx := 0; idx < len(info.coords); idx++ {
 				if info.coords[idx] != ninfo.coords[idx] {
-					return false
+					match = false
+					break
 				}
 			}
-			return true
+			if match {
+				found = true
+				break
+			}
 		}
 	}
-	return false
+	check(b.peers)
+	check(b.other)
+	return found
 }
 
 func (b *bucket) drop(key *boxPubKey) {
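
The membership test reduces to an equality check on key and coords between a stored entry and the candidate. A standalone sketch of that comparison with simplified types (info and sameNode are hypothetical names):

package main

import "fmt"

// info is a simplified stand-in for dhtInfo: a key plus a coords path.
type info struct {
	key    string
	coords []byte
}

// sameNode reports whether a stored entry and a candidate refer to the same
// node: the keys must match and the coords must be identical element by element.
func sameNode(stored, candidate *info) bool {
	if stored.key != candidate.key {
		return false
	}
	if len(stored.coords) != len(candidate.coords) {
		return false
	}
	for i := range stored.coords {
		if stored.coords[i] != candidate.coords[i] {
			return false
		}
	}
	return true
}

func main() {
	a := &info{key: "k1", coords: []byte{1, 2, 3}}
	b := &info{key: "k1", coords: []byte{1, 2, 3}}
	c := &info{key: "k1", coords: []byte{1, 2}}
	fmt.Println(sameNode(a, b), sameNode(a, c)) // true false
}
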
@@ -286,7 +309,8 @@ func (b *bucket) drop(key *boxPubKey) {
 		}
 		return cleaned
 	}
-	b.infos = clean(b.infos)
+	b.peers = clean(b.peers)
+	b.other = clean(b.other)
 }
 
 func (t *dht) sendReq(req *dhtReq, dest *dhtInfo) {
@@ -333,7 +357,7 @@ func (t *dht) sendRes(res *dhtRes, req *dhtReq) {
 }
 
 func (b *bucket) isEmpty() bool {
-	return len(b.infos) == 0
+	return len(b.peers)+len(b.other) == 0
 }
 
 func (b *bucket) nextToPing() *dhtInfo {
@@ -343,14 +367,18 @@ func (b *bucket) nextToPing() *dhtInfo {
 	// Gives them time to respond
 	// And time between traffic loss from short term congestion in the network
 	var toPing *dhtInfo
-	for _, next := range b.infos {
-		if time.Since(next.send) < 6*time.Second {
-			continue
-		}
-		if toPing == nil || next.recv.Before(toPing.recv) {
-			toPing = next
+	update := func(infos []*dhtInfo) {
+		for _, next := range infos {
+			if time.Since(next.send) < 6*time.Second {
+				continue
+			}
+			if toPing == nil || next.recv.Before(toPing.recv) {
+				toPing = next
+			}
 		}
 	}
+	update(b.peers)
+	update(b.other)
 	return toPing
 }
 

View File

@@ -77,7 +77,7 @@ func (r *router) mainLoop() {
 		case p := <-r.send:
 			r.sendPacket(p)
 		case info := <-r.core.dht.peers:
-			r.core.dht.insert(info) //r.core.dht.insertIfNew(info)
+			r.core.dht.insertIfNew(info, true)
 		case <-r.reset:
 			r.core.sessions.resetInits()
 		case <-ticker.C:
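
The router now feeds infos arriving on the dht.peers channel through insertIfNew with isPeer set, while lookup responses still come in as non-peers, so an entry is only added when its bucket does not already track it. A toy sketch of that gating (table, contains and the string keys are hypothetical stand-ins):

package main

import "fmt"

// table is a simplified single-bucket stand-in for the dht above.
type table struct {
	peers []string
	other []string
}

func (t *table) contains(key string) bool {
	for _, k := range append(append([]string{}, t.peers...), t.other...) {
		if k == key {
			return true
		}
	}
	return false
}

// insertIfNew only adds a key the table does not already track, routing it to
// the peer half or the learned half depending on isPeer.
func (t *table) insertIfNew(key string, isPeer bool) {
	if t.contains(key) {
		return
	}
	if isPeer {
		t.peers = append(t.peers, key)
		return
	}
	t.other = append(t.other, key)
}

func main() {
	t := &table{}
	t.insertIfNew("A", true)  // arrived over the peers channel
	t.insertIfNew("A", true)  // duplicate, ignored
	t.insertIfNew("B", false) // learned from a lookup response
	fmt.Println(t.peers, t.other) // [A] [B]
}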

View File

@@ -113,7 +113,7 @@ func generateConfig(isAutoconf bool) *nodeConfig {
 		cfg.Listen = "[::]:0"
 	} else {
 		r1 := rand.New(rand.NewSource(time.Now().UnixNano()))
-		cfg.Listen = fmt.Sprintf("[::]:%d", r1.Intn(65534 - 32768) + 32768)
+		cfg.Listen = fmt.Sprintf("[::]:%d", r1.Intn(65534-32768)+32768)
 	}
 	cfg.AdminListen = "[::1]:9001"
 	cfg.BoxPub = hex.EncodeToString(bpub[:])
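
For reference, the randomized listen port lands in the range 32768 to 65533: Intn(65534-32768) yields 0 through 32765, and adding 32768 shifts it into the ephemeral range. A standalone snippet of the same arithmetic (not the config code itself):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	r1 := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Intn(65534-32768) yields 0..32765, so the port ends up in 32768..65533.
	port := r1.Intn(65534-32768) + 32768
	fmt.Printf("[::]:%d\n", port)
}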