weird scheduler hack; seems to make things more stable without actually locking streams to any particular link

Arceliar 2019-09-21 14:33:45 -05:00
parent 2a76163c7e
commit 691192ff5a
2 changed files with 11 additions and 5 deletions

@@ -712,10 +712,15 @@ func (t *switchTable) _handleIn(packet []byte, idle map[switchPort]struct{}) bool {
 		}
 	}
 	if best != nil {
-		// Send to the best idle next hop
 		delete(idle, best.elem.port)
-		ports[best.elem.port].sendPacketsFrom(t, [][]byte{packet})
-		return true
+		// Tell ourselves to send to this node later
+		// If another (e.g. even better) hop becomes idle in the mean time, it'll take the packet instead
+		// FIXME this is just a hack, but seems to help with stability...
+		go t.Act(nil, func() {
+			t._idleIn(best.elem.port)
+		})
+		//ports[best.elem.port].sendPacketsFrom(t, [][]byte{packet})
+		//return true
 	}
 	// Didn't find anyone idle to send it to
 	return false
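The pattern is easier to see in isolation. Below is a minimal, self-contained sketch of the idea, with a plain channel standing in for the actor inbox behind t.Act; toySwitch, handleIn, and idleIn are illustrative names, not identifiers from the repo. Instead of dispatching inline, handleIn reserves the best idle hop, queues the packet, and posts an idle event back to its own loop; whichever hop is idle when that event is processed takes the packet, which is the "even better hop" behavior the new comments describe.

package main

import "fmt"

// toySwitch is a stand-in for the actor behind t.Act: a single
// goroutine drains the events channel, so all state access is serialized.
type toySwitch struct {
	events chan func()
	idle   map[int]bool
	queued [][]byte
}

func newToySwitch() *toySwitch {
	return &toySwitch{events: make(chan func(), 16), idle: map[int]bool{}}
}

func (t *toySwitch) run() {
	for f := range t.events {
		f()
	}
}

// handleIn mirrors the new behavior: reserve the best idle port and
// queue the packet, then post an idle event back to ourselves instead
// of sending the packet right away.
func (t *toySwitch) handleIn(packet []byte, best int) {
	if t.idle[best] {
		delete(t.idle, best)
		t.queued = append(t.queued, packet)
		go func() { t.events <- func() { t.idleIn(best) } }()
	}
}

// idleIn hands the oldest queued packet to whichever port just went
// idle -- not necessarily the one handleIn reserved it for.
func (t *toySwitch) idleIn(port int) {
	if len(t.queued) > 0 {
		packet := t.queued[0]
		t.queued = t.queued[1:]
		fmt.Printf("port %d sends %q\n", port, packet)
		return
	}
	t.idle[port] = true
}

func main() {
	t := newToySwitch()
	t.idle[1] = true
	t.events <- func() { t.handleIn([]byte("hello"), 1) }
	// port 2 reports idle before port 1's deferred event is processed,
	// so it steals the packet
	t.events <- func() { t.idleIn(2) }
	done := make(chan struct{})
	t.events <- func() { close(done) }
	go t.run()
	<-done
}

Running this prints port 2 sends "hello": port 2 went idle before port 1's deferred event arrived, so it took the packet even though port 1 was originally chosen. That is why the change avoids locking streams to any particular link.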

@@ -233,8 +233,9 @@ func (t *tcp) call(saddr string, options interface{}, sintf string) {
 	}
 	defer func() {
 		// Block new calls for a little while, to mitigate livelock scenarios
-		time.Sleep(default_timeout)
-		time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
+		rand.Seed(time.Now().UnixNano())
+		delay := default_timeout + time.Duration(rand.Intn(10000))*time.Millisecond
+		time.Sleep(delay)
 		t.mutex.Lock()
 		delete(t.calls, callname)
 		t.mutex.Unlock()
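For context on why the jitter grew: the old code slept default_timeout plus at most one second of random delay, so two nodes that dialed each other at the same moment could keep retrying almost in lockstep. Here is a rough, runnable sketch of the new delay computation; the 6-second base is an assumed stand-in for default_timeout, which is defined elsewhere in the package.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Assumed stand-in for the package's default_timeout constant.
const defaultTimeout = 6 * time.Second

// jitteredDelay mirrors the new backoff: a fixed base plus up to 10s
// of uniform random jitter, so two nodes that called each other
// simultaneously are unlikely to retry in lockstep again.
func jitteredDelay() time.Duration {
	return defaultTimeout + time.Duration(rand.Intn(10000))*time.Millisecond
}

func main() {
	rand.Seed(time.Now().UnixNano()) // as in the hunk; Go 1.20+ seeds automatically
	for i := 0; i < 3; i++ {
		fmt.Println(jitteredDelay())
	}
}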