package yggdrasil

import (
    "encoding/hex"
    "errors"
    "fmt"
    "net"
    "sort"
    "strconv"
    "strings"
    "sync/atomic"
    "time"

    "github.com/gologme/log"

    "github.com/yggdrasil-network/yggdrasil-go/src/address"
    "github.com/yggdrasil-network/yggdrasil-go/src/crypto"
)

// Peer represents a single peer object. This contains information from the
// preferred switch port for this peer, although there may be more than one in
// reality.
type Peer struct {
    PublicKey  crypto.BoxPubKey
    Endpoint   string
    BytesSent  uint64
    BytesRecvd uint64
    Protocol   string
    Port       uint64
    Uptime     time.Duration
}

// SwitchPeer represents a switch connection to a peer. Note that there may be
// multiple switch peers per actual peer, e.g. if there are multiple connections
// to a given node.
type SwitchPeer struct {
    PublicKey  crypto.BoxPubKey
    Coords     []byte
    BytesSent  uint64
    BytesRecvd uint64
    Port       uint64
    Protocol   string
    Endpoint   string
}

// DHTEntry represents a single DHT entry that has been learned or cached from
// DHT searches.
type DHTEntry struct {
    PublicKey crypto.BoxPubKey
    Coords    []byte
    LastSeen  time.Duration
}

// DHTRes represents a DHT response, as returned by DHTPing.
type DHTRes struct {
    PublicKey crypto.BoxPubKey // key of the sender
    Coords    []byte           // coords of the sender
    Dest      crypto.NodeID    // the destination node ID
    Infos     []DHTEntry       // response
}

// NodeInfoPayload represents a RequestNodeInfo response, in bytes.
type NodeInfoPayload []byte

// SwitchQueues represents information from the switch related to link
// congestion and a list of switch queues created in response to congestion on a
// given link.
type SwitchQueues struct {
    Queues       []SwitchQueue
    Count        uint64
    Size         uint64
    HighestCount uint64
    HighestSize  uint64
    MaximumSize  uint64
}

// SwitchQueue represents a single switch queue, which is created in response
// to congestion on a given link.
type SwitchQueue struct {
    ID      string
    Size    uint64
    Packets uint64
    Port    uint64
}

// Session represents an open session with another node.
type Session struct {
    PublicKey   crypto.BoxPubKey
    Coords      []byte
    BytesSent   uint64
    BytesRecvd  uint64
    MTU         uint16
    Uptime      time.Duration
    WasMTUFixed bool
}

// GetPeers returns one or more Peer objects containing information about active
// peerings with other Yggdrasil nodes, where one of the responses always
// includes information about the current node (with a port number of 0). If
// there is exactly one entry then this node is not connected to any other nodes
// and is therefore isolated.
func (c *Core) GetPeers() []Peer {
    ports := c.peers.ports.Load().(map[switchPort]*peer)
    var peers []Peer
    var ps []switchPort
    for port := range ports {
        ps = append(ps, port)
    }
    sort.Slice(ps, func(i, j int) bool { return ps[i] < ps[j] })
    for _, port := range ps {
        p := ports[port]
        info := Peer{
            Endpoint:   p.intf.name,
            BytesSent:  atomic.LoadUint64(&p.bytesSent),
            BytesRecvd: atomic.LoadUint64(&p.bytesRecvd),
            Protocol:   p.intf.info.linkType,
            Port:       uint64(port),
            Uptime:     time.Since(p.firstSeen),
        }
        copy(info.PublicKey[:], p.box[:])
        peers = append(peers, info)
    }
    return peers
}
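
// Usage sketch (illustrative only, not part of the API): checking whether this
// node is isolated. GetPeers always includes an entry for the local node, so a
// single entry means no active peerings. The variables "c" and "logger" are
// assumed to exist in the caller.
//
//	if peers := c.GetPeers(); len(peers) == 1 {
//		logger.Println("no active peerings, node is isolated")
//	}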

// GetSwitchPeers returns zero or more SwitchPeer objects containing information
// about switch port connections with other Yggdrasil nodes. Note that, unlike
// GetPeers, GetSwitchPeers does not include information about the current node,
// therefore it is possible for this to return zero elements if the node is
// isolated or not connected to any peers.
func (c *Core) GetSwitchPeers() []SwitchPeer {
    var switchpeers []SwitchPeer
    table := c.switchTable.table.Load().(lookupTable)
    peers := c.peers.ports.Load().(map[switchPort]*peer)
    for _, elem := range table.elems {
        peer, isIn := peers[elem.port]
        if !isIn {
            continue
        }
        coords := elem.locator.getCoords()
        info := SwitchPeer{
            Coords:     append([]byte{}, coords...),
            BytesSent:  atomic.LoadUint64(&peer.bytesSent),
            BytesRecvd: atomic.LoadUint64(&peer.bytesRecvd),
            Port:       uint64(elem.port),
            Protocol:   peer.intf.info.linkType,
            Endpoint:   peer.intf.info.remote,
        }
        copy(info.PublicKey[:], peer.box[:])
        switchpeers = append(switchpeers, info)
    }
    return switchpeers
}

// GetDHT returns zero or more entries as stored in the DHT, cached primarily
// from searches that have already taken place.
func (c *Core) GetDHT() []DHTEntry {
    var dhtentries []DHTEntry
    getDHT := func() {
        now := time.Now()
        var dhtentry []*dhtInfo
        for _, v := range c.dht.table {
            dhtentry = append(dhtentry, v)
        }
        sort.SliceStable(dhtentry, func(i, j int) bool {
            return dht_ordered(&c.dht.nodeID, dhtentry[i].getNodeID(), dhtentry[j].getNodeID())
        })
        for _, v := range dhtentry {
            info := DHTEntry{
                Coords:   append([]byte{}, v.coords...),
                LastSeen: now.Sub(v.recv),
            }
            copy(info.PublicKey[:], v.key[:])
            dhtentries = append(dhtentries, info)
        }
    }
    c.router.doAdmin(getDHT)
    return dhtentries
}

// GetSwitchQueues returns information about the switch queues that are
// currently in effect. These values can change within an instant.
func (c *Core) GetSwitchQueues() SwitchQueues {
    var switchqueues SwitchQueues
    switchTable := &c.switchTable
    getSwitchQueues := func() {
        switchqueues = SwitchQueues{
            Count:        uint64(len(switchTable.queues.bufs)),
            Size:         switchTable.queues.size,
            HighestCount: uint64(switchTable.queues.maxbufs),
            HighestSize:  switchTable.queues.maxsize,
            MaximumSize:  switchTable.queueTotalMaxSize,
        }
        for k, v := range switchTable.queues.bufs {
            nexthop := switchTable.bestPortForCoords([]byte(k))
            queue := SwitchQueue{
                ID:      k,
                Size:    v.size,
                Packets: uint64(len(v.packets)),
                Port:    uint64(nexthop),
            }
            switchqueues.Queues = append(switchqueues.Queues, queue)
        }
    }
    c.switchTable.doAdmin(getSwitchQueues)
    return switchqueues
}

// GetSessions returns a list of open sessions from this node to other nodes.
func (c *Core) GetSessions() []Session {
    var sessions []Session
    getSessions := func() {
        for _, sinfo := range c.sessions.sinfos {
            var session Session
            workerFunc := func() {
                session = Session{
                    Coords:      append([]byte{}, sinfo.coords...),
                    MTU:         sinfo.getMTU(),
                    BytesSent:   sinfo.bytesSent,
                    BytesRecvd:  sinfo.bytesRecvd,
                    Uptime:      time.Since(sinfo.timeOpened),
                    WasMTUFixed: sinfo.wasMTUFixed,
                }
                copy(session.PublicKey[:], sinfo.theirPermPub[:])
            }
            var skip bool
            func() {
                defer func() {
                    if recover() != nil {
                        skip = true
                    }
                }()
                sinfo.doFunc(workerFunc)
            }()
            if skip {
                continue
            }
            // TODO? skip known but timed-out sessions?
            sessions = append(sessions, session)
        }
    }
    c.router.doAdmin(getSessions)
    return sessions
}

// BuildName gets the current build name. This is usually injected if built
// from git, or returns "yggdrasil" otherwise.
func BuildName() string {
    if buildName == "" {
        return "yggdrasil"
    }
    return buildName
}

// BuildVersion gets the current build version. This is usually injected if
// built from git, or returns "unknown" otherwise.
func BuildVersion() string {
    if buildVersion == "" {
        return "unknown"
    }
    return buildVersion
}

// ConnListen returns a listener for Yggdrasil session connections.
func (c *Core) ConnListen() (*Listener, error) {
    c.sessions.listenerMutex.Lock()
    defer c.sessions.listenerMutex.Unlock()
    if c.sessions.listener != nil {
        return nil, errors.New("a listener already exists")
    }
    c.sessions.listener = &Listener{
        core:  c,
        conn:  make(chan *Conn),
        close: make(chan interface{}),
    }
    return c.sessions.listener, nil
}

// ConnDialer returns a dialer for Yggdrasil session connections.
func (c *Core) ConnDialer() (*Dialer, error) {
    return &Dialer{
        core: c,
    }, nil
}

// ListenTCP starts a new TCP listener. The input URI should match that of the
// "Listen" configuration item, e.g.
// tcp://a.b.c.d:e
func (c *Core) ListenTCP(uri string) (*TcpListener, error) {
    return c.link.tcp.listen(uri)
}

// NodeID gets the node ID.
func (c *Core) NodeID() *crypto.NodeID {
    return crypto.GetNodeID(&c.boxPub)
}

// TreeID gets the tree ID.
func (c *Core) TreeID() *crypto.TreeID {
    return crypto.GetTreeID(&c.sigPub)
}

// SigningPublicKey gets the node's signing public key, as a hex-encoded string.
func (c *Core) SigningPublicKey() string {
    return hex.EncodeToString(c.sigPub[:])
}

// EncryptionPublicKey gets the node's encryption public key, as a hex-encoded
// string.
func (c *Core) EncryptionPublicKey() string {
    return hex.EncodeToString(c.boxPub[:])
}

// Coords returns the current coordinates of the node.
func (c *Core) Coords() []byte {
    table := c.switchTable.table.Load().(lookupTable)
    return table.self.getCoords()
}

// Address gets the IPv6 address of the Yggdrasil node. This is always a /128
// address.
func (c *Core) Address() net.IP {
    address := net.IP(address.AddrForNodeID(c.NodeID())[:])
    return address
}

// Subnet gets the routed IPv6 subnet of the Yggdrasil node. This is always a
// /64 subnet.
func (c *Core) Subnet() net.IPNet {
    subnet := address.SubnetForNodeID(c.NodeID())[:]
    subnet = append(subnet, 0, 0, 0, 0, 0, 0, 0, 0)
    return net.IPNet{IP: subnet, Mask: net.CIDRMask(64, 128)}
}
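
// Usage sketch (illustrative only): printing the node's IPv6 address and routed
// subnet, assuming "c" is an initialised *Core.
//
//	fmt.Println("address:", c.Address().String())
//	subnet := c.Subnet()
//	fmt.Println("subnet:", subnet.String())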

// MyNodeInfo gets the currently configured nodeinfo.
func (c *Core) MyNodeInfo() NodeInfoPayload {
    return c.router.nodeinfo.getNodeInfo()
}

// SetNodeInfo sets the local nodeinfo. Note that nodeinfo can be any value or
// struct; it will be serialised into JSON automatically.
func (c *Core) SetNodeInfo(nodeinfo interface{}, nodeinfoprivacy bool) {
    c.router.nodeinfo.setNodeInfo(nodeinfo, nodeinfoprivacy)
}
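
// Usage sketch (illustrative only): publishing a simple nodeinfo map, assuming
// "c" is an initialised *Core. The second argument enables nodeinfo privacy
// when true.
//
//	c.SetNodeInfo(map[string]interface{}{
//		"name": "example-node",
//	}, false)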

// GetNodeInfo requests nodeinfo from a remote node, as specified by the given
// public key and coordinates. The third parameter specifies whether to ignore
// the local cache; allowing a cached result generates less traffic than sending
// a fresh request each time, e.g. when crawling the network.
func (c *Core) GetNodeInfo(keyString, coordString string, nocache bool) (NodeInfoPayload, error) {
    var key crypto.BoxPubKey
    if keyBytes, err := hex.DecodeString(keyString); err != nil {
        return NodeInfoPayload{}, err
    } else {
        copy(key[:], keyBytes)
    }
    if !nocache {
        if response, err := c.router.nodeinfo.getCachedNodeInfo(key); err == nil {
            return response, nil
        }
    }
    var coords []byte
    for _, cstr := range strings.Split(strings.Trim(coordString, "[]"), " ") {
        if cstr == "" {
            // Special case: the trimmed string is empty, e.g. when this node is the root
            continue
        }
        if u64, err := strconv.ParseUint(cstr, 10, 8); err != nil {
            return NodeInfoPayload{}, err
        } else {
            coords = append(coords, uint8(u64))
        }
    }
    response := make(chan *NodeInfoPayload, 1)
    sendNodeInfoRequest := func() {
        c.router.nodeinfo.addCallback(key, func(nodeinfo *NodeInfoPayload) {
            defer func() { recover() }()
            select {
            case response <- nodeinfo:
            default:
            }
        })
        c.router.nodeinfo.sendNodeInfo(key, coords, false)
    }
    c.router.doAdmin(sendNodeInfoRequest)
    go func() {
        time.Sleep(6 * time.Second)
        close(response)
    }()
    for res := range response {
        return *res, nil
    }
    return NodeInfoPayload{}, fmt.Errorf("getNodeInfo timeout: %s", keyString)
}
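
// Usage sketch (illustrative only): requesting nodeinfo from a remote node,
// assuming "c" is an initialised *Core and the key and coordinate strings were
// obtained elsewhere (e.g. from GetSwitchPeers or a crawler).
//
//	payload, err := c.GetNodeInfo(keyString, coordString, false)
//	if err == nil {
//		fmt.Println(string(payload))
//	}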

// SetSessionGatekeeper allows you to configure a handler function for deciding
// whether a session should be allowed or not. The default session firewall is
// implemented in this way. The function receives the public key of the remote
// side and a boolean which is true if we initiated the session or false if we
// received an incoming session request. The function should return true to
// allow the session or false to reject it.
func (c *Core) SetSessionGatekeeper(f func(pubkey *crypto.BoxPubKey, initiator bool) bool) {
    c.sessions.isAllowedMutex.Lock()
    defer c.sessions.isAllowedMutex.Unlock()

    c.sessions.isAllowedHandler = f
}
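
// Usage sketch (illustrative only): a gatekeeper that accepts sessions we
// initiate ourselves but rejects all incoming session requests, assuming "c"
// is an initialised *Core.
//
//	c.SetSessionGatekeeper(func(pubkey *crypto.BoxPubKey, initiator bool) bool {
//		return initiator
//	})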

// SetLogger sets the output logger of the Yggdrasil node after startup. This
// may be useful if you want to redirect the output later.
func (c *Core) SetLogger(log *log.Logger) {
    c.log = log
}

// AddPeer adds a peer. This should be specified in the peer URI format, e.g.:
// tcp://a.b.c.d:e
// socks://a.b.c.d:e/f.g.h.i:j
// This adds the peer to the peer list, so that it will be called again if the
// connection drops.
func (c *Core) AddPeer(addr string, sintf string) error {
    if err := c.CallPeer(addr, sintf); err != nil {
        return err
    }
    c.config.Mutex.Lock()
    if sintf == "" {
        c.config.Current.Peers = append(c.config.Current.Peers, addr)
    } else {
        c.config.Current.InterfacePeers[sintf] = append(c.config.Current.InterfacePeers[sintf], addr)
    }
    c.config.Mutex.Unlock()
    return nil
}
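
// Usage sketch (illustrative only): adding a persistent TCP peer on a specific
// source interface, assuming "c" is an initialised *Core and "logger" exists in
// the caller. The peer address shown is a placeholder.
//
//	if err := c.AddPeer("tcp://peer.example.com:12345", "eth0"); err != nil {
//		logger.Println("failed to add peer:", err)
//	}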

// RemovePeer is not implemented yet.
func (c *Core) RemovePeer(addr string, sintf string) error {
    // TODO: Implement a reverse of AddPeer, where we look up the port number
    // based on the addr and sintf, disconnect it and then remove it from the
    // peers list so we don't reconnect to it later
    return errors.New("not implemented")
}

// CallPeer calls a peer once. This should be specified in the peer URI format,
// e.g.:
// tcp://a.b.c.d:e
// socks://a.b.c.d:e/f.g.h.i:j
// This does not add the peer to the peer list, so if the connection drops, the
// peer will not be called again automatically.
func (c *Core) CallPeer(addr string, sintf string) error {
    return c.link.call(addr, sintf)
}

// DisconnectPeer disconnects a peer once. This should be specified as a port
// number.
func (c *Core) DisconnectPeer(port uint64) error {
    c.peers.removePeer(switchPort(port))
    return nil
}

// GetAllowedEncryptionPublicKeys returns the public keys permitted for incoming
// peer connections.
func (c *Core) GetAllowedEncryptionPublicKeys() []string {
    return c.peers.getAllowedEncryptionPublicKeys()
}

// AddAllowedEncryptionPublicKey whitelists a key for incoming peer connections.
func (c *Core) AddAllowedEncryptionPublicKey(bstr string) (err error) {
    c.peers.addAllowedEncryptionPublicKey(bstr)
    return nil
}

// RemoveAllowedEncryptionPublicKey removes a key from the whitelist for
// incoming peer connections. If none are set, an empty list permits all
// incoming connections.
func (c *Core) RemoveAllowedEncryptionPublicKey(bstr string) (err error) {
    c.peers.removeAllowedEncryptionPublicKey(bstr)
    return nil
}
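
// Usage sketch (illustrative only): whitelisting a single hex-encoded
// encryption public key for incoming peerings and then listing the current
// whitelist, assuming "c" is an initialised *Core, "keyString" is a valid
// hex-encoded key and "logger" exists in the caller.
//
//	if err := c.AddAllowedEncryptionPublicKey(keyString); err == nil {
//		logger.Println("allowed keys:", c.GetAllowedEncryptionPublicKeys())
//	}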

// DHTPing sends a DHT ping to the node with the provided key and coords,
// optionally looking up the specified target NodeID.
func (c *Core) DHTPing(keyString, coordString, targetString string) (DHTRes, error) {
    var key crypto.BoxPubKey
    if keyBytes, err := hex.DecodeString(keyString); err != nil {
        return DHTRes{}, err
    } else {
        copy(key[:], keyBytes)
    }
    var coords []byte
    for _, cstr := range strings.Split(strings.Trim(coordString, "[]"), " ") {
        if cstr == "" {
            // Special case: the trimmed string is empty, e.g. when this node is the root
            continue
        }
        if u64, err := strconv.ParseUint(cstr, 10, 8); err != nil {
            return DHTRes{}, err
        } else {
            coords = append(coords, uint8(u64))
        }
    }
    resCh := make(chan *dhtRes, 1)
    info := dhtInfo{
        key:    key,
        coords: coords,
    }
    target := *info.getNodeID()
    if targetString == "none" {
        // Leave the default target in place
    } else if targetBytes, err := hex.DecodeString(targetString); err != nil {
        return DHTRes{}, err
    } else if len(targetBytes) != len(target) {
        return DHTRes{}, errors.New("Incorrect target NodeID length")
    } else {
        copy(target[:], targetBytes)
    }
    rq := dhtReqKey{info.key, target}
    sendPing := func() {
        c.dht.addCallback(&rq, func(res *dhtRes) {
            resCh <- res
        })
        c.dht.ping(&info, &target)
    }
    c.router.doAdmin(sendPing)
    // TODO: do something better than the below...
    res := <-resCh
    if res != nil {
        r := DHTRes{
            Coords: append([]byte{}, res.Coords...),
        }
        copy(r.PublicKey[:], res.Key[:])
        for _, i := range res.Infos {
            e := DHTEntry{
                Coords: append([]byte{}, i.coords...),
            }
            copy(e.PublicKey[:], i.key[:])
            r.Infos = append(r.Infos, e)
        }
        return r, nil
    }
    return DHTRes{}, fmt.Errorf("DHT ping timeout: %s", keyString)
}
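
// Usage sketch (illustrative only): pinging a known node in the DHT without a
// specific lookup target, assuming "c" is an initialised *Core, "keyString" and
// "coordString" describe the remote node, and "logger" exists in the caller.
//
//	if res, err := c.DHTPing(keyString, coordString, "none"); err == nil {
//		logger.Println("DHT ping returned", len(res.Infos), "entries")
//	}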