mirror of
https://github.com/openp2p-cn/openp2p.git
synced 2026-05-07 13:52:14 +08:00
Compare commits
20 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7e57237ec9 | ||
|
|
52dfe5c938 | ||
|
|
b72ede9a6a | ||
|
|
b39fab2188 | ||
|
|
9b0525294a | ||
|
|
276a4433f1 | ||
|
|
e21adebc26 | ||
|
|
b2a7619bd6 | ||
|
|
fe4022ba6c | ||
|
|
82c74b4f85 | ||
|
|
0af65b7204 | ||
|
|
46b4f78010 | ||
|
|
8ebdf3341e | ||
|
|
b667e5b766 | ||
|
|
cd415e7bf4 | ||
|
|
67e3a8915a | ||
|
|
791d910314 | ||
|
|
c3a43be3cc | ||
|
|
c8b8bf05a5 | ||
|
|
8311341960 |
22
README-ZH.md
22
README-ZH.md
@@ -101,6 +101,22 @@ cd到代码根目录,执行
|
||||
```
|
||||
make
|
||||
```
|
||||
手动编译特定系统和架构
|
||||
All GOOS values:
|
||||
```
|
||||
"aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos"
|
||||
```
|
||||
All GOARCH values:
|
||||
```
|
||||
"386", "amd64", "amd64p32", "arm", "arm64", "arm64be", "armbe", "loong64", "mips", "mips64", "mips64le", "mips64p32", "mips64p32le", "mipsle", "ppc", "ppc64", "ppc64le", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm"
|
||||
```
|
||||
|
||||
比如linux+amd64
|
||||
```
|
||||
export GOPROXY=https://goproxy.io,direct
|
||||
go mod tidy
|
||||
CGO_ENABLED=0 env GOOS=linux GOARCH=amd64 go build -o openp2p --ldflags '-s -w ' -gcflags '-l' -p 8 -installsuffix cgo ./cmd
|
||||
```
|
||||
|
||||
## RoadMap
|
||||
近期计划:
|
||||
@@ -110,12 +126,12 @@ make
|
||||
4. ~~建立网站,用户可以在网站管理所有P2PApp和设备。查看设备在线状态,升级,增删查改重启P2PApp等~~(100%)
|
||||
5. 建立公众号,用户可在微信公众号管理所有P2PApp和设备
|
||||
6. 客户端提供WebUI
|
||||
7. 支持自有服务器,开源服务器程序
|
||||
7. ~~支持自有服务器,开源服务器程序~~(100%)
|
||||
8. 共享节点调度模型优化,对不同的运营商优化
|
||||
9. 方便二次开发,提供API和lib
|
||||
10. 应用层支持UDP协议,实现很简单,但UDP应用较少暂不急(100%)
|
||||
10. ~~应用层支持UDP协议,实现很简单,但UDP应用较少暂不急~~(100%)
|
||||
11. 底层通信支持KCP协议,目前仅支持Quic;KCP专门对延时优化,被游戏加速器广泛使用,可以牺牲一定的带宽降低延时
|
||||
12. 支持Android系统,让旧手机焕发青春变成移动网关
|
||||
12. ~~支持Android系统,让旧手机焕发青春变成移动网关~~(100%)
|
||||
13. 支持Windows网上邻居共享文件
|
||||
14. 内网直连优化,用处不大,估计就用户测试时用到
|
||||
15. ~~支持UPNP~~(100%)
|
||||
|
||||
21
README.md
21
README.md
@@ -109,6 +109,23 @@ cd root directory of the socure code and execute
|
||||
make
|
||||
```
|
||||
|
||||
build specified os and arch.
|
||||
All GOOS values:
|
||||
```
|
||||
"aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos"
|
||||
```
|
||||
All GOARCH values:
|
||||
```
|
||||
"386", "amd64", "amd64p32", "arm", "arm64", "arm64be", "armbe", "loong64", "mips", "mips64", "mips64le", "mips64p32", "mips64p32le", "mipsle", "ppc", "ppc64", "ppc64le", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm"
|
||||
```
|
||||
|
||||
For example linux+amd64
|
||||
```
|
||||
export GOPROXY=https://goproxy.io,direct
|
||||
go mod tidy
|
||||
CGO_ENABLED=0 env GOOS=linux GOARCH=amd64 go build -o openp2p --ldflags '-s -w ' -gcflags '-l' -p 8 -installsuffix cgo ./cmd
|
||||
```
|
||||
|
||||
## RoadMap
|
||||
Short-Term:
|
||||
1. ~~Support IPv6.~~(100%)
|
||||
@@ -117,12 +134,12 @@ Short-Term:
|
||||
4. ~~Build website, users can manage all P2PApp and devices via it. View devices' online status, upgrade, restart or CURD P2PApp .~~(100%)
|
||||
5. Provide wechat official account, user can manage P2PApp nodes and deivce as same as website.
|
||||
6. Provide WebUI on client side.
|
||||
7. Support private server, open source server program.
|
||||
7. ~~Support private server, open source server program.~~(100%)
|
||||
8. Optimize our share scheduling model for different network operators.
|
||||
9. Provide REST APIs and libary for secondary development.
|
||||
10. ~~Support UDP at application layer, it is easy to implement but not urgent due to only a few applicaitons using UDP protocol.~~(100%)
|
||||
11. Support KCP protocol underlay, currently support Quic only. KCP focus on delay optimization,which has been widely used as game accelerator,it can sacrifice part of bandwidth to reduce timelag.
|
||||
12. Support Android platform, let the phones to be mobile gateway.
|
||||
12. ~~Support Android platform, let the phones to be mobile gateway.~~(100%)
|
||||
13. Support SMB Windows neighborhood.
|
||||
14. Direct connection on intranet, for testing.
|
||||
15. ~~Support UPNP.~~(100%)
|
||||
|
||||
10
USAGE-ZH.md
10
USAGE-ZH.md
@@ -96,4 +96,12 @@ firewall-cmd --state
|
||||
C:\Program Files\OpenP2P\openp2p.exe uninstall
|
||||
# linux,macos
|
||||
sudo /usr/local/openp2p/openp2p uninstall
|
||||
```
|
||||
```
|
||||
|
||||
## Docker运行
|
||||
```
|
||||
# 把YOUR-TOKEN和YOUR-NODE-NAME替换成自己的
|
||||
docker run -d --restart=always --net host --name openp2p-client -e OPENP2P_TOKEN=YOUR-TOKEN -e OPENP2P_NODE=YOUR-NODE-NAME openp2pcn/openp2p-client:latest
|
||||
OR
|
||||
docker run -d --restart=always --net host --name openp2p-client openp2pcn/openp2p-client:latest -token YOUR-TOKEN -node YOUR-NODE-NAME
|
||||
```
|
||||
|
||||
8
USAGE.md
8
USAGE.md
@@ -98,4 +98,12 @@ firewall-cmd --state
|
||||
C:\Program Files\OpenP2P\openp2p.exe uninstall
|
||||
# linux,macos
|
||||
sudo /usr/local/openp2p/openp2p uninstall
|
||||
```
|
||||
|
||||
## Run with Docker
|
||||
```
|
||||
# Replace YOUR-TOKEN and YOUR-NODE-NAME with yours
|
||||
docker run -d --net host --name openp2p-client -e OPENP2P_TOKEN=YOUR-TOKEN -e OPENP2P_NODE=YOUR-NODE-NAME openp2pcn/openp2p-client:latest
|
||||
OR
|
||||
docker run -d --net host --name openp2p-client openp2pcn/openp2p-client:latest -token YOUR-TOKEN -node YOUR-NODE-NAME
|
||||
```
|
||||
17
app/README.md
Normal file
17
app/README.md
Normal file
@@ -0,0 +1,17 @@
|
||||
## Build
|
||||
```
|
||||
cd core
|
||||
go get -v golang.org/x/mobile/bind
|
||||
gomobile bind -target android -v
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "build error"
|
||||
exit 9
|
||||
fi
|
||||
echo "build ok"
|
||||
cp openp2p.aar openp2p-sources.jar ../app/app/libs
|
||||
echo "copy to APP libs"
|
||||
|
||||
cd ../app
|
||||
./gradlew build
|
||||
|
||||
```
|
||||
@@ -1,7 +1,9 @@
|
||||
package main
|
||||
|
||||
import openp2p "openp2p/core"
|
||||
import (
|
||||
core "openp2p/core"
|
||||
)
|
||||
|
||||
func main() {
|
||||
openp2p.Run()
|
||||
core.Run()
|
||||
}
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BandwidthLimiter ...
|
||||
type BandwidthLimiter struct {
|
||||
ts time.Time
|
||||
bw int // mbps
|
||||
freeBytes int // bytes
|
||||
maxFreeBytes int // bytes
|
||||
mtx sync.Mutex
|
||||
}
|
||||
|
||||
// mbps
|
||||
func newBandwidthLimiter(bw int) *BandwidthLimiter {
|
||||
return &BandwidthLimiter{
|
||||
bw: bw,
|
||||
ts: time.Now(),
|
||||
maxFreeBytes: bw * 1024 * 1024 / 8,
|
||||
freeBytes: bw * 1024 * 1024 / 8,
|
||||
}
|
||||
}
|
||||
|
||||
// Add ...
|
||||
func (bl *BandwidthLimiter) Add(bytes int) {
|
||||
if bl.bw <= 0 {
|
||||
return
|
||||
}
|
||||
bl.mtx.Lock()
|
||||
defer bl.mtx.Unlock()
|
||||
// calc free flow 1000*1000/1024/1024=0.954; 1024*1024/1000/1000=1.048
|
||||
bl.freeBytes += int(time.Since(bl.ts) * time.Duration(bl.bw) / 8 / 954)
|
||||
if bl.freeBytes > bl.maxFreeBytes {
|
||||
bl.freeBytes = bl.maxFreeBytes
|
||||
}
|
||||
bl.freeBytes -= bytes
|
||||
bl.ts = time.Now()
|
||||
if bl.freeBytes < 0 {
|
||||
// sleep for the overflow
|
||||
time.Sleep(time.Millisecond * time.Duration(-bl.freeBytes/(bl.bw*1048/8)))
|
||||
}
|
||||
}
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
const MinNodeNameLen = 8
|
||||
|
||||
func getmac(ip string) string {
|
||||
//get mac relative to the ip address which connected to the mq.
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return ""
|
||||
@@ -138,8 +137,7 @@ func netInfo() *NetInfo {
|
||||
continue
|
||||
}
|
||||
rsp := NetInfo{}
|
||||
err = json.Unmarshal(buf[:n], &rsp)
|
||||
if err != nil {
|
||||
if err = json.Unmarshal(buf[:n], &rsp); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong NetInfo:%s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -3,8 +3,10 @@ package openp2p
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -13,14 +15,16 @@ var gConf Config
|
||||
|
||||
type AppConfig struct {
|
||||
// required
|
||||
AppName string
|
||||
Protocol string
|
||||
SrcPort int
|
||||
PeerNode string
|
||||
DstPort int
|
||||
DstHost string
|
||||
PeerUser string
|
||||
Enabled int // default:1
|
||||
AppName string
|
||||
Protocol string
|
||||
Whitelist string
|
||||
SrcPort int
|
||||
PeerNode string
|
||||
DstPort int
|
||||
DstHost string
|
||||
PeerUser string
|
||||
RelayNode string
|
||||
Enabled int // default:1
|
||||
// runtime info
|
||||
peerVersion string
|
||||
peerToken uint64
|
||||
@@ -38,10 +42,13 @@ type AppConfig struct {
|
||||
connectTime time.Time
|
||||
fromToken uint64
|
||||
linkMode string
|
||||
isUnderlayServer int // TODO: bool?
|
||||
isUnderlayServer int
|
||||
}
|
||||
|
||||
func (c *AppConfig) ID() string {
|
||||
return fmt.Sprintf("%s%d", c.Protocol, c.SrcPort)
|
||||
}
|
||||
|
||||
// TODO: add loglevel, maxlogfilesize
|
||||
type Config struct {
|
||||
Network NetworkConfig `json:"network"`
|
||||
Apps []*AppConfig `json:"apps"`
|
||||
@@ -58,7 +65,18 @@ func (c *Config) switchApp(app AppConfig, enabled int) {
|
||||
c.Apps[i].Enabled = enabled
|
||||
c.Apps[i].retryNum = 0
|
||||
c.Apps[i].nextRetryTime = time.Now()
|
||||
return
|
||||
break
|
||||
}
|
||||
}
|
||||
c.save()
|
||||
}
|
||||
func (c *Config) retryApp(peerNode string) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
for i := 0; i < len(c.Apps); i++ {
|
||||
if c.Apps[i].PeerNode == peerNode {
|
||||
c.Apps[i].retryNum = 0
|
||||
c.Apps[i].nextRetryTime = time.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -66,6 +84,7 @@ func (c *Config) switchApp(app AppConfig, enabled int) {
|
||||
func (c *Config) add(app AppConfig, override bool) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
defer c.save()
|
||||
if app.SrcPort == 0 || app.DstPort == 0 {
|
||||
gLog.Println(LvERROR, "invalid app ", app)
|
||||
return
|
||||
@@ -87,17 +106,19 @@ func (c *Config) delete(app AppConfig) {
|
||||
}
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
defer c.save()
|
||||
for i := 0; i < len(c.Apps); i++ {
|
||||
if c.Apps[i].Protocol == app.Protocol && c.Apps[i].SrcPort == app.SrcPort {
|
||||
c.Apps = append(c.Apps[:i], c.Apps[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (c *Config) save() {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
// c.mtx.Lock()
|
||||
// defer c.mtx.Unlock() // internal call
|
||||
data, _ := json.MarshalIndent(c, "", " ")
|
||||
err := ioutil.WriteFile("config.json", data, 0644)
|
||||
if err != nil {
|
||||
@@ -106,7 +127,7 @@ func (c *Config) save() {
|
||||
}
|
||||
|
||||
func init() {
|
||||
gConf.LogLevel = 1
|
||||
gConf.LogLevel = int(LvINFO)
|
||||
gConf.Network.ShareBandwidth = 10
|
||||
gConf.Network.ServerHost = "api.openp2p.cn"
|
||||
gConf.Network.ServerPort = WsPort
|
||||
@@ -128,26 +149,43 @@ func (c *Config) load() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// deal with multi-thread r/w
|
||||
func (c *Config) setToken(token uint64) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
c.Network.Token = token
|
||||
defer c.save()
|
||||
if token != 0 {
|
||||
c.Network.Token = token
|
||||
}
|
||||
}
|
||||
func (c *Config) setUser(user string) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
defer c.save()
|
||||
c.Network.User = user
|
||||
}
|
||||
func (c *Config) setNode(node string) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
defer c.save()
|
||||
c.Network.Node = node
|
||||
}
|
||||
func (c *Config) setShareBandwidth(bw int) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
defer c.save()
|
||||
c.Network.ShareBandwidth = bw
|
||||
}
|
||||
func (c *Config) setIPv6(v6 string) {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
c.Network.publicIPv6 = v6
|
||||
}
|
||||
func (c *Config) IPv6() string {
|
||||
c.mtx.Lock()
|
||||
defer c.mtx.Unlock()
|
||||
return c.Network.publicIPv6
|
||||
}
|
||||
|
||||
type NetworkConfig struct {
|
||||
// local info
|
||||
@@ -180,16 +218,18 @@ func parseParams(subCommand string) {
|
||||
node := fset.String("node", "", "node name. 8-31 characters. if not set, it will be hostname")
|
||||
peerNode := fset.String("peernode", "", "peer node name that you want to connect")
|
||||
dstIP := fset.String("dstip", "127.0.0.1", "destination ip ")
|
||||
whiteList := fset.String("whitelist", "", "whitelist for p2pApp ")
|
||||
dstPort := fset.Int("dstport", 0, "destination port ")
|
||||
srcPort := fset.Int("srcport", 0, "source port ")
|
||||
tcpPort := fset.Int("tcpport", 0, "tcp port for upnp or publicip")
|
||||
protocol := fset.String("protocol", "tcp", "tcp or udp")
|
||||
appName := fset.String("appname", "", "app name")
|
||||
relayNode := fset.String("relaynode", "", "relaynode")
|
||||
shareBandwidth := fset.Int("sharebandwidth", 10, "N mbps share bandwidth limit, private network no limit")
|
||||
daemonMode := fset.Bool("d", false, "daemonMode")
|
||||
notVerbose := fset.Bool("nv", false, "not log console")
|
||||
newconfig := fset.Bool("newconfig", false, "not load existing config.json")
|
||||
logLevel := fset.Int("loglevel", 0, "0:info 1:warn 2:error 3:debug")
|
||||
logLevel := fset.Int("loglevel", 1, "0:debug 1:info 2:warn 3:error")
|
||||
if subCommand == "" { // no subcommand
|
||||
fset.Parse(os.Args[1:])
|
||||
} else {
|
||||
@@ -199,10 +239,12 @@ func parseParams(subCommand string) {
|
||||
config := AppConfig{Enabled: 1}
|
||||
config.PeerNode = *peerNode
|
||||
config.DstHost = *dstIP
|
||||
config.Whitelist = *whiteList
|
||||
config.DstPort = *dstPort
|
||||
config.SrcPort = *srcPort
|
||||
config.Protocol = *protocol
|
||||
config.AppName = *appName
|
||||
config.RelayNode = *relayNode
|
||||
if !*newconfig {
|
||||
gConf.load() // load old config. otherwise will clear all apps
|
||||
}
|
||||
@@ -229,20 +271,20 @@ func parseParams(subCommand string) {
|
||||
gConf.Network.TCPPort = *tcpPort
|
||||
}
|
||||
if f.Name == "token" {
|
||||
gConf.Network.Token = *token
|
||||
gConf.setToken(*token)
|
||||
}
|
||||
})
|
||||
|
||||
// set default value
|
||||
if gConf.Network.ServerHost == "" {
|
||||
gConf.Network.ServerHost = *serverHost
|
||||
}
|
||||
if *node != "" {
|
||||
if len(*node) < MinNodeNameLen {
|
||||
gLog.Println(LvERROR, ErrNodeTooShort)
|
||||
os.Exit(9)
|
||||
}
|
||||
gConf.Network.Node = *node
|
||||
} else {
|
||||
envNode := os.Getenv("OPENP2P_NODE")
|
||||
if envNode != "" {
|
||||
gConf.Network.Node = envNode
|
||||
}
|
||||
if gConf.Network.Node == "" { // if node name not set. use os.Hostname
|
||||
gConf.Network.Node = defaultNodeName()
|
||||
}
|
||||
@@ -254,7 +296,14 @@ func parseParams(subCommand string) {
|
||||
}
|
||||
gConf.Network.TCPPort = *tcpPort
|
||||
}
|
||||
|
||||
if *token == 0 {
|
||||
envToken := os.Getenv("OPENP2P_TOKEN")
|
||||
if envToken != "" {
|
||||
if n, err := strconv.ParseUint(envToken, 10, 64); n != 0 && err == nil {
|
||||
gConf.setToken(n)
|
||||
}
|
||||
}
|
||||
}
|
||||
gConf.Network.ServerPort = *serverPort
|
||||
gConf.Network.UDPPort1 = UDPPort1
|
||||
gConf.Network.UDPPort2 = UDPPort2
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/kardianos/service"
|
||||
"github.com/openp2p-cn/service"
|
||||
)
|
||||
|
||||
type daemon struct {
|
||||
@@ -44,9 +44,9 @@ func (d *daemon) run() {
|
||||
}
|
||||
gLog.Println(LvINFO, mydir)
|
||||
conf := &service.Config{
|
||||
Name: ProducnName,
|
||||
DisplayName: ProducnName,
|
||||
Description: ProducnName,
|
||||
Name: ProductName,
|
||||
DisplayName: ProductName,
|
||||
Description: ProductName,
|
||||
Executable: binPath,
|
||||
}
|
||||
|
||||
@@ -95,9 +95,9 @@ func (d *daemon) run() {
|
||||
|
||||
func (d *daemon) Control(ctrlComm string, exeAbsPath string, args []string) error {
|
||||
svcConfig := &service.Config{
|
||||
Name: ProducnName,
|
||||
DisplayName: ProducnName,
|
||||
Description: ProducnName,
|
||||
Name: ProductName,
|
||||
DisplayName: ProductName,
|
||||
Description: ProductName,
|
||||
Executable: exeAbsPath,
|
||||
Arguments: args,
|
||||
}
|
||||
|
||||
@@ -8,12 +8,21 @@ import (
|
||||
var (
|
||||
// ErrorS2S string = "s2s is not supported"
|
||||
// ErrorHandshake string = "handshake error"
|
||||
ErrorS2S = errors.New("s2s is not supported")
|
||||
ErrorHandshake = errors.New("handshake error")
|
||||
ErrorNewUser = errors.New("new user")
|
||||
ErrorLogin = errors.New("user or password not correct")
|
||||
ErrNodeTooShort = errors.New("node name too short, it must >=8 charaters")
|
||||
ErrPeerOffline = errors.New("peer offline")
|
||||
ErrMsgFormat = errors.New("message format wrong")
|
||||
ErrVersionNotCompatible = errors.New("version not compatible")
|
||||
ErrorS2S = errors.New("s2s is not supported")
|
||||
ErrorHandshake = errors.New("handshake error")
|
||||
ErrorNewUser = errors.New("new user")
|
||||
ErrorLogin = errors.New("user or password not correct")
|
||||
ErrNodeTooShort = errors.New("node name too short, it must >=8 charaters")
|
||||
ErrReadDB = errors.New("read db error")
|
||||
ErrNoUpdate = errors.New("there are currently no updates available")
|
||||
ErrPeerOffline = errors.New("peer offline")
|
||||
ErrNetwork = errors.New("network error")
|
||||
ErrMsgFormat = errors.New("message format wrong")
|
||||
ErrVersionNotCompatible = errors.New("version not compatible")
|
||||
ErrOverlayConnDisconnect = errors.New("overlay connection is disconnected")
|
||||
ErrConnectRelayNode = errors.New("connect relay node error")
|
||||
ErrConnectPublicV4 = errors.New("connect public ipv4 error")
|
||||
ErrMsgChannelNotFound = errors.New("message channel not found")
|
||||
ErrRelayTunnelNotFound = errors.New("relay tunnel not found")
|
||||
ErrSymmetricLimit = errors.New("symmetric limit")
|
||||
)
|
||||
|
||||
@@ -6,9 +6,11 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/openp2p-cn/totp"
|
||||
)
|
||||
|
||||
func handlePush(pn *P2PNetwork, subType uint16, msg []byte) error {
|
||||
@@ -19,63 +21,11 @@ func handlePush(pn *P2PNetwork, subType uint16, msg []byte) error {
|
||||
}
|
||||
gLog.Printf(LvDEBUG, "handle push msg type:%d, push header:%+v", subType, pushHead)
|
||||
switch subType {
|
||||
case MsgPushConnectReq: // TODO: handle a msg move to a new function
|
||||
req := PushConnectReq{}
|
||||
err := json.Unmarshal(msg[openP2PHeaderSize+PushHeaderSize:], &req)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong MsgPushConnectReq:%s", err)
|
||||
return err
|
||||
}
|
||||
gLog.Printf(LvINFO, "%s is connecting...", req.From)
|
||||
gLog.Println(LvDEBUG, "push connect response to ", req.From)
|
||||
if compareVersion(req.Version, LeastSupportVersion) == LESS {
|
||||
gLog.Println(LvERROR, ErrVersionNotCompatible.Error(), ":", req.From)
|
||||
rsp := PushConnectRsp{
|
||||
Error: 10,
|
||||
Detail: ErrVersionNotCompatible.Error(),
|
||||
To: req.From,
|
||||
From: pn.config.Node,
|
||||
}
|
||||
pn.push(req.From, MsgPushConnectRsp, rsp)
|
||||
return ErrVersionNotCompatible
|
||||
}
|
||||
// verify totp token or token
|
||||
if VerifyTOTP(req.Token, pn.config.Token, time.Now().Unix()+(pn.serverTs-pn.localTs)) || // localTs may behind, auto adjust ts
|
||||
VerifyTOTP(req.Token, pn.config.Token, time.Now().Unix()) {
|
||||
gLog.Printf(LvINFO, "Access Granted\n")
|
||||
config := AppConfig{}
|
||||
config.peerNatType = req.NatType
|
||||
config.peerConeNatPort = req.ConeNatPort
|
||||
config.peerIP = req.FromIP
|
||||
config.PeerNode = req.From
|
||||
config.peerVersion = req.Version
|
||||
config.fromToken = req.Token
|
||||
config.peerIPv6 = req.IPv6
|
||||
config.hasIPv4 = req.HasIPv4
|
||||
config.hasUPNPorNATPMP = req.HasUPNPorNATPMP
|
||||
config.linkMode = req.LinkMode
|
||||
config.isUnderlayServer = req.IsUnderlayServer
|
||||
// share relay node will limit bandwidth
|
||||
if req.Token != pn.config.Token {
|
||||
gLog.Printf(LvINFO, "set share bandwidth %d mbps", pn.config.ShareBandwidth)
|
||||
config.shareBandwidth = pn.config.ShareBandwidth
|
||||
}
|
||||
// go pn.AddTunnel(config, req.ID)
|
||||
go pn.addDirectTunnel(config, req.ID)
|
||||
break
|
||||
}
|
||||
gLog.Println(LvERROR, "Access Denied:", req.From)
|
||||
rsp := PushConnectRsp{
|
||||
Error: 1,
|
||||
Detail: fmt.Sprintf("connect to %s error: Access Denied", pn.config.Node),
|
||||
To: req.From,
|
||||
From: pn.config.Node,
|
||||
}
|
||||
pn.push(req.From, MsgPushConnectRsp, rsp)
|
||||
case MsgPushConnectReq:
|
||||
err = handleConnectReq(pn, subType, msg)
|
||||
case MsgPushRsp:
|
||||
rsp := PushRsp{}
|
||||
err := json.Unmarshal(msg[openP2PHeaderSize:], &rsp)
|
||||
if err != nil {
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize:], &rsp); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong pushRsp:%s", err)
|
||||
return err
|
||||
}
|
||||
@@ -86,9 +36,8 @@ func handlePush(pn *P2PNetwork, subType uint16, msg []byte) error {
|
||||
}
|
||||
case MsgPushAddRelayTunnelReq:
|
||||
req := AddRelayTunnelReq{}
|
||||
err := json.Unmarshal(msg[openP2PHeaderSize+PushHeaderSize:], &req)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong RelayNodeRsp:%s", err)
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize+PushHeaderSize:], &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
|
||||
return err
|
||||
}
|
||||
config := AppConfig{}
|
||||
@@ -100,29 +49,20 @@ func handlePush(pn *P2PNetwork, subType uint16, msg []byte) error {
|
||||
// notify peer relay ready
|
||||
msg := TunnelMsg{ID: t.id}
|
||||
pn.push(r.From, MsgPushAddRelayTunnelRsp, msg)
|
||||
} else {
|
||||
pn.push(r.From, MsgPushAddRelayTunnelRsp, "error") // compatible with old version client, trigger unmarshal error
|
||||
}
|
||||
|
||||
}(req)
|
||||
case MsgPushAPPKey:
|
||||
req := APPKeySync{}
|
||||
err := json.Unmarshal(msg[openP2PHeaderSize+PushHeaderSize:], &req)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong APPKeySync:%s", err)
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize+PushHeaderSize:], &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
|
||||
return err
|
||||
}
|
||||
SaveKey(req.AppID, req.AppKey)
|
||||
case MsgPushUpdate:
|
||||
gLog.Println(LvINFO, "MsgPushUpdate")
|
||||
update(pn.config.ServerHost, pn.config.ServerPort) // download new version first, then exec ./openp2p update
|
||||
targetPath := filepath.Join(defaultInstallPath, defaultBinName)
|
||||
args := []string{"update"}
|
||||
env := os.Environ()
|
||||
cmd := exec.Command(targetPath, args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Env = env
|
||||
err := cmd.Run()
|
||||
err := update(pn.config.ServerHost, pn.config.ServerPort)
|
||||
if err == nil {
|
||||
os.Exit(0)
|
||||
}
|
||||
@@ -132,137 +72,26 @@ func handlePush(pn *P2PNetwork, subType uint16, msg []byte) error {
|
||||
os.Exit(0)
|
||||
return err
|
||||
case MsgPushReportApps:
|
||||
gLog.Println(LvINFO, "MsgPushReportApps")
|
||||
req := ReportApps{}
|
||||
gConf.mtx.Lock()
|
||||
defer gConf.mtx.Unlock()
|
||||
for _, config := range gConf.Apps {
|
||||
appActive := 0
|
||||
relayNode := ""
|
||||
relayMode := ""
|
||||
linkMode := LinkModeUDPPunch
|
||||
i, ok := pn.apps.Load(fmt.Sprintf("%s%d", config.Protocol, config.SrcPort))
|
||||
if ok {
|
||||
app := i.(*p2pApp)
|
||||
if app.isActive() {
|
||||
appActive = 1
|
||||
}
|
||||
relayNode = app.relayNode
|
||||
relayMode = app.relayMode
|
||||
linkMode = app.tunnel.linkModeWeb
|
||||
}
|
||||
appInfo := AppInfo{
|
||||
AppName: config.AppName,
|
||||
Error: config.errMsg,
|
||||
Protocol: config.Protocol,
|
||||
SrcPort: config.SrcPort,
|
||||
RelayNode: relayNode,
|
||||
RelayMode: relayMode,
|
||||
LinkMode: linkMode,
|
||||
PeerNode: config.PeerNode,
|
||||
DstHost: config.DstHost,
|
||||
DstPort: config.DstPort,
|
||||
PeerUser: config.PeerUser,
|
||||
PeerIP: config.peerIP,
|
||||
PeerNatType: config.peerNatType,
|
||||
RetryTime: config.retryTime.Local().Format("2006-01-02T15:04:05-0700"),
|
||||
ConnectTime: config.connectTime.Local().Format("2006-01-02T15:04:05-0700"),
|
||||
IsActive: appActive,
|
||||
Enabled: config.Enabled,
|
||||
}
|
||||
req.Apps = append(req.Apps, appInfo)
|
||||
}
|
||||
pn.write(MsgReport, MsgReportApps, &req)
|
||||
err = handleReportApps(pn, subType, msg)
|
||||
case MsgPushReportLog:
|
||||
gLog.Println(LvINFO, "MsgPushReportLog")
|
||||
req := ReportLogReq{}
|
||||
err := json.Unmarshal(msg[openP2PHeaderSize:], &req)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong MsgPushReportLog:%s %s", err, string(msg[openP2PHeaderSize:]))
|
||||
return err
|
||||
}
|
||||
if req.FileName == "" {
|
||||
req.FileName = "openp2p.log"
|
||||
}
|
||||
f, err := os.Open(filepath.Join("log", req.FileName))
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "read log file error:", err)
|
||||
break
|
||||
}
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if req.Offset == 0 && fi.Size() > 4096 {
|
||||
req.Offset = fi.Size() - 4096
|
||||
}
|
||||
if req.Len <= 0 {
|
||||
req.Len = 4096
|
||||
}
|
||||
f.Seek(req.Offset, 0)
|
||||
if req.Len > 1024*1024 { // too large
|
||||
break
|
||||
}
|
||||
buff := make([]byte, req.Len)
|
||||
readLength, err := f.Read(buff)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "read log content error:", err)
|
||||
break
|
||||
}
|
||||
rsp := ReportLogRsp{}
|
||||
rsp.Content = string(buff[:readLength])
|
||||
rsp.FileName = req.FileName
|
||||
rsp.Total = fi.Size()
|
||||
rsp.Len = req.Len
|
||||
pn.write(MsgReport, MsgPushReportLog, &rsp)
|
||||
err = handleLog(pn, subType, msg)
|
||||
case MsgPushEditApp:
|
||||
gLog.Println(LvINFO, "MsgPushEditApp")
|
||||
newApp := AppInfo{}
|
||||
err := json.Unmarshal(msg[openP2PHeaderSize:], &newApp)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong MsgPushEditApp:%s %s", err, string(msg[openP2PHeaderSize:]))
|
||||
return err
|
||||
}
|
||||
oldConf := AppConfig{Enabled: 1}
|
||||
// protocol0+srcPort0 exist, delApp
|
||||
oldConf.AppName = newApp.AppName
|
||||
oldConf.Protocol = newApp.Protocol0
|
||||
oldConf.SrcPort = newApp.SrcPort0
|
||||
oldConf.PeerNode = newApp.PeerNode
|
||||
oldConf.DstHost = newApp.DstHost
|
||||
oldConf.DstPort = newApp.DstPort
|
||||
|
||||
gConf.delete(oldConf)
|
||||
// AddApp
|
||||
newConf := oldConf
|
||||
newConf.Protocol = newApp.Protocol
|
||||
newConf.SrcPort = newApp.SrcPort
|
||||
gConf.add(newConf, false)
|
||||
gConf.save() // save quickly for the next request reportApplist
|
||||
pn.DeleteApp(oldConf) // DeleteApp may cost some times, execute at the end
|
||||
// autoReconnect will auto AddApp
|
||||
// pn.AddApp(config)
|
||||
// TODO: report result
|
||||
err = handleEditApp(pn, subType, msg)
|
||||
case MsgPushEditNode:
|
||||
gLog.Println(LvINFO, "MsgPushEditNode")
|
||||
req := EditNode{}
|
||||
err := json.Unmarshal(msg[openP2PHeaderSize:], &req)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong MsgPushEditNode:%s %s", err, string(msg[openP2PHeaderSize:]))
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize:], &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s %s", reflect.TypeOf(req), err, string(msg[openP2PHeaderSize:]))
|
||||
return err
|
||||
}
|
||||
gConf.setNode(req.NewName)
|
||||
gConf.setShareBandwidth(req.Bandwidth)
|
||||
gConf.save()
|
||||
// TODO: hot reload
|
||||
os.Exit(0)
|
||||
case MsgPushSwitchApp:
|
||||
gLog.Println(LvINFO, "MsgPushSwitchApp")
|
||||
app := AppInfo{}
|
||||
err := json.Unmarshal(msg[openP2PHeaderSize:], &app)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong MsgPushSwitchApp:%s %s", err, string(msg[openP2PHeaderSize:]))
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize:], &app); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s %s", reflect.TypeOf(app), err, string(msg[openP2PHeaderSize:]))
|
||||
return err
|
||||
}
|
||||
config := AppConfig{Enabled: app.Enabled, SrcPort: app.SrcPort, Protocol: app.Protocol}
|
||||
@@ -272,11 +101,196 @@ func handlePush(pn *P2PNetwork, subType uint16, msg []byte) error {
|
||||
// disable APP
|
||||
pn.DeleteApp(config)
|
||||
}
|
||||
case MsgPushDstNodeOnline:
|
||||
gLog.Println(LvINFO, "MsgPushDstNodeOnline")
|
||||
req := PushDstNodeOnline{}
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize:], &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s %s", reflect.TypeOf(req), err, string(msg[openP2PHeaderSize:]))
|
||||
return err
|
||||
}
|
||||
gLog.Println(LvINFO, "retry peerNode ", req.Node)
|
||||
gConf.retryApp(req.Node)
|
||||
default:
|
||||
pn.msgMapMtx.Lock()
|
||||
ch := pn.msgMap[pushHead.From]
|
||||
pn.msgMapMtx.Unlock()
|
||||
ch <- msg
|
||||
i, ok := pn.msgMap.Load(pushHead.From)
|
||||
if !ok {
|
||||
return ErrMsgChannelNotFound
|
||||
}
|
||||
ch := i.(chan msgCtx)
|
||||
ch <- msgCtx{data: msg, ts: time.Now()}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func handleEditApp(pn *P2PNetwork, subType uint16, msg []byte) (err error) {
|
||||
gLog.Println(LvINFO, "MsgPushEditApp")
|
||||
newApp := AppInfo{}
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize:], &newApp); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s %s", reflect.TypeOf(newApp), err, string(msg[openP2PHeaderSize:]))
|
||||
return err
|
||||
}
|
||||
oldConf := AppConfig{Enabled: 1}
|
||||
// protocol0+srcPort0 exist, delApp
|
||||
oldConf.AppName = newApp.AppName
|
||||
oldConf.Protocol = newApp.Protocol0
|
||||
oldConf.Whitelist = newApp.Whitelist
|
||||
oldConf.SrcPort = newApp.SrcPort0
|
||||
oldConf.PeerNode = newApp.PeerNode
|
||||
oldConf.DstHost = newApp.DstHost
|
||||
oldConf.DstPort = newApp.DstPort
|
||||
|
||||
gConf.delete(oldConf)
|
||||
// AddApp
|
||||
newConf := oldConf
|
||||
newConf.Protocol = newApp.Protocol
|
||||
newConf.SrcPort = newApp.SrcPort
|
||||
gConf.add(newConf, false)
|
||||
pn.DeleteApp(oldConf) // DeleteApp may cost some times, execute at the end
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleConnectReq(pn *P2PNetwork, subType uint16, msg []byte) (err error) {
|
||||
req := PushConnectReq{}
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize+PushHeaderSize:], &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
|
||||
return err
|
||||
}
|
||||
gLog.Printf(LvDEBUG, "%s is connecting...", req.From)
|
||||
gLog.Println(LvDEBUG, "push connect response to ", req.From)
|
||||
if compareVersion(req.Version, LeastSupportVersion) == LESS {
|
||||
gLog.Println(LvERROR, ErrVersionNotCompatible.Error(), ":", req.From)
|
||||
rsp := PushConnectRsp{
|
||||
Error: 10,
|
||||
Detail: ErrVersionNotCompatible.Error(),
|
||||
To: req.From,
|
||||
From: pn.config.Node,
|
||||
}
|
||||
pn.push(req.From, MsgPushConnectRsp, rsp)
|
||||
return ErrVersionNotCompatible
|
||||
}
|
||||
// verify totp token or token
|
||||
t := totp.TOTP{Step: totp.RelayTOTPStep}
|
||||
if t.Verify(req.Token, pn.config.Token, time.Now().Unix()-pn.dt/int64(time.Second)) { // localTs may behind, auto adjust ts
|
||||
gLog.Printf(LvINFO, "Access Granted\n")
|
||||
config := AppConfig{}
|
||||
config.peerNatType = req.NatType
|
||||
config.peerConeNatPort = req.ConeNatPort
|
||||
config.peerIP = req.FromIP
|
||||
config.PeerNode = req.From
|
||||
config.peerVersion = req.Version
|
||||
config.fromToken = req.Token
|
||||
config.peerIPv6 = req.IPv6
|
||||
config.hasIPv4 = req.HasIPv4
|
||||
config.hasUPNPorNATPMP = req.HasUPNPorNATPMP
|
||||
config.linkMode = req.LinkMode
|
||||
config.isUnderlayServer = req.IsUnderlayServer
|
||||
// share relay node will limit bandwidth
|
||||
if req.Token != pn.config.Token {
|
||||
gLog.Printf(LvINFO, "set share bandwidth %d mbps", pn.config.ShareBandwidth)
|
||||
config.shareBandwidth = pn.config.ShareBandwidth
|
||||
}
|
||||
// go pn.AddTunnel(config, req.ID)
|
||||
go pn.addDirectTunnel(config, req.ID)
|
||||
return nil
|
||||
}
|
||||
gLog.Println(LvERROR, "Access Denied:", req.From)
|
||||
rsp := PushConnectRsp{
|
||||
Error: 1,
|
||||
Detail: fmt.Sprintf("connect to %s error: Access Denied", pn.config.Node),
|
||||
To: req.From,
|
||||
From: pn.config.Node,
|
||||
}
|
||||
return pn.push(req.From, MsgPushConnectRsp, rsp)
|
||||
}
|
||||
|
||||
func handleReportApps(pn *P2PNetwork, subType uint16, msg []byte) (err error) {
|
||||
gLog.Println(LvINFO, "MsgPushReportApps")
|
||||
req := ReportApps{}
|
||||
gConf.mtx.Lock()
|
||||
defer gConf.mtx.Unlock()
|
||||
for _, config := range gConf.Apps {
|
||||
appActive := 0
|
||||
relayNode := ""
|
||||
relayMode := ""
|
||||
linkMode := LinkModeUDPPunch
|
||||
i, ok := pn.apps.Load(config.ID())
|
||||
if ok {
|
||||
app := i.(*p2pApp)
|
||||
if app.isActive() {
|
||||
appActive = 1
|
||||
}
|
||||
relayNode = app.relayNode
|
||||
relayMode = app.relayMode
|
||||
linkMode = app.tunnel.linkModeWeb
|
||||
}
|
||||
appInfo := AppInfo{
|
||||
AppName: config.AppName,
|
||||
Error: config.errMsg,
|
||||
Protocol: config.Protocol,
|
||||
Whitelist: config.Whitelist,
|
||||
SrcPort: config.SrcPort,
|
||||
RelayNode: relayNode,
|
||||
RelayMode: relayMode,
|
||||
LinkMode: linkMode,
|
||||
PeerNode: config.PeerNode,
|
||||
DstHost: config.DstHost,
|
||||
DstPort: config.DstPort,
|
||||
PeerUser: config.PeerUser,
|
||||
PeerIP: config.peerIP,
|
||||
PeerNatType: config.peerNatType,
|
||||
RetryTime: config.retryTime.Local().Format("2006-01-02T15:04:05-0700"),
|
||||
ConnectTime: config.connectTime.Local().Format("2006-01-02T15:04:05-0700"),
|
||||
IsActive: appActive,
|
||||
Enabled: config.Enabled,
|
||||
}
|
||||
req.Apps = append(req.Apps, appInfo)
|
||||
}
|
||||
return pn.write(MsgReport, MsgReportApps, &req)
|
||||
}
|
||||
|
||||
func handleLog(pn *P2PNetwork, subType uint16, msg []byte) (err error) {
|
||||
gLog.Println(LvDEBUG, "MsgPushReportLog")
|
||||
const defaultLen = 1024 * 128
|
||||
const maxLen = 1024 * 1024
|
||||
req := ReportLogReq{}
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize:], &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s %s", reflect.TypeOf(req), err, string(msg[openP2PHeaderSize:]))
|
||||
return err
|
||||
}
|
||||
if req.FileName == "" {
|
||||
req.FileName = "openp2p.log"
|
||||
}
|
||||
f, err := os.Open(filepath.Join("log", req.FileName))
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "read log file error:", err)
|
||||
return err
|
||||
}
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if req.Offset > fi.Size() {
|
||||
req.Offset = fi.Size() - defaultLen
|
||||
}
|
||||
// verify input parameters
|
||||
if req.Offset < 0 {
|
||||
req.Offset = 0
|
||||
}
|
||||
if req.Len <= 0 || req.Len > maxLen {
|
||||
req.Len = defaultLen
|
||||
}
|
||||
|
||||
f.Seek(req.Offset, 0)
|
||||
buff := make([]byte, req.Len)
|
||||
readLength, err := f.Read(buff)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "read log content error:", err)
|
||||
return err
|
||||
}
|
||||
rsp := ReportLogRsp{}
|
||||
rsp.Content = string(buff[:readLength])
|
||||
rsp.FileName = req.FileName
|
||||
rsp.Total = fi.Size()
|
||||
rsp.Len = req.Len
|
||||
return pn.write(MsgReport, MsgPushReportLog, &rsp)
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -23,33 +22,21 @@ func handshakeC2C(t *P2PTunnel) (err error) {
|
||||
gLog.Println(LvDEBUG, "handshakeC2C write MsgPunchHandshake error:", err)
|
||||
return err
|
||||
}
|
||||
ra, head, _, _, err := UDPRead(conn, 5000)
|
||||
ra, head, _, _, err := UDPRead(conn, HandshakeTimeout)
|
||||
if err != nil {
|
||||
time.Sleep(time.Millisecond * 200)
|
||||
gLog.Println(LvDEBUG, err, ", return this error when ip was not reachable, retry read")
|
||||
ra, head, _, _, err = UDPRead(conn, 5000)
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "handshakeC2C read MsgPunchHandshake error:", err)
|
||||
return err
|
||||
}
|
||||
gLog.Println(LvDEBUG, "handshakeC2C read MsgPunchHandshake error:", err)
|
||||
return err
|
||||
}
|
||||
t.ra, _ = net.ResolveUDPAddr("udp", ra.String())
|
||||
// cone server side
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshake {
|
||||
gLog.Printf(LvDEBUG, "read %d handshake ", t.id)
|
||||
UDPWrite(conn, t.ra, MsgP2P, MsgPunchHandshakeAck, P2PHandshakeReq{ID: t.id})
|
||||
_, head, _, _, err = UDPRead(conn, 5000)
|
||||
_, head, _, _, err = UDPRead(conn, HandshakeTimeout)
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "handshakeC2C write MsgPunchHandshakeAck error", err)
|
||||
return err
|
||||
}
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshakeAck {
|
||||
gLog.Printf(LvDEBUG, "read %d handshake ack ", t.id)
|
||||
gLog.Printf(LvINFO, "handshakeC2C ok")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// cone client side will only read handshake ack
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshakeAck {
|
||||
gLog.Printf(LvDEBUG, "read %d handshake ack ", t.id)
|
||||
_, err = UDPWrite(conn, t.ra, MsgP2P, MsgPunchHandshakeAck, P2PHandshakeReq{ID: t.id})
|
||||
@@ -65,8 +52,7 @@ func handshakeC2C(t *P2PTunnel) (err error) {
|
||||
func handshakeC2S(t *P2PTunnel) error {
|
||||
gLog.Printf(LvDEBUG, "handshakeC2S start")
|
||||
defer gLog.Printf(LvDEBUG, "handshakeC2S end")
|
||||
// even if read timeout, continue handshake
|
||||
t.pn.read(t.config.PeerNode, MsgPush, MsgPushHandshakeStart, SymmetricHandshakeAckTimeout)
|
||||
startTime := time.Now()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
randPorts := r.Perm(65532)
|
||||
conn, err := net.ListenUDP("udp", t.la)
|
||||
@@ -74,11 +60,11 @@ func handshakeC2S(t *P2PTunnel) error {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
go func() error {
|
||||
gLog.Printf(LvDEBUG, "send symmetric handshake to %s from %d:%d start", t.config.peerIP, t.coneLocalPort, t.coneNatPort)
|
||||
for i := 0; i < SymmetricHandshakeNum; i++ {
|
||||
// TODO: auto calc cost time
|
||||
time.Sleep(SymmetricHandshakeInterval)
|
||||
// time.Sleep(SymmetricHandshakeInterval)
|
||||
dst, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", t.config.peerIP, randPorts[i]+2))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -92,8 +78,7 @@ func handshakeC2S(t *P2PTunnel) error {
|
||||
gLog.Println(LvDEBUG, "send symmetric handshake end")
|
||||
return nil
|
||||
}()
|
||||
deadline := time.Now().Add(SymmetricHandshakeAckTimeout)
|
||||
err = conn.SetReadDeadline(deadline)
|
||||
err = conn.SetReadDeadline(time.Now().Add(HandshakeTimeout))
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "SymmetricHandshakeAckTimeout SetReadDeadline error")
|
||||
return err
|
||||
@@ -112,73 +97,102 @@ func handshakeC2S(t *P2PTunnel) error {
|
||||
return err
|
||||
}
|
||||
t.ra, _ = net.ResolveUDPAddr("udp", dst.String())
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshakeAck {
|
||||
gLog.Printf(LvDEBUG, "handshakeC2S read %d handshake ack %s", t.id, dst.String())
|
||||
_, err = UDPWrite(conn, dst, MsgP2P, MsgPunchHandshakeAck, P2PHandshakeReq{ID: t.id})
|
||||
return err
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshake {
|
||||
gLog.Printf(LvDEBUG, "handshakeC2S read %d handshake ", t.id)
|
||||
UDPWrite(conn, t.ra, MsgP2P, MsgPunchHandshakeAck, P2PHandshakeReq{ID: t.id})
|
||||
for {
|
||||
_, head, _, _, err = UDPRead(conn, HandshakeTimeout)
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "handshakeC2S handshake error")
|
||||
return err
|
||||
}
|
||||
// waiting ack
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshakeAck {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
gLog.Printf(LvINFO, "handshakeC2S ok")
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshakeAck {
|
||||
gLog.Printf(LvDEBUG, "handshakeC2S read %d handshake ack %s", t.id, t.ra.String())
|
||||
_, err = UDPWrite(conn, t.ra, MsgP2P, MsgPunchHandshakeAck, P2PHandshakeReq{ID: t.id})
|
||||
return err
|
||||
} else {
|
||||
gLog.Println(LvDEBUG, "handshakeS2C read msg but not MsgPunchHandshakeAck")
|
||||
}
|
||||
gLog.Printf(LvINFO, "handshakeC2S ok. cost %d ms", time.Since(startTime)/time.Millisecond)
|
||||
return nil
|
||||
}
|
||||
|
||||
func handshakeS2C(t *P2PTunnel) error {
|
||||
gLog.Printf(LvDEBUG, "handshakeS2C start")
|
||||
defer gLog.Printf(LvDEBUG, "handshakeS2C end")
|
||||
startTime := time.Now()
|
||||
gotCh := make(chan *net.UDPAddr, 5)
|
||||
// sequencely udp send handshake, do not parallel send
|
||||
gLog.Printf(LvDEBUG, "send symmetric handshake to %s:%d start", t.config.peerIP, t.config.peerConeNatPort)
|
||||
gotIt := false
|
||||
gotMtx := sync.Mutex{}
|
||||
for i := 0; i < SymmetricHandshakeNum; i++ {
|
||||
// TODO: auto calc cost time
|
||||
time.Sleep(SymmetricHandshakeInterval)
|
||||
// time.Sleep(SymmetricHandshakeInterval)
|
||||
go func(t *P2PTunnel) error {
|
||||
conn, err := net.ListenUDP("udp", nil)
|
||||
conn, err := net.ListenUDP("udp", nil) // TODO: system allocated port really random?
|
||||
if err != nil {
|
||||
gLog.Printf(LvDEBUG, "listen error")
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
UDPWrite(conn, t.ra, MsgP2P, MsgPunchHandshake, P2PHandshakeReq{ID: t.id})
|
||||
_, head, _, _, err := UDPRead(conn, 10000)
|
||||
_, head, _, _, err := UDPRead(conn, HandshakeTimeout)
|
||||
if err != nil {
|
||||
// gLog.Println(LevelDEBUG, "one of the handshake error:", err)
|
||||
return err
|
||||
}
|
||||
gotMtx.Lock()
|
||||
defer gotMtx.Unlock()
|
||||
if gotIt {
|
||||
return nil
|
||||
}
|
||||
gotIt = true
|
||||
t.la, _ = net.ResolveUDPAddr("udp", conn.LocalAddr().String())
|
||||
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshake {
|
||||
gLog.Printf(LvDEBUG, "handshakeS2C read %d handshake ", t.id)
|
||||
UDPWrite(conn, t.ra, MsgP2P, MsgPunchHandshakeAck, P2PHandshakeReq{ID: t.id})
|
||||
_, head, _, _, err = UDPRead(conn, 5000)
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "handshakeS2C handshake error")
|
||||
return err
|
||||
}
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshakeAck {
|
||||
gLog.Printf(LvDEBUG, "handshakeS2C read %d handshake ack %s", t.id, conn.LocalAddr().String())
|
||||
gotCh <- t.la
|
||||
return nil
|
||||
// may read sereral MsgPunchHandshake
|
||||
for {
|
||||
_, head, _, _, err = UDPRead(conn, HandshakeTimeout)
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "handshakeS2C handshake error")
|
||||
return err
|
||||
}
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshakeAck {
|
||||
break
|
||||
} else {
|
||||
gLog.Println(LvDEBUG, "handshakeS2C read msg but not MsgPunchHandshakeAck")
|
||||
}
|
||||
}
|
||||
}
|
||||
if head.MainType == MsgP2P && head.SubType == MsgPunchHandshakeAck {
|
||||
gLog.Printf(LvDEBUG, "handshakeS2C read %d handshake ack %s", t.id, conn.LocalAddr().String())
|
||||
UDPWrite(conn, t.ra, MsgP2P, MsgPunchHandshakeAck, P2PHandshakeReq{ID: t.id})
|
||||
gotIt = true
|
||||
la, _ := net.ResolveUDPAddr("udp", conn.LocalAddr().String())
|
||||
gotCh <- la
|
||||
return nil
|
||||
} else {
|
||||
gLog.Println(LvDEBUG, "handshakeS2C read msg but not MsgPunchHandshakeAck")
|
||||
}
|
||||
return nil
|
||||
}(t)
|
||||
}
|
||||
gLog.Printf(LvDEBUG, "send symmetric handshake end")
|
||||
gLog.Println(LvDEBUG, "handshakeS2C ready, notify peer connect")
|
||||
t.pn.push(t.config.PeerNode, MsgPushHandshakeStart, TunnelMsg{ID: t.id})
|
||||
if compareVersion(t.config.peerVersion, SymmetricSimultaneouslySendVersion) == LESS { // compatible with old client
|
||||
gLog.Println(LvDEBUG, "handshakeS2C ready, notify peer connect")
|
||||
t.pn.push(t.config.PeerNode, MsgPushHandshakeStart, TunnelMsg{ID: t.id})
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(SymmetricHandshakeAckTimeout):
|
||||
case <-time.After(HandshakeTimeout):
|
||||
return fmt.Errorf("wait handshake failed")
|
||||
case la := <-gotCh:
|
||||
t.la = la
|
||||
gLog.Println(LvDEBUG, "symmetric handshake ok", la)
|
||||
gLog.Printf(LvINFO, "handshakeS2C ok")
|
||||
gLog.Printf(LvINFO, "handshakeS2C ok. cost %dms", time.Since(startTime)/time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
// ./openp2p install -node hhd1207-222 -token YOUR-TOKEN -sharebandwidth 0 -peernode hhdhome-n1 -dstip 127.0.0.1 -dstport 50022 -protocol tcp -srcport 22
|
||||
func install() {
|
||||
gLog.Println(LvINFO, "openp2p start. version: ", OpenP2PVersion)
|
||||
gLog.Println(LvINFO, "Contact: QQ: 16947733, Email: openp2p.cn@gmail.com")
|
||||
gLog.Println(LvINFO, "Contact: QQ group 16947733, Email openp2p.cn@gmail.com")
|
||||
gLog.Println(LvINFO, "install start")
|
||||
defer gLog.Println(LvINFO, "install end")
|
||||
// auto uninstall
|
||||
@@ -74,6 +74,7 @@ func install() {
|
||||
} else {
|
||||
gLog.Println(LvINFO, "start openp2p service ok.")
|
||||
}
|
||||
gLog.Println(LvINFO, "Visit WebUI on https://console.openp2p.cn")
|
||||
}
|
||||
|
||||
func installByFilename() {
|
||||
|
||||
162
core/iptree.go
Normal file
162
core/iptree.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/emirpasic/gods/trees/avltree"
|
||||
"github.com/emirpasic/gods/utils"
|
||||
)
|
||||
|
||||
// IPTree stores disjoint inclusive IPv4 ranges (host-byte-order uint32
// pairs) in an AVL tree keyed by range start. treeMtx guards the tree for
// concurrent readers and writers.
type IPTree struct {
	tree    *avltree.Tree // key: range min (uint32), value: range max (uint32)
	treeMtx sync.RWMutex
}
|
||||
|
||||
// add 120k cost 0.5s
|
||||
func (iptree *IPTree) AddIntIP(minIP uint32, maxIP uint32) bool {
|
||||
if minIP > maxIP {
|
||||
return false
|
||||
}
|
||||
iptree.treeMtx.Lock()
|
||||
defer iptree.treeMtx.Unlock()
|
||||
newMinIP := minIP
|
||||
newMaxIP := maxIP
|
||||
cur := iptree.tree.Root
|
||||
for {
|
||||
if cur == nil {
|
||||
break
|
||||
}
|
||||
curMaxIP := cur.Value.(uint32)
|
||||
curMinIP := cur.Key.(uint32)
|
||||
|
||||
// newNode all in existNode, treat as inserted.
|
||||
if newMinIP >= curMinIP && newMaxIP <= curMaxIP {
|
||||
return true
|
||||
}
|
||||
// has no interset
|
||||
if newMinIP > curMaxIP {
|
||||
cur = cur.Children[1]
|
||||
continue
|
||||
}
|
||||
if newMaxIP < curMinIP {
|
||||
cur = cur.Children[0]
|
||||
continue
|
||||
}
|
||||
// has interset, rm it and Add the new merged ip segment
|
||||
iptree.tree.Remove(curMinIP)
|
||||
if curMinIP < newMinIP {
|
||||
newMinIP = curMinIP
|
||||
}
|
||||
if curMaxIP > newMaxIP {
|
||||
newMaxIP = curMaxIP
|
||||
}
|
||||
cur = iptree.tree.Root
|
||||
}
|
||||
// put in the tree
|
||||
iptree.tree.Put(newMinIP, newMaxIP)
|
||||
return true
|
||||
}
|
||||
|
||||
func (iptree *IPTree) Add(minIPStr string, maxIPStr string) bool {
|
||||
var minIP, maxIP uint32
|
||||
binary.Read(bytes.NewBuffer(net.ParseIP(minIPStr).To4()), binary.BigEndian, &minIP)
|
||||
binary.Read(bytes.NewBuffer(net.ParseIP(maxIPStr).To4()), binary.BigEndian, &maxIP)
|
||||
return iptree.AddIntIP(minIP, maxIP)
|
||||
}
|
||||
|
||||
func (iptree *IPTree) Contains(ipStr string) bool {
|
||||
var ip uint32
|
||||
binary.Read(bytes.NewBuffer(net.ParseIP(ipStr).To4()), binary.BigEndian, &ip)
|
||||
return iptree.ContainsInt(ip)
|
||||
}
|
||||
|
||||
// IsLocalhost reports whether ipStr denotes the local host: the literal
// "localhost", the IPv4 loopback 127.0.0.1, or the IPv6 loopback ::1.
func IsLocalhost(ipStr string) bool {
	switch ipStr {
	case "localhost", "127.0.0.1", "::1":
		return true
	}
	return false
}
|
||||
|
||||
func (iptree *IPTree) ContainsInt(ip uint32) bool {
|
||||
iptree.treeMtx.RLock()
|
||||
defer iptree.treeMtx.RUnlock()
|
||||
if iptree.tree == nil {
|
||||
return false
|
||||
}
|
||||
n := iptree.tree.Root
|
||||
for n != nil {
|
||||
curMaxIP := n.Value.(uint32)
|
||||
curMinIP := n.Key.(uint32)
|
||||
switch {
|
||||
case ip >= curMinIP && ip <= curMaxIP: // hit
|
||||
return true
|
||||
case ip < curMinIP:
|
||||
n = n.Children[0]
|
||||
default:
|
||||
n = n.Children[1]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (iptree *IPTree) Size() int {
|
||||
iptree.treeMtx.RLock()
|
||||
defer iptree.treeMtx.RUnlock()
|
||||
return iptree.tree.Size()
|
||||
}
|
||||
|
||||
func (iptree *IPTree) Print() {
|
||||
iptree.treeMtx.RLock()
|
||||
defer iptree.treeMtx.RUnlock()
|
||||
log.Println("size:", iptree.Size())
|
||||
log.Println(iptree.tree.String())
|
||||
}
|
||||
|
||||
func (iptree *IPTree) Clear() {
|
||||
iptree.treeMtx.Lock()
|
||||
defer iptree.treeMtx.Unlock()
|
||||
iptree.tree.Clear()
|
||||
}
|
||||
|
||||
// input format 127.0.0.1,192.168.1.0/24,10.1.1.30-10.1.1.50
|
||||
// 127.0.0.1
|
||||
// 192.168.1.0/24
|
||||
// 192.168.1.1-192.168.1.10
|
||||
func NewIPTree(ips string) *IPTree {
|
||||
iptree := &IPTree{
|
||||
tree: avltree.NewWith(utils.UInt32Comparator),
|
||||
}
|
||||
ipArr := strings.Split(ips, ",")
|
||||
for _, ip := range ipArr {
|
||||
if strings.Contains(ip, "/") { // x.x.x.x/24
|
||||
_, ipNet, err := net.ParseCIDR(ip)
|
||||
if err != nil {
|
||||
fmt.Println("Error parsing CIDR:", err)
|
||||
continue
|
||||
}
|
||||
minIP := ipNet.IP.Mask(ipNet.Mask).String()
|
||||
maxIP := calculateMaxIP(ipNet).String()
|
||||
iptree.Add(minIP, maxIP)
|
||||
} else if strings.Contains(ip, "-") { // x.x.x.x-y.y.y.y
|
||||
minAndMax := strings.Split(ip, "-")
|
||||
iptree.Add(minAndMax[0], minAndMax[1])
|
||||
} else { // single ip
|
||||
iptree.Add(ip, ip)
|
||||
}
|
||||
}
|
||||
return iptree
|
||||
}
|
||||
func calculateMaxIP(ipNet *net.IPNet) net.IP {
|
||||
maxIP := make(net.IP, len(ipNet.IP))
|
||||
copy(maxIP, ipNet.IP)
|
||||
for i := range maxIP {
|
||||
maxIP[i] |= ^ipNet.Mask[i]
|
||||
}
|
||||
return maxIP
|
||||
}
|
||||
174
core/iptree_test.go
Normal file
174
core/iptree_test.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func wrapTestContains(t *testing.T, iptree *IPTree, ip string, result bool) {
|
||||
if iptree.Contains(ip) == result {
|
||||
// t.Logf("compare version %s %s ok\n", v1, v2)
|
||||
} else {
|
||||
t.Errorf("test %s fail\n", ip)
|
||||
}
|
||||
}
|
||||
func wrapBenchmarkContains(t *testing.B, iptree *IPTree, ip string, result bool) {
|
||||
if iptree.Contains(ip) == result {
|
||||
// t.Logf("compare version %s %s ok\n", v1, v2)
|
||||
} else {
|
||||
t.Errorf("test %s fail\n", ip)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllInputFormat(t *testing.T) {
|
||||
iptree := NewIPTree("219.137.185.70,127.0.0.1,127.0.0.0/8,192.168.1.0/24,192.168.3.100-192.168.3.255,192.168.100.0-192.168.200.255")
|
||||
wrapTestContains(t, iptree, "127.0.0.1", true)
|
||||
wrapTestContains(t, iptree, "127.0.0.2", true)
|
||||
wrapTestContains(t, iptree, "127.1.1.1", true)
|
||||
wrapTestContains(t, iptree, "219.137.185.70", true)
|
||||
wrapTestContains(t, iptree, "219.137.185.71", false)
|
||||
wrapTestContains(t, iptree, "192.168.1.2", true)
|
||||
wrapTestContains(t, iptree, "192.168.2.2", false)
|
||||
wrapTestContains(t, iptree, "192.168.3.1", false)
|
||||
wrapTestContains(t, iptree, "192.168.3.100", true)
|
||||
wrapTestContains(t, iptree, "192.168.3.255", true)
|
||||
wrapTestContains(t, iptree, "192.168.150.1", true)
|
||||
wrapTestContains(t, iptree, "192.168.250.1", false)
|
||||
}
|
||||
|
||||
func TestSingleIP(t *testing.T) {
|
||||
iptree := NewIPTree("")
|
||||
iptree.Add("219.137.185.70", "219.137.185.70")
|
||||
wrapTestContains(t, iptree, "219.137.185.70", true)
|
||||
wrapTestContains(t, iptree, "219.137.185.71", false)
|
||||
}
|
||||
|
||||
func TestWrongSegment(t *testing.T) {
|
||||
iptree := NewIPTree("")
|
||||
inserted := iptree.Add("87.251.75.0", "82.251.75.255")
|
||||
if inserted {
|
||||
t.Errorf("TestWrongSegment failed\n")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSegment2(t *testing.T) {
|
||||
iptree := NewIPTree("")
|
||||
iptree.Clear()
|
||||
iptree.Add("10.1.5.50", "10.1.5.100")
|
||||
iptree.Add("10.1.1.50", "10.1.1.100")
|
||||
iptree.Add("10.1.2.50", "10.1.2.100")
|
||||
iptree.Add("10.1.6.50", "10.1.6.100")
|
||||
iptree.Add("10.1.7.50", "10.1.7.100")
|
||||
iptree.Add("10.1.3.50", "10.1.3.100")
|
||||
iptree.Add("10.1.1.1", "10.1.1.10") // no interset
|
||||
iptree.Add("10.1.1.200", "10.1.1.250") // no interset
|
||||
iptree.Print()
|
||||
|
||||
iptree.Add("10.1.1.80", "10.1.1.90") // all in
|
||||
iptree.Add("10.1.1.40", "10.1.1.60") // interset
|
||||
iptree.Print()
|
||||
iptree.Add("10.1.1.90", "10.1.1.110") // interset
|
||||
iptree.Print()
|
||||
t.Logf("ipTree size:%d\n", iptree.Size())
|
||||
wrapTestContains(t, iptree, "10.1.1.40", true)
|
||||
wrapTestContains(t, iptree, "10.1.5.50", true)
|
||||
wrapTestContains(t, iptree, "10.1.6.50", true)
|
||||
wrapTestContains(t, iptree, "10.1.7.50", true)
|
||||
wrapTestContains(t, iptree, "10.1.2.50", true)
|
||||
wrapTestContains(t, iptree, "10.1.3.50", true)
|
||||
wrapTestContains(t, iptree, "10.1.1.60", true)
|
||||
wrapTestContains(t, iptree, "10.1.1.90", true)
|
||||
wrapTestContains(t, iptree, "10.1.1.110", true)
|
||||
wrapTestContains(t, iptree, "10.1.1.250", true)
|
||||
wrapTestContains(t, iptree, "10.1.2.60", true)
|
||||
wrapTestContains(t, iptree, "10.1.100.30", false)
|
||||
wrapTestContains(t, iptree, "10.1.200.30", false)
|
||||
|
||||
iptree.Add("10.0.0.0", "10.255.255.255") // will merge all segment
|
||||
iptree.Print()
|
||||
if iptree.Size() != 1 {
|
||||
t.Errorf("merge ip segment error\n")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func BenchmarkBuildipTree20k(t *testing.B) {
|
||||
iptree := NewIPTree("")
|
||||
iptree.Clear()
|
||||
iptree.Add("10.1.5.50", "10.1.5.100")
|
||||
iptree.Add("10.1.1.50", "10.1.1.100")
|
||||
iptree.Add("10.1.2.50", "10.1.2.100")
|
||||
iptree.Add("10.1.6.50", "10.1.6.100")
|
||||
iptree.Add("10.1.7.50", "10.1.7.100")
|
||||
iptree.Add("10.1.3.50", "10.1.3.100")
|
||||
iptree.Add("10.1.1.1", "10.1.1.10") // no interset
|
||||
iptree.Add("10.1.1.200", "10.1.1.250") // no interset
|
||||
iptree.Add("10.1.1.80", "10.1.1.90") // all in
|
||||
iptree.Add("10.1.1.40", "10.1.1.60") // interset
|
||||
iptree.Add("10.1.1.90", "10.1.1.110") // interset
|
||||
var minIP uint32
|
||||
binary.Read(bytes.NewBuffer(net.ParseIP("10.1.1.1").To4()), binary.BigEndian, &minIP)
|
||||
|
||||
// insert 10k block ip single
|
||||
nodeNum := uint32(10000 * 1)
|
||||
gap := uint32(10)
|
||||
for i := minIP; i < minIP+nodeNum*gap; i += gap {
|
||||
iptree.AddIntIP(i, i)
|
||||
// t.Logf("ipTree size:%d\n", iptree.Size())
|
||||
}
|
||||
binary.Read(bytes.NewBuffer(net.ParseIP("100.1.1.1").To4()), binary.BigEndian, &minIP)
|
||||
// insert 100k block ip segment
|
||||
for i := minIP; i < minIP+nodeNum*gap; i += gap {
|
||||
iptree.AddIntIP(i, i+5)
|
||||
}
|
||||
t.Logf("ipTree size:%d\n", iptree.Size())
|
||||
iptree.Clear()
|
||||
t.Logf("clear. ipTree size:%d\n", iptree.Size())
|
||||
}
|
||||
func BenchmarkQuery(t *testing.B) {
|
||||
ts := time.Now()
|
||||
iptree := NewIPTree("")
|
||||
iptree.Clear()
|
||||
iptree.Add("10.1.5.50", "10.1.5.100")
|
||||
iptree.Add("10.1.1.50", "10.1.1.100")
|
||||
iptree.Add("10.1.2.50", "10.1.2.100")
|
||||
iptree.Add("10.1.6.50", "10.1.6.100")
|
||||
iptree.Add("10.1.7.50", "10.1.7.100")
|
||||
iptree.Add("10.1.3.50", "10.1.3.100")
|
||||
iptree.Add("10.1.1.1", "10.1.1.10") // no interset
|
||||
iptree.Add("10.1.1.200", "10.1.1.250") // no interset
|
||||
iptree.Add("10.1.1.80", "10.1.1.90") // all in
|
||||
iptree.Add("10.1.1.40", "10.1.1.60") // interset
|
||||
iptree.Add("10.1.1.90", "10.1.1.110") // interset
|
||||
var minIP uint32
|
||||
binary.Read(bytes.NewBuffer(net.ParseIP("10.1.1.1").To4()), binary.BigEndian, &minIP)
|
||||
|
||||
// insert 10k block ip single
|
||||
nodeNum := uint32(10000 * 1000)
|
||||
gap := uint32(10)
|
||||
for i := minIP; i < minIP+nodeNum*gap; i += gap {
|
||||
iptree.AddIntIP(i, i)
|
||||
// t.Logf("ipTree size:%d\n", iptree.Size())
|
||||
}
|
||||
binary.Read(bytes.NewBuffer(net.ParseIP("100.1.1.1").To4()), binary.BigEndian, &minIP)
|
||||
// insert 100k block ip segment
|
||||
for i := minIP; i < minIP+nodeNum*gap; i += gap {
|
||||
iptree.AddIntIP(i, i+5)
|
||||
}
|
||||
t.Logf("ipTree size:%d cost:%dms\n", iptree.Size(), time.Since(ts)/time.Millisecond)
|
||||
ts = time.Now()
|
||||
// t.ResetTimer()
|
||||
queryNum := 100 * 10000
|
||||
for i := 0; i < queryNum; i++ {
|
||||
iptree.ContainsInt(minIP + uint32(i))
|
||||
wrapBenchmarkContains(t, iptree, "10.1.5.55", true)
|
||||
wrapBenchmarkContains(t, iptree, "10.1.1.1", true)
|
||||
wrapBenchmarkContains(t, iptree, "10.1.5.200", false)
|
||||
wrapBenchmarkContains(t, iptree, "200.1.1.1", false)
|
||||
}
|
||||
t.Logf("query num:%d cost:%dms\n", queryNum*4, time.Since(ts)/time.Millisecond)
|
||||
|
||||
}
|
||||
27
core/log.go
27
core/log.go
@@ -36,9 +36,8 @@ func init() {
|
||||
}
|
||||
|
||||
const (
|
||||
LogFile = iota
|
||||
LogFile = 1 << iota
|
||||
LogConsole
|
||||
LogFileAndConsole
|
||||
)
|
||||
|
||||
type logger struct {
|
||||
@@ -51,6 +50,7 @@ type logger struct {
|
||||
pid int
|
||||
maxLogSize int64
|
||||
mode int
|
||||
stdLogger *log.Logger
|
||||
}
|
||||
|
||||
func NewLogger(path string, filePrefix string, level LogLevel, maxLogSize int64, mode int) *logger {
|
||||
@@ -67,13 +67,13 @@ func NewLogger(path string, filePrefix string, level LogLevel, maxLogSize int64,
|
||||
os.MkdirAll(logdir, 0777)
|
||||
for lv := range logFileNames {
|
||||
logFilePath := logdir + filePrefix + logFileNames[lv]
|
||||
f, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
|
||||
f, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
os.Chmod(logFilePath, 0666)
|
||||
os.Chmod(logFilePath, 0644)
|
||||
logfiles[lv] = f
|
||||
loggers[lv] = log.New(f, "", log.LstdFlags)
|
||||
loggers[lv] = log.New(f, "", log.LstdFlags|log.Lmicroseconds)
|
||||
}
|
||||
var le string
|
||||
if runtime.GOOS == "windows" {
|
||||
@@ -81,7 +81,8 @@ func NewLogger(path string, filePrefix string, level LogLevel, maxLogSize int64,
|
||||
} else {
|
||||
le = "\n"
|
||||
}
|
||||
pLog := &logger{loggers, logfiles, level, logdir, &sync.Mutex{}, le, os.Getpid(), maxLogSize, mode}
|
||||
pLog := &logger{loggers, logfiles, level, logdir, &sync.Mutex{}, le, os.Getpid(), maxLogSize, mode, log.New(os.Stdout, "", 0)}
|
||||
pLog.stdLogger.SetFlags(log.LstdFlags | log.Lmicroseconds)
|
||||
go pLog.checkFile()
|
||||
return pLog
|
||||
}
|
||||
@@ -119,7 +120,7 @@ func (l *logger) checkFile() {
|
||||
backupPath := l.logDir + fname + ".0"
|
||||
os.Remove(backupPath)
|
||||
os.Rename(l.logDir+fname, backupPath)
|
||||
newFile, e := os.OpenFile(l.logDir+fname, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
|
||||
newFile, e := os.OpenFile(l.logDir+fname, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
|
||||
if e == nil {
|
||||
l.loggers[lv].SetOutput(newFile)
|
||||
l.files[lv] = newFile
|
||||
@@ -138,11 +139,11 @@ func (l *logger) Printf(level LogLevel, format string, params ...interface{}) {
|
||||
}
|
||||
pidAndLevel := []interface{}{l.pid, loglevel[level]}
|
||||
params = append(pidAndLevel, params...)
|
||||
if l.mode == LogFile || l.mode == LogFileAndConsole {
|
||||
if l.mode & LogFile != 0 {
|
||||
l.loggers[0].Printf("%d %s "+format+l.lineEnding, params...)
|
||||
}
|
||||
if l.mode == LogConsole || l.mode == LogFileAndConsole {
|
||||
log.Printf("%d %s "+format+l.lineEnding, params...)
|
||||
if l.mode & LogConsole != 0 {
|
||||
l.stdLogger.Printf("%d %s "+format+l.lineEnding, params...)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -155,10 +156,10 @@ func (l *logger) Println(level LogLevel, params ...interface{}) {
|
||||
pidAndLevel := []interface{}{l.pid, " ", loglevel[level], " "}
|
||||
params = append(pidAndLevel, params...)
|
||||
params = append(params, l.lineEnding)
|
||||
if l.mode == LogFile || l.mode == LogFileAndConsole {
|
||||
if l.mode & LogFile != 0 {
|
||||
l.loggers[0].Print(params...)
|
||||
}
|
||||
if l.mode == LogConsole || l.mode == LogFileAndConsole {
|
||||
log.Print(params...)
|
||||
if l.mode & LogConsole != 0 {
|
||||
l.stdLogger.Print(params...)
|
||||
}
|
||||
}
|
||||
|
||||
45
core/nat.go
45
core/nat.go
@@ -3,38 +3,38 @@ package openp2p
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
reuse "github.com/openp2p-cn/go-reuseport"
|
||||
)
|
||||
|
||||
func natTCP(serverHost string, serverPort int, localPort int) (publicIP string, publicPort int) {
|
||||
func natTCP(serverHost string, serverPort int) (publicIP string, publicPort int, localPort int) {
|
||||
// dialer := &net.Dialer{
|
||||
// LocalAddr: &net.TCPAddr{
|
||||
// IP: net.ParseIP("0.0.0.0"),
|
||||
// Port: localPort,
|
||||
// },
|
||||
// }
|
||||
conn, err := reuse.DialTimeout("tcp4", fmt.Sprintf("%s:%d", "0.0.0.0", localPort), fmt.Sprintf("%s:%d", serverHost, serverPort), time.Second*5)
|
||||
conn, err := reuse.DialTimeout("tcp4", fmt.Sprintf("%s:%d", "0.0.0.0", 0), fmt.Sprintf("%s:%d", serverHost, serverPort), NatTestTimeout)
|
||||
// conn, err := net.Dial("tcp4", fmt.Sprintf("%s:%d", serverHost, serverPort))
|
||||
// log.Println(LvINFO, conn.LocalAddr())
|
||||
if err != nil {
|
||||
fmt.Printf("Dial tcp4 %s:%d error:%s", serverHost, serverPort, err)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
localPort, _ = strconv.Atoi(strings.Split(conn.LocalAddr().String(), ":")[1])
|
||||
_, wrerr := conn.Write([]byte("1"))
|
||||
if wrerr != nil {
|
||||
fmt.Printf("Write error: %s\n", wrerr)
|
||||
return
|
||||
}
|
||||
b := make([]byte, 1000)
|
||||
conn.SetReadDeadline(time.Now().Add(time.Second * 5))
|
||||
conn.SetReadDeadline(time.Now().Add(NatTestTimeout))
|
||||
n, rderr := conn.Read(b)
|
||||
if rderr != nil {
|
||||
fmt.Printf("Read error: %s\n", rderr)
|
||||
@@ -83,7 +83,7 @@ func natTest(serverHost string, serverPort int, localPort int) (publicIP string,
|
||||
return "", 0, err
|
||||
}
|
||||
natRsp := NatDetectRsp{}
|
||||
err = json.Unmarshal(buffer[openP2PHeaderSize:nRead], &natRsp)
|
||||
json.Unmarshal(buffer[openP2PHeaderSize:nRead], &natRsp)
|
||||
|
||||
return natRsp.IP, natRsp.Port, nil
|
||||
}
|
||||
@@ -91,13 +91,12 @@ func natTest(serverHost string, serverPort int, localPort int) (publicIP string,
|
||||
func getNATType(host string, udp1 int, udp2 int) (publicIP string, NATType int, hasIPvr int, hasUPNPorNATPMP int, err error) {
|
||||
// the random local port may be used by other.
|
||||
localPort := int(rand.Uint32()%15000 + 50000)
|
||||
echoPort := P2PNetworkInstance(nil).config.TCPPort
|
||||
echoPort := gConf.Network.TCPPort
|
||||
ip1, port1, err := natTest(host, udp1, localPort)
|
||||
if err != nil {
|
||||
return "", 0, 0, 0, err
|
||||
}
|
||||
hasIPv4, hasUPNPorNATPMP := publicIPTest(ip1, echoPort)
|
||||
gLog.Printf(LvINFO, "local port:%d, nat port:%d, hasIPv4:%d, UPNP:%d", localPort, port1, hasIPv4, hasUPNPorNATPMP)
|
||||
_, port2, err := natTest(host, udp2, localPort) // 2rd nat test not need testing publicip
|
||||
gLog.Printf(LvDEBUG, "local port:%d nat port:%d", localPort, port2)
|
||||
if err != nil {
|
||||
@@ -112,20 +111,18 @@ func getNATType(host string, udp1 int, udp2 int) (publicIP string, NATType int,
|
||||
|
||||
func publicIPTest(publicIP string, echoPort int) (hasPublicIP int, hasUPNPorNATPMP int) {
|
||||
var echoConn *net.UDPConn
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
gLog.Println(LvDEBUG, "echo server start")
|
||||
var err error
|
||||
echoConn, err = net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: echoPort})
|
||||
if err != nil { // listen error
|
||||
gLog.Println(LvERROR, "echo server listen error:", err)
|
||||
return
|
||||
}
|
||||
defer echoConn.Close()
|
||||
go func() {
|
||||
gLog.Println(LvDEBUG, "echo server start")
|
||||
var err error
|
||||
echoConn, err = net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: echoPort})
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "echo server listen error:", err)
|
||||
return
|
||||
}
|
||||
buf := make([]byte, 1600)
|
||||
// close outside for breaking the ReadFromUDP
|
||||
// wait 5s for echo testing
|
||||
wg.Done()
|
||||
// wait 30s for echo testing
|
||||
buf := make([]byte, 1600)
|
||||
echoConn.SetReadDeadline(time.Now().Add(time.Second * 30))
|
||||
n, addr, err := echoConn.ReadFromUDP(buf)
|
||||
if err != nil {
|
||||
@@ -134,8 +131,6 @@ func publicIPTest(publicIP string, echoPort int) (hasPublicIP int, hasUPNPorNATP
|
||||
echoConn.WriteToUDP(buf[0:n], addr)
|
||||
gLog.Println(LvDEBUG, "echo server end")
|
||||
}()
|
||||
wg.Wait() // wait echo udp
|
||||
defer echoConn.Close()
|
||||
// testing for public ip
|
||||
for i := 0; i < 2; i++ {
|
||||
if i == 1 {
|
||||
@@ -151,14 +146,14 @@ func publicIPTest(publicIP string, echoPort int) (hasPublicIP int, hasUPNPorNATP
|
||||
gLog.Println(LvDEBUG, "could not perform UPNP external address:", err)
|
||||
break
|
||||
}
|
||||
log.Println("PublicIP:", ext)
|
||||
gLog.Println(LvINFO, "PublicIP:", ext)
|
||||
|
||||
externalPort, err := nat.AddPortMapping("udp", echoPort, echoPort, "openp2p", 30)
|
||||
externalPort, err := nat.AddPortMapping("udp", echoPort, echoPort, "openp2p", 30) // 30 seconds fot upnp testing
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "could not add udp UPNP port mapping", externalPort)
|
||||
break
|
||||
} else {
|
||||
nat.AddPortMapping("tcp", echoPort, echoPort, "openp2p", 604800)
|
||||
nat.AddPortMapping("tcp", echoPort, echoPort, "openp2p", 604800) // 7 days for tcp connection
|
||||
}
|
||||
}
|
||||
gLog.Printf(LvDEBUG, "public ip test start %s:%d", publicIP, echoPort)
|
||||
|
||||
@@ -13,24 +13,12 @@ func Run() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
baseDir := filepath.Dir(os.Args[0])
|
||||
os.Chdir(baseDir) // for system service
|
||||
gLog = NewLogger(baseDir, ProducnName, LvDEBUG, 1024*1024, LogFileAndConsole)
|
||||
// TODO: install sub command, deamon process
|
||||
gLog = NewLogger(baseDir, ProductName, LvDEBUG, 1024*1024, LogFile|LogConsole)
|
||||
if len(os.Args) > 1 {
|
||||
switch os.Args[1] {
|
||||
case "version", "-v", "--version":
|
||||
fmt.Println(OpenP2PVersion)
|
||||
return
|
||||
case "update":
|
||||
gLog = NewLogger(baseDir, ProducnName, LvDEBUG, 1024*1024, LogFileAndConsole)
|
||||
targetPath := filepath.Join(defaultInstallPath, defaultBinName)
|
||||
d := daemon{}
|
||||
err := d.Control("restart", targetPath, nil)
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "restart service error:", err)
|
||||
} else {
|
||||
gLog.Println(LvINFO, "restart service ok.")
|
||||
}
|
||||
return
|
||||
case "install":
|
||||
install()
|
||||
return
|
||||
@@ -53,12 +41,16 @@ func Run() {
|
||||
|
||||
gLog.Println(LvINFO, &gConf)
|
||||
setFirewall()
|
||||
err := setRLimit()
|
||||
if err != nil {
|
||||
gLog.Println(LvINFO, "setRLimit error:", err)
|
||||
}
|
||||
network := P2PNetworkInstance(&gConf.Network)
|
||||
if ok := network.Connect(30000); !ok {
|
||||
gLog.Println(LvERROR, "P2PNetwork login error")
|
||||
return
|
||||
}
|
||||
gLog.Println(LvINFO, "waiting for connection...")
|
||||
// gLog.Println(LvINFO, "waiting for connection...")
|
||||
forever := make(chan bool)
|
||||
<-forever
|
||||
}
|
||||
@@ -70,7 +62,7 @@ var network *P2PNetwork
|
||||
func RunAsModule(baseDir string, token string, bw int, logLevel int) *P2PNetwork {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
os.Chdir(baseDir) // for system service
|
||||
gLog = NewLogger(baseDir, ProducnName, LvDEBUG, 1024*1024, LogFileAndConsole)
|
||||
gLog = NewLogger(baseDir, ProductName, LvDEBUG, 1024*1024, LogFile|LogConsole)
|
||||
|
||||
parseParams("")
|
||||
|
||||
@@ -89,7 +81,7 @@ func RunAsModule(baseDir string, token string, bw int, logLevel int) *P2PNetwork
|
||||
gLog.Println(LvERROR, "P2PNetwork login error")
|
||||
return nil
|
||||
}
|
||||
gLog.Println(LvINFO, "waiting for connection...")
|
||||
// gLog.Println(LvINFO, "waiting for connection...")
|
||||
return network
|
||||
}
|
||||
|
||||
|
||||
@@ -35,24 +35,23 @@ type overlayConn struct {
|
||||
// for udp
|
||||
connUDP *net.UDPConn
|
||||
remoteAddr net.Addr
|
||||
udpRelayData chan []byte
|
||||
udpData chan []byte
|
||||
lastReadUDPTs time.Time
|
||||
}
|
||||
|
||||
func (oConn *overlayConn) run() {
|
||||
gLog.Printf(LvDEBUG, "%d overlayConn run start", oConn.id)
|
||||
defer gLog.Printf(LvDEBUG, "%d overlayConn run end", oConn.id)
|
||||
oConn.running = true
|
||||
oConn.lastReadUDPTs = time.Now()
|
||||
buffer := make([]byte, ReadBuffLen+PaddingSize)
|
||||
readBuf := buffer[:ReadBuffLen]
|
||||
buffer := make([]byte, ReadBuffLen+PaddingSize) // 16 bytes for padding
|
||||
reuseBuff := buffer[:ReadBuffLen]
|
||||
encryptData := make([]byte, ReadBuffLen+PaddingSize) // 16 bytes for padding
|
||||
tunnelHead := new(bytes.Buffer)
|
||||
relayHead := new(bytes.Buffer)
|
||||
binary.Write(relayHead, binary.LittleEndian, oConn.rtid)
|
||||
binary.Write(tunnelHead, binary.LittleEndian, oConn.id)
|
||||
for oConn.running && oConn.tunnel.isRuning() {
|
||||
buff, dataLen, err := oConn.Read(readBuf)
|
||||
readBuff, dataLen, err := oConn.Read(reuseBuff)
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
continue
|
||||
@@ -61,20 +60,20 @@ func (oConn *overlayConn) run() {
|
||||
gLog.Printf(LvDEBUG, "overlayConn %d read error:%s,close it", oConn.id, err)
|
||||
break
|
||||
}
|
||||
payload := buff[:dataLen]
|
||||
payload := readBuff[:dataLen]
|
||||
if oConn.appKey != 0 {
|
||||
payload, _ = encryptBytes(oConn.appKeyBytes, encryptData, buffer[:dataLen], dataLen)
|
||||
payload, _ = encryptBytes(oConn.appKeyBytes, encryptData, readBuff[:dataLen], dataLen)
|
||||
}
|
||||
writeBytes := append(tunnelHead.Bytes(), payload...)
|
||||
if oConn.rtid == 0 {
|
||||
oConn.tunnel.conn.WriteBytes(MsgP2P, MsgOverlayData, writeBytes)
|
||||
gLog.Printf(LvDEBUG, "write overlay data to %d:%d bodylen=%d", oConn.rtid, oConn.id, len(writeBytes))
|
||||
gLog.Printf(LvDEBUG, "write overlay data to tid:%d,oid:%d bodylen=%d", oConn.tunnel.id, oConn.id, len(writeBytes))
|
||||
} else {
|
||||
// write raley data
|
||||
all := append(relayHead.Bytes(), encodeHeader(MsgP2P, MsgOverlayData, uint32(len(writeBytes)))...)
|
||||
all = append(all, writeBytes...)
|
||||
oConn.tunnel.conn.WriteBytes(MsgP2P, MsgRelayData, all)
|
||||
gLog.Printf(LvDEBUG, "write relay data to %d:%d bodylen=%d", oConn.rtid, oConn.id, len(writeBytes))
|
||||
gLog.Printf(LvDEBUG, "write relay data to tid:%d,rtid:%d,oid:%d bodylen=%d", oConn.tunnel.id, oConn.rtid, oConn.id, len(writeBytes))
|
||||
}
|
||||
}
|
||||
if oConn.connTCP != nil {
|
||||
@@ -85,20 +84,22 @@ func (oConn *overlayConn) run() {
|
||||
}
|
||||
oConn.tunnel.overlayConns.Delete(oConn.id)
|
||||
// notify peer disconnect
|
||||
if oConn.isClient {
|
||||
req := OverlayDisconnectReq{ID: oConn.id}
|
||||
if oConn.rtid == 0 {
|
||||
oConn.tunnel.conn.WriteMessage(MsgP2P, MsgOverlayDisconnectReq, &req)
|
||||
} else {
|
||||
// write relay data
|
||||
msg, _ := newMessage(MsgP2P, MsgOverlayDisconnectReq, &req)
|
||||
msgWithHead := append(relayHead.Bytes(), msg...)
|
||||
oConn.tunnel.conn.WriteBytes(MsgP2P, MsgRelayData, msgWithHead)
|
||||
}
|
||||
req := OverlayDisconnectReq{ID: oConn.id}
|
||||
if oConn.rtid == 0 {
|
||||
oConn.tunnel.conn.WriteMessage(MsgP2P, MsgOverlayDisconnectReq, &req)
|
||||
} else {
|
||||
// write relay data
|
||||
msg, _ := newMessage(MsgP2P, MsgOverlayDisconnectReq, &req)
|
||||
msgWithHead := append(relayHead.Bytes(), msg...)
|
||||
oConn.tunnel.conn.WriteBytes(MsgP2P, MsgRelayData, msgWithHead)
|
||||
}
|
||||
}
|
||||
|
||||
func (oConn *overlayConn) Read(reuseBuff []byte) (buff []byte, n int, err error) {
|
||||
func (oConn *overlayConn) Read(reuseBuff []byte) (buff []byte, dataLen int, err error) {
|
||||
if !oConn.running {
|
||||
err = ErrOverlayConnDisconnect
|
||||
return
|
||||
}
|
||||
if oConn.connUDP != nil {
|
||||
if time.Now().After(oConn.lastReadUDPTs.Add(time.Minute * 5)) {
|
||||
err = errors.New("udp close")
|
||||
@@ -106,15 +107,15 @@ func (oConn *overlayConn) Read(reuseBuff []byte) (buff []byte, n int, err error)
|
||||
}
|
||||
if oConn.remoteAddr != nil { // as server
|
||||
select {
|
||||
case buff = <-oConn.udpRelayData:
|
||||
n = len(buff)
|
||||
case buff = <-oConn.udpData:
|
||||
dataLen = len(buff) - PaddingSize
|
||||
oConn.lastReadUDPTs = time.Now()
|
||||
case <-time.After(time.Second * 10):
|
||||
err = ErrDeadlineExceeded
|
||||
}
|
||||
} else { // as client
|
||||
oConn.connUDP.SetReadDeadline(time.Now().Add(5 * time.Second))
|
||||
n, _, err = oConn.connUDP.ReadFrom(reuseBuff)
|
||||
oConn.connUDP.SetReadDeadline(time.Now().Add(UDPReadTimeout))
|
||||
dataLen, _, err = oConn.connUDP.ReadFrom(reuseBuff)
|
||||
if err == nil {
|
||||
oConn.lastReadUDPTs = time.Now()
|
||||
}
|
||||
@@ -122,15 +123,21 @@ func (oConn *overlayConn) Read(reuseBuff []byte) (buff []byte, n int, err error)
|
||||
}
|
||||
return
|
||||
}
|
||||
oConn.connTCP.SetReadDeadline(time.Now().Add(time.Second * 5))
|
||||
n, err = oConn.connTCP.Read(reuseBuff)
|
||||
buff = reuseBuff
|
||||
if oConn.connTCP != nil {
|
||||
oConn.connTCP.SetReadDeadline(time.Now().Add(UDPReadTimeout))
|
||||
dataLen, err = oConn.connTCP.Read(reuseBuff)
|
||||
buff = reuseBuff
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// calling by p2pTunnel
|
||||
func (oConn *overlayConn) Write(buff []byte) (n int, err error) {
|
||||
// add mutex when multi-thread calling
|
||||
if !oConn.running {
|
||||
return 0, ErrOverlayConnDisconnect
|
||||
}
|
||||
if oConn.connUDP != nil {
|
||||
if oConn.remoteAddr == nil {
|
||||
n, err = oConn.connUDP.Write(buff)
|
||||
@@ -142,9 +149,25 @@ func (oConn *overlayConn) Write(buff []byte) (n int, err error) {
|
||||
}
|
||||
return
|
||||
}
|
||||
n, err = oConn.connTCP.Write(buff)
|
||||
if oConn.connTCP != nil {
|
||||
n, err = oConn.connTCP.Write(buff)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
oConn.running = false
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (oConn *overlayConn) Close() (err error) {
|
||||
oConn.running = false
|
||||
if oConn.connTCP != nil {
|
||||
oConn.connTCP.Close()
|
||||
oConn.connTCP = nil
|
||||
}
|
||||
if oConn.connUDP != nil {
|
||||
oConn.connUDP.Close()
|
||||
oConn.connUDP = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ type p2pApp struct {
|
||||
listener net.Listener
|
||||
listenerUDP *net.UDPConn
|
||||
tunnel *P2PTunnel
|
||||
iptree *IPTree
|
||||
rtid uint64 // relay tunnelID
|
||||
relayNode string
|
||||
relayMode string
|
||||
@@ -51,7 +52,11 @@ func (app *p2pApp) listenTCP() error {
|
||||
gLog.Printf(LvDEBUG, "tcp accept on port %d start", app.config.SrcPort)
|
||||
defer gLog.Printf(LvDEBUG, "tcp accept on port %d end", app.config.SrcPort)
|
||||
var err error
|
||||
app.listener, err = net.Listen("tcp4", fmt.Sprintf("0.0.0.0:%d", app.config.SrcPort))
|
||||
listenAddr := ""
|
||||
if IsLocalhost(app.config.Whitelist) { // not expose port
|
||||
listenAddr = "127.0.0.1"
|
||||
}
|
||||
app.listener, err = net.Listen("tcp", fmt.Sprintf("%s:%d", listenAddr, app.config.SrcPort))
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "listen error:%s", err)
|
||||
return err
|
||||
@@ -64,6 +69,15 @@ func (app *p2pApp) listenTCP() error {
|
||||
}
|
||||
break
|
||||
}
|
||||
// check white list
|
||||
if app.config.Whitelist != "" {
|
||||
remoteIP := conn.RemoteAddr().(*net.TCPAddr).IP.String()
|
||||
if !app.iptree.Contains(remoteIP) && !IsLocalhost(remoteIP) {
|
||||
conn.Close()
|
||||
gLog.Printf(LvERROR, "%s not in whitelist, access denied", remoteIP)
|
||||
continue
|
||||
}
|
||||
}
|
||||
oConn := overlayConn{
|
||||
tunnel: app.tunnel,
|
||||
connTCP: conn,
|
||||
@@ -72,6 +86,7 @@ func (app *p2pApp) listenTCP() error {
|
||||
rtid: app.rtid,
|
||||
appID: app.id,
|
||||
appKey: app.key,
|
||||
running: true,
|
||||
}
|
||||
// pre-calc key bytes for encrypt
|
||||
if oConn.appKey != 0 {
|
||||
@@ -81,7 +96,7 @@ func (app *p2pApp) listenTCP() error {
|
||||
oConn.appKeyBytes = encryptKey
|
||||
}
|
||||
app.tunnel.overlayConns.Store(oConn.id, &oConn)
|
||||
gLog.Printf(LvDEBUG, "Accept TCP overlayID:%d", oConn.id)
|
||||
gLog.Printf(LvDEBUG, "Accept TCP overlayID:%d, %s", oConn.id, oConn.connTCP.RemoteAddr())
|
||||
// tell peer connect
|
||||
req := OverlayConnectReq{ID: oConn.id,
|
||||
Token: app.tunnel.pn.config.Token,
|
||||
@@ -100,6 +115,8 @@ func (app *p2pApp) listenTCP() error {
|
||||
msgWithHead := append(relayHead.Bytes(), msg...)
|
||||
app.tunnel.conn.WriteBytes(MsgP2P, MsgRelayData, msgWithHead)
|
||||
}
|
||||
// TODO: wait OverlayConnectRsp instead of sleep
|
||||
time.Sleep(time.Second) // waiting remote node connection ok
|
||||
go oConn.run()
|
||||
}
|
||||
return nil
|
||||
@@ -114,10 +131,10 @@ func (app *p2pApp) listenUDP() error {
|
||||
gLog.Printf(LvERROR, "listen error:%s", err)
|
||||
return err
|
||||
}
|
||||
buffer := make([]byte, 64*1024)
|
||||
buffer := make([]byte, 64*1024+PaddingSize)
|
||||
udpID := make([]byte, 8)
|
||||
for {
|
||||
app.listenerUDP.SetReadDeadline(time.Now().Add(time.Second * 10))
|
||||
app.listenerUDP.SetReadDeadline(time.Now().Add(UDPReadTimeout))
|
||||
len, remoteAddr, err := app.listenerUDP.ReadFrom(buffer)
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
@@ -127,8 +144,8 @@ func (app *p2pApp) listenUDP() error {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
b := bytes.Buffer{}
|
||||
b.Write(buffer[:len])
|
||||
dupData := bytes.Buffer{} // should uses memory pool
|
||||
dupData.Write(buffer[:len+PaddingSize])
|
||||
// load from app.tunnel.overlayConns by remoteAddr error, new udp connection
|
||||
remoteIP := strings.Split(remoteAddr.String(), ":")[0]
|
||||
port, _ := strconv.Atoi(strings.Split(remoteAddr.String(), ":")[1])
|
||||
@@ -139,19 +156,20 @@ func (app *p2pApp) listenUDP() error {
|
||||
udpID[3] = a[3]
|
||||
udpID[4] = byte(port)
|
||||
udpID[5] = byte(port >> 8)
|
||||
id := binary.LittleEndian.Uint64(udpID)
|
||||
id := binary.LittleEndian.Uint64(udpID) // convert remoteIP:port to uint64
|
||||
s, ok := app.tunnel.overlayConns.Load(id)
|
||||
if !ok {
|
||||
oConn := overlayConn{
|
||||
tunnel: app.tunnel,
|
||||
connUDP: app.listenerUDP,
|
||||
remoteAddr: remoteAddr,
|
||||
udpRelayData: make(chan []byte, 1000),
|
||||
id: id,
|
||||
isClient: true,
|
||||
rtid: app.rtid,
|
||||
appID: app.id,
|
||||
appKey: app.key,
|
||||
tunnel: app.tunnel,
|
||||
connUDP: app.listenerUDP,
|
||||
remoteAddr: remoteAddr,
|
||||
udpData: make(chan []byte, 1000),
|
||||
id: id,
|
||||
isClient: true,
|
||||
rtid: app.rtid,
|
||||
appID: app.id,
|
||||
appKey: app.key,
|
||||
running: true,
|
||||
}
|
||||
// calc key bytes for encrypt
|
||||
if oConn.appKey != 0 {
|
||||
@@ -180,8 +198,10 @@ func (app *p2pApp) listenUDP() error {
|
||||
msgWithHead := append(relayHead.Bytes(), msg...)
|
||||
app.tunnel.conn.WriteBytes(MsgP2P, MsgRelayData, msgWithHead)
|
||||
}
|
||||
// TODO: wait OverlayConnectRsp instead of sleep
|
||||
time.Sleep(time.Second) // waiting remote node connection ok
|
||||
go oConn.run()
|
||||
oConn.udpRelayData <- b.Bytes()
|
||||
oConn.udpData <- dupData.Bytes()
|
||||
}
|
||||
|
||||
// load from app.tunnel.overlayConns by remoteAddr ok, write relay data
|
||||
@@ -189,7 +209,7 @@ func (app *p2pApp) listenUDP() error {
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
overlayConn.udpRelayData <- b.Bytes()
|
||||
overlayConn.udpData <- dupData.Bytes()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -204,12 +224,15 @@ func (app *p2pApp) listen() error {
|
||||
if app.rtid != 0 {
|
||||
go app.relayHeartbeatLoop()
|
||||
}
|
||||
for app.tunnel.isRuning() && app.running {
|
||||
for app.tunnel.isRuning() {
|
||||
if app.config.Protocol == "udp" {
|
||||
app.listenUDP()
|
||||
} else {
|
||||
app.listenTCP()
|
||||
}
|
||||
if !app.running {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second * 10)
|
||||
}
|
||||
return nil
|
||||
@@ -233,8 +256,8 @@ func (app *p2pApp) close() {
|
||||
func (app *p2pApp) relayHeartbeatLoop() {
|
||||
app.wg.Add(1)
|
||||
defer app.wg.Done()
|
||||
gLog.Printf(LvDEBUG, "relayHeartbeat to %d start", app.rtid)
|
||||
defer gLog.Printf(LvDEBUG, "relayHeartbeat to %d end", app.rtid)
|
||||
gLog.Printf(LvDEBUG, "relayHeartbeat to rtid:%d start", app.rtid)
|
||||
defer gLog.Printf(LvDEBUG, "relayHeartbeat to rtid%d end", app.rtid)
|
||||
relayHead := new(bytes.Buffer)
|
||||
binary.Write(relayHead, binary.LittleEndian, app.rtid)
|
||||
req := RelayHeartbeat{RelayTunnelID: app.tunnel.id,
|
||||
@@ -242,7 +265,12 @@ func (app *p2pApp) relayHeartbeatLoop() {
|
||||
msg, _ := newMessage(MsgP2P, MsgRelayHeartbeat, &req)
|
||||
msgWithHead := append(relayHead.Bytes(), msg...)
|
||||
for app.tunnel.isRuning() && app.running {
|
||||
app.tunnel.conn.WriteBytes(MsgP2P, MsgRelayData, msgWithHead)
|
||||
err := app.tunnel.conn.WriteBytes(MsgP2P, MsgRelayData, msgWithHead)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "%d app write relay tunnel heartbeat error %s", app.rtid, err)
|
||||
return
|
||||
}
|
||||
gLog.Printf(LvDEBUG, "%d app write relay tunnel heartbeat ok", app.rtid)
|
||||
time.Sleep(TunnelHeartbeatTime)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,14 +3,15 @@ package openp2p
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -19,27 +20,47 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
instance *P2PNetwork
|
||||
once sync.Once
|
||||
v4l *v4Listener
|
||||
instance *P2PNetwork
|
||||
once sync.Once
|
||||
onceV4Listener sync.Once
|
||||
)
|
||||
|
||||
const (
|
||||
retryLimit = 20
|
||||
retryInterval = 10 * time.Second
|
||||
)
|
||||
|
||||
// golang not support float64 const
|
||||
var (
|
||||
ma10 float64 = 1.0 / 10
|
||||
ma5 float64 = 1.0 / 5
|
||||
)
|
||||
|
||||
type P2PNetwork struct {
|
||||
conn *websocket.Conn
|
||||
online bool
|
||||
running bool
|
||||
restartCh chan bool
|
||||
wg sync.WaitGroup
|
||||
writeMtx sync.Mutex
|
||||
serverTs int64
|
||||
localTs int64
|
||||
hbTime time.Time
|
||||
// msgMap sync.Map
|
||||
msgMap map[uint64]chan []byte //key: nodeID
|
||||
msgMapMtx sync.Mutex
|
||||
conn *websocket.Conn
|
||||
online bool
|
||||
running bool
|
||||
restartCh chan bool
|
||||
wgReconnect sync.WaitGroup
|
||||
writeMtx sync.Mutex
|
||||
hbTime time.Time
|
||||
// for sync server time
|
||||
t1 int64 // nanoSeconds
|
||||
dt int64 // client faster then server dt nanoSeconds
|
||||
ddtma int64
|
||||
ddt int64 // differential of dt
|
||||
msgMap sync.Map //key: nodeID
|
||||
// msgMap map[uint64]chan pushMsg //key: nodeID
|
||||
config NetworkConfig
|
||||
allTunnels sync.Map
|
||||
apps sync.Map //key: protocol+srcport; value: p2pApp
|
||||
limiter *BandwidthLimiter
|
||||
limiter *SpeedLimiter
|
||||
}
|
||||
|
||||
type msgCtx struct {
|
||||
data []byte
|
||||
ts time.Time
|
||||
}
|
||||
|
||||
func P2PNetworkInstance(config *NetworkConfig) *P2PNetwork {
|
||||
@@ -49,33 +70,41 @@ func P2PNetworkInstance(config *NetworkConfig) *P2PNetwork {
|
||||
restartCh: make(chan bool, 2),
|
||||
online: false,
|
||||
running: true,
|
||||
msgMap: make(map[uint64]chan []byte),
|
||||
limiter: newBandwidthLimiter(config.ShareBandwidth),
|
||||
limiter: newSpeedLimiter(config.ShareBandwidth*1024*1024/8, 1),
|
||||
dt: 0,
|
||||
ddt: 0,
|
||||
}
|
||||
instance.msgMap[0] = make(chan []byte) // for gateway
|
||||
instance.msgMap.Store(uint64(0), make(chan msgCtx)) // for gateway
|
||||
if config != nil {
|
||||
instance.config = *config
|
||||
}
|
||||
instance.init()
|
||||
go instance.run()
|
||||
go func() {
|
||||
for {
|
||||
instance.refreshIPv6(false)
|
||||
time.Sleep(time.Hour)
|
||||
}
|
||||
}()
|
||||
cleanTempFiles()
|
||||
})
|
||||
}
|
||||
return instance
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) run() {
|
||||
go pn.readLoop()
|
||||
go pn.autorunApp()
|
||||
heartbeatTimer := time.NewTicker(NetworkHeartbeatTime)
|
||||
pn.t1 = time.Now().UnixNano()
|
||||
pn.write(MsgHeartbeat, 0, "")
|
||||
for pn.running {
|
||||
select {
|
||||
case <-heartbeatTimer.C: // TODO: deal with connect failed, no send hb
|
||||
case <-heartbeatTimer.C:
|
||||
pn.t1 = time.Now().UnixNano()
|
||||
pn.write(MsgHeartbeat, 0, "")
|
||||
|
||||
case <-pn.restartCh:
|
||||
pn.online = false
|
||||
pn.wg.Wait() // wait read/write goroutine exited
|
||||
time.Sleep(NetworkHeartbeatTime)
|
||||
pn.wgReconnect.Wait() // wait read/autorunapp goroutine end
|
||||
time.Sleep(ClientAPITimeout)
|
||||
err := pn.init()
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "P2PNetwork init error:", err)
|
||||
@@ -85,7 +114,7 @@ func (pn *P2PNetwork) run() {
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) Connect(timeout int) bool {
|
||||
// waiting for login response
|
||||
// waiting for heartbeat
|
||||
for i := 0; i < (timeout / 1000); i++ {
|
||||
if pn.hbTime.After(time.Now().Add(-NetworkHeartbeatTime)) {
|
||||
return true
|
||||
@@ -101,62 +130,50 @@ func (pn *P2PNetwork) runAll() {
|
||||
allApps := gConf.Apps // read a copy, other thread will modify the gConf.Apps
|
||||
|
||||
for _, config := range allApps {
|
||||
if config.nextRetryTime.After(time.Now()) {
|
||||
continue
|
||||
}
|
||||
if config.Enabled == 0 {
|
||||
if config.nextRetryTime.After(time.Now()) || config.Enabled == 0 || config.retryNum >= retryLimit {
|
||||
continue
|
||||
}
|
||||
if config.AppName == "" {
|
||||
config.AppName = fmt.Sprintf("%s%d", config.Protocol, config.SrcPort)
|
||||
config.AppName = config.ID()
|
||||
}
|
||||
appExist := false
|
||||
i, ok := pn.apps.Load(fmt.Sprintf("%s%d", config.Protocol, config.SrcPort))
|
||||
if ok {
|
||||
app := i.(*p2pApp)
|
||||
appExist = true
|
||||
if app.isActive() {
|
||||
if i, ok := pn.apps.Load(config.ID()); ok {
|
||||
if app := i.(*p2pApp); app.isActive() {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if appExist {
|
||||
pn.DeleteApp(*config)
|
||||
}
|
||||
if config.retryNum > 0 {
|
||||
|
||||
if config.retryNum > 0 { // first time not show reconnect log
|
||||
gLog.Printf(LvINFO, "detect app %s disconnect, reconnecting the %d times...", config.AppName, config.retryNum)
|
||||
if time.Now().Add(-time.Minute * 15).After(config.retryTime) { // normal lasts 15min
|
||||
if time.Now().Add(-time.Minute * 15).After(config.retryTime) { // run normally 15min, reset retrynum
|
||||
config.retryNum = 0
|
||||
}
|
||||
}
|
||||
config.retryNum++
|
||||
config.retryTime = time.Now()
|
||||
increase := math.Pow(1.5, float64(config.retryNum)) // exponential increase retry time. 1.5^x
|
||||
if increase > 900 {
|
||||
increase = 900
|
||||
config.Enabled = 0
|
||||
gLog.Printf(LvWARN, "app %s has stopped retry, manually enable it on Web console", config.AppName)
|
||||
continue
|
||||
}
|
||||
config.nextRetryTime = time.Now().Add(time.Second * time.Duration(increase))
|
||||
config.nextRetryTime = time.Now().Add(retryInterval)
|
||||
config.connectTime = time.Now()
|
||||
config.peerToken = pn.config.Token
|
||||
gConf.mtx.Unlock() // AddApp will take a period of time
|
||||
gConf.mtx.Unlock() // AddApp will take a period of time, let outside modify gConf
|
||||
err := pn.AddApp(*config)
|
||||
gConf.mtx.Lock()
|
||||
if err != nil {
|
||||
config.errMsg = err.Error()
|
||||
if err == ErrPeerOffline { // stop retry, waiting for online
|
||||
config.retryNum = retryLimit
|
||||
gLog.Printf(LvINFO, " %s offline, it will auto reconnect when peer node online", config.PeerNode)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) autorunApp() {
|
||||
gLog.Println(LvINFO, "autorunApp start")
|
||||
for pn.running {
|
||||
pn.wgReconnect.Add(1)
|
||||
defer pn.wgReconnect.Done()
|
||||
for pn.running && pn.online {
|
||||
time.Sleep(time.Second)
|
||||
if !pn.online {
|
||||
continue
|
||||
}
|
||||
pn.runAll()
|
||||
time.Sleep(time.Second * 10)
|
||||
}
|
||||
gLog.Println(LvINFO, "autorunApp end")
|
||||
}
|
||||
@@ -164,54 +181,56 @@ func (pn *P2PNetwork) autorunApp() {
|
||||
func (pn *P2PNetwork) addRelayTunnel(config AppConfig) (*P2PTunnel, uint64, string, error) {
|
||||
gLog.Printf(LvINFO, "addRelayTunnel to %s start", config.PeerNode)
|
||||
defer gLog.Printf(LvINFO, "addRelayTunnel to %s end", config.PeerNode)
|
||||
// request a relay node or specify manually(TODO)
|
||||
pn.write(MsgRelay, MsgRelayNodeReq, &RelayNodeReq{config.PeerNode})
|
||||
head, body := pn.read("", MsgRelay, MsgRelayNodeRsp, time.Second*10)
|
||||
if head == nil {
|
||||
return nil, 0, "", errors.New("read MsgRelayNodeRsp error")
|
||||
}
|
||||
rsp := RelayNodeRsp{}
|
||||
err := json.Unmarshal(body, &rsp)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong RelayNodeRsp:%s", err)
|
||||
return nil, 0, "", errors.New("unmarshal MsgRelayNodeRsp error")
|
||||
}
|
||||
if rsp.RelayName == "" || rsp.RelayToken == 0 {
|
||||
gLog.Printf(LvERROR, "MsgRelayNodeReq error")
|
||||
return nil, 0, "", errors.New("MsgRelayNodeReq error")
|
||||
}
|
||||
gLog.Printf(LvINFO, "got relay node:%s", rsp.RelayName)
|
||||
relayConfig := config
|
||||
relayConfig.PeerNode = rsp.RelayName
|
||||
relayConfig.peerToken = rsp.RelayToken
|
||||
relayMode := "private"
|
||||
if config.RelayNode == "" {
|
||||
pn.write(MsgRelay, MsgRelayNodeReq, &RelayNodeReq{config.PeerNode})
|
||||
head, body := pn.read("", MsgRelay, MsgRelayNodeRsp, ClientAPITimeout)
|
||||
if head == nil {
|
||||
return nil, 0, "", errors.New("read MsgRelayNodeRsp error")
|
||||
}
|
||||
rsp := RelayNodeRsp{}
|
||||
if err := json.Unmarshal(body, &rsp); err != nil {
|
||||
return nil, 0, "", errors.New("unmarshal MsgRelayNodeRsp error")
|
||||
}
|
||||
if rsp.RelayName == "" || rsp.RelayToken == 0 {
|
||||
gLog.Printf(LvERROR, "MsgRelayNodeReq error")
|
||||
return nil, 0, "", errors.New("MsgRelayNodeReq error")
|
||||
}
|
||||
gLog.Printf(LvINFO, "got relay node:%s", rsp.RelayName)
|
||||
|
||||
relayConfig.PeerNode = rsp.RelayName
|
||||
relayConfig.peerToken = rsp.RelayToken
|
||||
relayMode = rsp.Mode
|
||||
} else {
|
||||
relayConfig.PeerNode = config.RelayNode
|
||||
}
|
||||
///
|
||||
t, err := pn.addDirectTunnel(relayConfig, 0)
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "direct connect error:", err)
|
||||
return nil, 0, "", err
|
||||
return nil, 0, "", ErrConnectRelayNode // relay offline will stop retry
|
||||
}
|
||||
// notify peer addRelayTunnel
|
||||
req := AddRelayTunnelReq{
|
||||
From: pn.config.Node,
|
||||
RelayName: rsp.RelayName,
|
||||
RelayToken: rsp.RelayToken,
|
||||
RelayName: relayConfig.PeerNode,
|
||||
RelayToken: relayConfig.peerToken,
|
||||
}
|
||||
gLog.Printf(LvINFO, "push relay %s---------%s", config.PeerNode, rsp.RelayName)
|
||||
gLog.Printf(LvINFO, "push relay %s---------%s", config.PeerNode, relayConfig.PeerNode)
|
||||
pn.push(config.PeerNode, MsgPushAddRelayTunnelReq, &req)
|
||||
|
||||
// wait relay ready
|
||||
head, body = pn.read(config.PeerNode, MsgPush, MsgPushAddRelayTunnelRsp, PeerAddRelayTimeount) // TODO: const value
|
||||
head, body := pn.read(config.PeerNode, MsgPush, MsgPushAddRelayTunnelRsp, PeerAddRelayTimeount)
|
||||
if head == nil {
|
||||
gLog.Printf(LvERROR, "read MsgPushAddRelayTunnelRsp error")
|
||||
return nil, 0, "", errors.New("read MsgPushAddRelayTunnelRsp error")
|
||||
}
|
||||
rspID := TunnelMsg{}
|
||||
err = json.Unmarshal(body, &rspID)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong RelayNodeRsp:%s", err)
|
||||
return nil, 0, "", errors.New("unmarshal MsgRelayNodeRsp error")
|
||||
if err = json.Unmarshal(body, &rspID); err != nil {
|
||||
return nil, 0, "", errors.New("peer connect relayNode error")
|
||||
}
|
||||
return t, rspID.ID, rsp.Mode, err
|
||||
return t, rspID.ID, relayMode, err
|
||||
}
|
||||
|
||||
// use *AppConfig to save status
|
||||
@@ -223,7 +242,7 @@ func (pn *P2PNetwork) AddApp(config AppConfig) error {
|
||||
}
|
||||
// check if app already exist?
|
||||
appExist := false
|
||||
_, ok := pn.apps.Load(fmt.Sprintf("%s%d", config.Protocol, config.SrcPort))
|
||||
_, ok := pn.apps.Load(config.ID())
|
||||
if ok {
|
||||
appExist = true
|
||||
}
|
||||
@@ -243,8 +262,6 @@ func (pn *P2PNetwork) AddApp(config AppConfig) error {
|
||||
peerNatType = t.config.peerNatType
|
||||
peerIP = t.config.peerIP
|
||||
}
|
||||
// TODO: if tcp failed, should try udp punching, nattype should refactor also, when NATNONE and failed we don't know the peerNatType
|
||||
|
||||
if err != nil && err == ErrorHandshake {
|
||||
gLog.Println(LvERROR, "direct connect failed, try to relay")
|
||||
t, rtid, relayMode, err = pn.addRelayTunnel(config)
|
||||
@@ -281,7 +298,7 @@ func (pn *P2PNetwork) AddApp(config AppConfig) error {
|
||||
AppID: appID,
|
||||
AppKey: appKey,
|
||||
}
|
||||
gLog.Printf(LvINFO, "sync appkey to %s", config.PeerNode)
|
||||
gLog.Printf(LvDEBUG, "sync appkey to %s", config.PeerNode)
|
||||
pn.push(config.PeerNode, MsgPushAPPKey, &req)
|
||||
}
|
||||
app := p2pApp{
|
||||
@@ -289,11 +306,13 @@ func (pn *P2PNetwork) AddApp(config AppConfig) error {
|
||||
key: appKey,
|
||||
tunnel: t,
|
||||
config: config,
|
||||
iptree: NewIPTree(config.Whitelist),
|
||||
rtid: rtid,
|
||||
relayNode: relayNode,
|
||||
relayMode: relayMode,
|
||||
hbTime: time.Now()}
|
||||
pn.apps.Store(fmt.Sprintf("%s%d", config.Protocol, config.SrcPort), &app)
|
||||
pn.apps.Store(config.ID(), &app)
|
||||
gLog.Printf(LvDEBUG, "%s use tunnel %d", app.config.AppName, app.tunnel.id)
|
||||
if err == nil {
|
||||
go app.listen()
|
||||
}
|
||||
@@ -304,81 +323,68 @@ func (pn *P2PNetwork) DeleteApp(config AppConfig) {
|
||||
gLog.Printf(LvINFO, "DeleteApp %s%d start", config.Protocol, config.SrcPort)
|
||||
defer gLog.Printf(LvINFO, "DeleteApp %s%d end", config.Protocol, config.SrcPort)
|
||||
// close the apps of this config
|
||||
i, ok := pn.apps.Load(fmt.Sprintf("%s%d", config.Protocol, config.SrcPort))
|
||||
i, ok := pn.apps.Load(config.ID())
|
||||
if ok {
|
||||
app := i.(*p2pApp)
|
||||
gLog.Printf(LvINFO, "app %s exist, delete it", fmt.Sprintf("%s%d", config.Protocol, config.SrcPort))
|
||||
gLog.Printf(LvINFO, "app %s exist, delete it", app.config.AppName)
|
||||
app.close()
|
||||
pn.apps.Delete(fmt.Sprintf("%s%d", config.Protocol, config.SrcPort))
|
||||
pn.apps.Delete(config.ID())
|
||||
}
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) addDirectTunnel(config AppConfig, tid uint64) (*P2PTunnel, error) {
|
||||
gLog.Printf(LvDEBUG, "addDirectTunnel %s%d to %s:%s:%d start", config.Protocol, config.SrcPort, config.PeerNode, config.DstHost, config.DstPort)
|
||||
defer gLog.Printf(LvDEBUG, "addDirectTunnel %s%d to %s:%s:%d end", config.Protocol, config.SrcPort, config.PeerNode, config.DstHost, config.DstPort)
|
||||
func (pn *P2PNetwork) findTunnel(config *AppConfig) (t *P2PTunnel) {
|
||||
// find existing tunnel to peer
|
||||
pn.allTunnels.Range(func(id, i interface{}) bool {
|
||||
tmpt := i.(*P2PTunnel)
|
||||
if tmpt.config.PeerNode == config.PeerNode {
|
||||
gLog.Println(LvINFO, "tunnel already exist ", config.PeerNode)
|
||||
isActive := tmpt.checkActive()
|
||||
// inactive, close it
|
||||
if !isActive {
|
||||
gLog.Println(LvINFO, "but it's not active, close it ", config.PeerNode)
|
||||
tmpt.close()
|
||||
} else {
|
||||
t = tmpt
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return t
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) addDirectTunnel(config AppConfig, tid uint64) (t *P2PTunnel, err error) {
|
||||
gLog.Printf(LvDEBUG, "addDirectTunnel %s%d to %s:%s:%d tid:%d start", config.Protocol, config.SrcPort, config.PeerNode, config.DstHost, config.DstPort, tid)
|
||||
defer gLog.Printf(LvDEBUG, "addDirectTunnel %s%d to %s:%s:%d tid:%d end", config.Protocol, config.SrcPort, config.PeerNode, config.DstHost, config.DstPort, tid)
|
||||
isClient := false
|
||||
// client side tid=0, assign random uint64
|
||||
if tid == 0 {
|
||||
tid = rand.Uint64()
|
||||
isClient = true
|
||||
}
|
||||
exist := false
|
||||
// find existing tunnel to peer
|
||||
var t *P2PTunnel
|
||||
pn.allTunnels.Range(func(id, i interface{}) bool {
|
||||
t = i.(*P2PTunnel)
|
||||
if t.config.PeerNode == config.PeerNode {
|
||||
// server side force close existing tunnel
|
||||
if !isClient {
|
||||
t.close()
|
||||
return false
|
||||
}
|
||||
if _, ok := pn.msgMap.Load(nodeNameToID(config.PeerNode)); !ok {
|
||||
pn.msgMap.Store(nodeNameToID(config.PeerNode), make(chan msgCtx, 50))
|
||||
}
|
||||
|
||||
// client side checking
|
||||
gLog.Println(LvINFO, "tunnel already exist ", config.PeerNode)
|
||||
isActive := t.checkActive()
|
||||
// inactive, close it
|
||||
if !isActive {
|
||||
gLog.Println(LvINFO, "but it's not active, close it ", config.PeerNode)
|
||||
t.close()
|
||||
} else {
|
||||
// active
|
||||
exist = true
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if exist {
|
||||
return t, nil
|
||||
}
|
||||
// create tunnel if not exist
|
||||
t = &P2PTunnel{pn: pn,
|
||||
config: config,
|
||||
id: tid,
|
||||
}
|
||||
pn.msgMapMtx.Lock()
|
||||
pn.msgMap[nodeNameToID(config.PeerNode)] = make(chan []byte, 50)
|
||||
pn.msgMapMtx.Unlock()
|
||||
// server side
|
||||
if !isClient {
|
||||
err := pn.newTunnel(t, tid, isClient)
|
||||
t, err = pn.newTunnel(config, tid, isClient)
|
||||
return t, err // always return
|
||||
}
|
||||
// client side
|
||||
// peer info
|
||||
initErr := t.requestPeerInfo()
|
||||
initErr := pn.requestPeerInfo(&config)
|
||||
if initErr != nil {
|
||||
gLog.Println(LvERROR, "init error:", initErr)
|
||||
|
||||
return nil, initErr
|
||||
}
|
||||
err := ErrorHandshake
|
||||
// try TCP6
|
||||
if IsIPv6(t.config.peerIPv6) && IsIPv6(t.pn.config.publicIPv6) {
|
||||
if IsIPv6(config.peerIPv6) && IsIPv6(gConf.IPv6()) {
|
||||
gLog.Println(LvINFO, "try TCP6")
|
||||
t.config.linkMode = LinkModeTCP6
|
||||
t.config.isUnderlayServer = 0
|
||||
if err = pn.newTunnel(t, tid, isClient); err == nil {
|
||||
config.linkMode = LinkModeTCP6
|
||||
config.isUnderlayServer = 0
|
||||
if t, err = pn.newTunnel(config, tid, isClient); err == nil {
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
@@ -386,61 +392,84 @@ func (pn *P2PNetwork) addDirectTunnel(config AppConfig, tid uint64) (*P2PTunnel,
|
||||
// TODO: try UDP6
|
||||
|
||||
// try TCP4
|
||||
if t.config.hasIPv4 == 1 || t.pn.config.hasIPv4 == 1 || t.config.hasUPNPorNATPMP == 1 || t.pn.config.hasUPNPorNATPMP == 1 {
|
||||
if config.hasIPv4 == 1 || pn.config.hasIPv4 == 1 || config.hasUPNPorNATPMP == 1 || pn.config.hasUPNPorNATPMP == 1 {
|
||||
gLog.Println(LvINFO, "try TCP4")
|
||||
t.config.linkMode = LinkModeTCP4
|
||||
if t.config.hasIPv4 == 1 || t.config.hasUPNPorNATPMP == 1 {
|
||||
t.config.isUnderlayServer = 0
|
||||
config.linkMode = LinkModeTCP4
|
||||
if config.hasIPv4 == 1 || config.hasUPNPorNATPMP == 1 {
|
||||
config.isUnderlayServer = 0
|
||||
} else {
|
||||
t.config.isUnderlayServer = 1
|
||||
config.isUnderlayServer = 1
|
||||
}
|
||||
if err = pn.newTunnel(t, tid, isClient); err == nil {
|
||||
if t, err = pn.newTunnel(config, tid, isClient); err == nil {
|
||||
return t, nil
|
||||
} else if config.hasIPv4 == 1 || config.hasUPNPorNATPMP == 1 { // peer has ipv4 no punching
|
||||
return nil, ErrConnectPublicV4
|
||||
}
|
||||
}
|
||||
// TODO: try UDP4
|
||||
|
||||
// try TCPPunch
|
||||
if t.config.peerNatType == NATCone && t.pn.config.natType == NATCone { // TODO: support c2s
|
||||
gLog.Println(LvINFO, "try TCP4 Punch")
|
||||
t.config.linkMode = LinkModeTCPPunch
|
||||
t.config.isUnderlayServer = 0
|
||||
if err = pn.newTunnel(t, tid, isClient); err == nil {
|
||||
return t, nil
|
||||
for i := 0; i < Cone2ConeTCPPunchMaxRetry; i++ { // when both 2 nats has restrict firewall, simultaneous punching needs to be very precise, it takes a few tries
|
||||
if config.peerNatType == NATCone && pn.config.natType == NATCone {
|
||||
gLog.Println(LvINFO, "try TCP4 Punch")
|
||||
config.linkMode = LinkModeTCPPunch
|
||||
config.isUnderlayServer = 0
|
||||
if t, err = pn.newTunnel(config, tid, isClient); err == nil {
|
||||
gLog.Println(LvINFO, "TCP4 Punch ok")
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// try UDPPunch
|
||||
if t.config.peerNatType == NATCone || t.pn.config.natType == NATCone {
|
||||
gLog.Println(LvINFO, "try UDP4 Punch")
|
||||
t.config.linkMode = LinkModeUDPPunch
|
||||
t.config.isUnderlayServer = 0
|
||||
if err = pn.newTunnel(t, tid, isClient); err == nil {
|
||||
return t, nil
|
||||
for i := 0; i < Cone2ConeUDPPunchMaxRetry; i++ { // when both 2 nats has restrict firewall, simultaneous punching needs to be very precise, it takes a few tries
|
||||
if config.peerNatType == NATCone || pn.config.natType == NATCone {
|
||||
gLog.Println(LvINFO, "try UDP4 Punch")
|
||||
config.linkMode = LinkModeUDPPunch
|
||||
config.isUnderlayServer = 0
|
||||
if t, err = pn.newTunnel(config, tid, isClient); err == nil {
|
||||
return t, nil
|
||||
}
|
||||
}
|
||||
if !(config.peerNatType == NATCone && pn.config.natType == NATCone) { // not cone2cone, no more try
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
return nil, ErrorHandshake // only ErrorHandshake will try relay
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) newTunnel(t *P2PTunnel, tid uint64, isClient bool) error {
|
||||
func (pn *P2PNetwork) newTunnel(config AppConfig, tid uint64, isClient bool) (t *P2PTunnel, err error) {
|
||||
if isClient { // only client side find existing tunnel
|
||||
if existTunnel := pn.findTunnel(&config); existTunnel != nil {
|
||||
return existTunnel, nil
|
||||
}
|
||||
}
|
||||
|
||||
t = &P2PTunnel{pn: pn,
|
||||
config: config,
|
||||
id: tid,
|
||||
}
|
||||
t.initPort()
|
||||
if isClient {
|
||||
if err := t.connect(); err != nil {
|
||||
if err = t.connect(); err != nil {
|
||||
gLog.Println(LvERROR, "p2pTunnel connect error:", err)
|
||||
return err
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if err := t.listen(); err != nil {
|
||||
if err = t.listen(); err != nil {
|
||||
gLog.Println(LvERROR, "p2pTunnel listen error:", err)
|
||||
return err
|
||||
return
|
||||
}
|
||||
}
|
||||
// store it when success
|
||||
gLog.Printf(LvDEBUG, "store tunnel %d", tid)
|
||||
pn.allTunnels.Store(tid, t)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
func (pn *P2PNetwork) init() error {
|
||||
gLog.Println(LvINFO, "init start")
|
||||
gLog.Println(LvINFO, "P2PNetwork start")
|
||||
pn.wgReconnect.Add(1)
|
||||
defer pn.wgReconnect.Done()
|
||||
var err error
|
||||
for {
|
||||
// detect nat type
|
||||
@@ -450,22 +479,41 @@ func (pn *P2PNetwork) init() error {
|
||||
pn.config.natType = NATSymmetric
|
||||
pn.config.hasIPv4 = 0
|
||||
pn.config.hasUPNPorNATPMP = 0
|
||||
gLog.Println(LvINFO, "openp2pS2STest debug")
|
||||
|
||||
}
|
||||
if strings.Contains(pn.config.Node, "openp2pC2CTest") {
|
||||
pn.config.natType = NATCone
|
||||
pn.config.hasIPv4 = 0
|
||||
pn.config.hasUPNPorNATPMP = 0
|
||||
gLog.Println(LvINFO, "openp2pC2CTest debug")
|
||||
}
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "detect NAT type error:", err)
|
||||
break
|
||||
}
|
||||
gLog.Println(LvDEBUG, "detect NAT type:", pn.config.natType, " publicIP:", pn.config.publicIP)
|
||||
if pn.config.hasIPv4 == 1 || pn.config.hasUPNPorNATPMP == 1 {
|
||||
onceV4Listener.Do(func() {
|
||||
v4l = &v4Listener{port: gConf.Network.TCPPort}
|
||||
go v4l.start()
|
||||
})
|
||||
}
|
||||
gLog.Printf(LvINFO, "hasIPv4:%d, UPNP:%d, NAT type:%d, publicIP:%s", pn.config.hasIPv4, pn.config.hasUPNPorNATPMP, pn.config.natType, pn.config.publicIP)
|
||||
gatewayURL := fmt.Sprintf("%s:%d", pn.config.ServerHost, pn.config.ServerPort)
|
||||
uri := "/openp2p/v1/login"
|
||||
config := tls.Config{InsecureSkipVerify: true} // let's encrypt root cert "DST Root CA X3" expired at 2021/09/29. many old system(windows server 2008 etc) will not trust our cert
|
||||
uri := "/api/v1/login"
|
||||
caCertPool, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "Failed to load system root CAs:", err)
|
||||
} else {
|
||||
caCertPool = x509.NewCertPool()
|
||||
}
|
||||
caCertPool.AppendCertsFromPEM([]byte(rootCA))
|
||||
caCertPool.AppendCertsFromPEM([]byte(ISRGRootX1))
|
||||
config := tls.Config{
|
||||
RootCAs: caCertPool,
|
||||
InsecureSkipVerify: false} // let's encrypt root cert "DST Root CA X3" expired at 2021/09/29. many old system(windows server 2008 etc) will not trust our cert
|
||||
websocket.DefaultDialer.TLSClientConfig = &config
|
||||
websocket.DefaultDialer.HandshakeTimeout = ClientAPITimeout
|
||||
u := url.URL{Scheme: "wss", Host: gatewayURL, Path: uri}
|
||||
q := u.Query()
|
||||
q.Add("node", pn.config.Node)
|
||||
@@ -477,6 +525,7 @@ func (pn *P2PNetwork) init() error {
|
||||
var ws *websocket.Conn
|
||||
ws, _, err = websocket.DefaultDialer.Dial(u.String(), nil)
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "Dial error:", err)
|
||||
break
|
||||
}
|
||||
pn.online = true
|
||||
@@ -488,30 +537,32 @@ func (pn *P2PNetwork) init() error {
|
||||
err = errors.New("get local ip failed")
|
||||
break
|
||||
}
|
||||
|
||||
go pn.readLoop()
|
||||
pn.config.mac = getmac(pn.config.localIP)
|
||||
pn.config.os = getOsName()
|
||||
|
||||
req := ReportBasic{
|
||||
Mac: pn.config.mac,
|
||||
LanIP: pn.config.localIP,
|
||||
OS: pn.config.os,
|
||||
HasIPv4: pn.config.hasIPv4,
|
||||
HasUPNPorNATPMP: pn.config.hasUPNPorNATPMP,
|
||||
Version: OpenP2PVersion,
|
||||
}
|
||||
rsp := netInfo()
|
||||
gLog.Println(LvDEBUG, "netinfo:", rsp)
|
||||
if rsp != nil && rsp.Country != "" {
|
||||
if IsIPv6(rsp.IP.String()) {
|
||||
pn.config.publicIPv6 = rsp.IP.String()
|
||||
go func() {
|
||||
req := ReportBasic{
|
||||
Mac: pn.config.mac,
|
||||
LanIP: pn.config.localIP,
|
||||
OS: pn.config.os,
|
||||
HasIPv4: pn.config.hasIPv4,
|
||||
HasUPNPorNATPMP: pn.config.hasUPNPorNATPMP,
|
||||
Version: OpenP2PVersion,
|
||||
}
|
||||
req.NetInfo = *rsp
|
||||
} else {
|
||||
pn.refreshIPv6(true)
|
||||
}
|
||||
req.IPv6 = pn.config.publicIPv6
|
||||
pn.write(MsgReport, MsgReportBasic, &req)
|
||||
rsp := netInfo()
|
||||
gLog.Println(LvDEBUG, "netinfo:", rsp)
|
||||
if rsp != nil && rsp.Country != "" {
|
||||
if IsIPv6(rsp.IP.String()) {
|
||||
gConf.setIPv6(rsp.IP.String())
|
||||
}
|
||||
req.NetInfo = *rsp
|
||||
} else {
|
||||
pn.refreshIPv6(true)
|
||||
}
|
||||
req.IPv6 = gConf.IPv6()
|
||||
pn.write(MsgReport, MsgReportBasic, &req)
|
||||
}()
|
||||
go pn.autorunApp()
|
||||
gLog.Println(LvDEBUG, "P2PNetwork init ok")
|
||||
break
|
||||
}
|
||||
@@ -534,46 +585,61 @@ func (pn *P2PNetwork) handleMessage(t int, msg []byte) {
|
||||
case MsgLogin:
|
||||
// gLog.Println(LevelINFO,string(msg))
|
||||
rsp := LoginRsp{}
|
||||
err = json.Unmarshal(msg[openP2PHeaderSize:], &rsp)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong login response:%s", err)
|
||||
if err = json.Unmarshal(msg[openP2PHeaderSize:], &rsp); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(rsp), err)
|
||||
return
|
||||
}
|
||||
if rsp.Error != 0 {
|
||||
gLog.Printf(LvERROR, "login error:%d, detail:%s", rsp.Error, rsp.Detail)
|
||||
pn.running = false
|
||||
} else {
|
||||
pn.serverTs = rsp.Ts
|
||||
pn.hbTime = time.Now()
|
||||
pn.config.Token = rsp.Token
|
||||
pn.config.User = rsp.User
|
||||
gConf.setToken(rsp.Token)
|
||||
gConf.setUser(rsp.User)
|
||||
if len(rsp.Node) >= MinNodeNameLen {
|
||||
gConf.setNode(rsp.Node)
|
||||
pn.config.Node = rsp.Node
|
||||
}
|
||||
gConf.save()
|
||||
pn.localTs = time.Now().Unix()
|
||||
gLog.Printf(LvINFO, "login ok. user=%s,node=%s,Server ts=%d, local ts=%d", rsp.User, rsp.Node, rsp.Ts, pn.localTs)
|
||||
gLog.Printf(LvINFO, "login ok. user=%s,node=%s", rsp.User, rsp.Node)
|
||||
}
|
||||
case MsgHeartbeat:
|
||||
gLog.Printf(LvDEBUG, "P2PNetwork heartbeat ok")
|
||||
pn.hbTime = time.Now()
|
||||
rtt := pn.hbTime.UnixNano() - pn.t1
|
||||
t2 := int64(binary.LittleEndian.Uint64(msg[openP2PHeaderSize : openP2PHeaderSize+8]))
|
||||
dt := pn.t1 + rtt/2 - t2
|
||||
if pn.dt != 0 {
|
||||
ddt := dt - pn.dt
|
||||
pn.ddt = ddt
|
||||
if pn.ddtma == 0 {
|
||||
pn.ddtma = pn.ddt
|
||||
} else {
|
||||
pn.ddtma = int64(float64(pn.ddtma)*(1-ma10) + float64(pn.ddt)*ma10) // avoid int64 overflow
|
||||
newdt := pn.dt + pn.ddtma
|
||||
// gLog.Printf(LvDEBUG, "server time auto adjust dt=%.2fms to %.2fms", float64(dt)/float64(time.Millisecond), float64(newdt)/float64(time.Millisecond))
|
||||
dt = newdt
|
||||
}
|
||||
}
|
||||
pn.dt = dt
|
||||
gLog.Printf(LvDEBUG, "synctime dt=%dms ddt=%dns ddtma=%dns rtt=%dms ", pn.dt/int64(time.Millisecond), pn.ddt, pn.ddtma, rtt/int64(time.Millisecond))
|
||||
case MsgPush:
|
||||
handlePush(pn, head.SubType, msg)
|
||||
default:
|
||||
pn.msgMapMtx.Lock()
|
||||
ch := pn.msgMap[0]
|
||||
pn.msgMapMtx.Unlock()
|
||||
ch <- msg
|
||||
i, ok := pn.msgMap.Load(uint64(0))
|
||||
if ok {
|
||||
ch := i.(chan msgCtx)
|
||||
ch <- msgCtx{data: msg, ts: time.Now()}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) readLoop() {
|
||||
gLog.Printf(LvDEBUG, "P2PNetwork readLoop start")
|
||||
pn.wg.Add(1)
|
||||
defer pn.wg.Done()
|
||||
pn.wgReconnect.Add(1)
|
||||
defer pn.wgReconnect.Done()
|
||||
for pn.running {
|
||||
pn.conn.SetReadDeadline(time.Now().Add(NetworkHeartbeatTime + 10*time.Second))
|
||||
t, msg, err := pn.conn.ReadMessage()
|
||||
@@ -606,17 +672,20 @@ func (pn *P2PNetwork) write(mainType uint16, subType uint16, packet interface{})
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) relay(to uint64, body []byte) error {
|
||||
gLog.Printf(LvDEBUG, "relay data to %d", to)
|
||||
i, ok := pn.allTunnels.Load(to)
|
||||
if !ok {
|
||||
return nil
|
||||
gLog.Printf(LvERROR, "relay to %d len=%d error:%s", to, len(body), ErrRelayTunnelNotFound)
|
||||
return ErrRelayTunnelNotFound
|
||||
}
|
||||
tunnel := i.(*P2PTunnel)
|
||||
if tunnel.config.shareBandwidth > 0 {
|
||||
pn.limiter.Add(len(body))
|
||||
pn.limiter.Add(len(body), true)
|
||||
}
|
||||
tunnel.conn.WriteBuffer(body)
|
||||
return nil
|
||||
var err error
|
||||
if err = tunnel.conn.WriteBuffer(body); err != nil {
|
||||
gLog.Printf(LvERROR, "relay to %d len=%d error:%s", to, len(body), err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) push(to string, subType uint16, packet interface{}) error {
|
||||
@@ -655,9 +724,11 @@ func (pn *P2PNetwork) read(node string, mainType uint16, subType uint16, timeout
|
||||
} else {
|
||||
nodeID = nodeNameToID(node)
|
||||
}
|
||||
pn.msgMapMtx.Lock()
|
||||
ch := pn.msgMap[nodeID]
|
||||
pn.msgMapMtx.Unlock()
|
||||
i, ok := pn.msgMap.Load(nodeID)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ch := i.(chan msgCtx)
|
||||
for {
|
||||
select {
|
||||
case <-time.After(timeout):
|
||||
@@ -665,18 +736,25 @@ func (pn *P2PNetwork) read(node string, mainType uint16, subType uint16, timeout
|
||||
return
|
||||
case msg := <-ch:
|
||||
head = &openP2PHeader{}
|
||||
err := binary.Read(bytes.NewReader(msg[:openP2PHeaderSize]), binary.LittleEndian, head)
|
||||
err := binary.Read(bytes.NewReader(msg.data[:openP2PHeaderSize]), binary.LittleEndian, head)
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "read msg error:", err)
|
||||
break
|
||||
}
|
||||
if time.Since(msg.ts) > ReadMsgTimeout {
|
||||
gLog.Printf(LvDEBUG, "msg expired error %d:%d", head.MainType, head.SubType)
|
||||
continue
|
||||
}
|
||||
if head.MainType != mainType || head.SubType != subType {
|
||||
gLog.Printf(LvDEBUG, "read msg type error %d:%d, requeue it", head.MainType, head.SubType)
|
||||
ch <- msg
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if mainType == MsgPush {
|
||||
body = msg[openP2PHeaderSize+PushHeaderSize:]
|
||||
body = msg.data[openP2PHeaderSize+PushHeaderSize:]
|
||||
} else {
|
||||
body = msg[openP2PHeaderSize:]
|
||||
body = msg.data[openP2PHeaderSize:]
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -694,22 +772,54 @@ func (pn *P2PNetwork) updateAppHeartbeat(appID uint64) {
|
||||
})
|
||||
}
|
||||
|
||||
// ipv6 will expired need to refresh.
|
||||
func (pn *P2PNetwork) refreshIPv6(force bool) {
|
||||
if !force && !IsIPv6(pn.config.publicIPv6) { // not support ipv6, not refresh
|
||||
if !force && !IsIPv6(gConf.IPv6()) { // not support ipv6, not refresh
|
||||
return
|
||||
}
|
||||
client := &http.Client{Timeout: time.Second * 10}
|
||||
r, err := client.Get("http://6.ipw.cn")
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "refreshIPv6 error:", err)
|
||||
return
|
||||
for i := 0; i < 3; i++ {
|
||||
client := &http.Client{Timeout: time.Second * 10}
|
||||
r, err := client.Get("http://6.ipw.cn")
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "refreshIPv6 error:", err)
|
||||
continue
|
||||
}
|
||||
defer r.Body.Close()
|
||||
buf := make([]byte, 1024)
|
||||
n, err := r.Body.Read(buf)
|
||||
if n <= 0 {
|
||||
gLog.Println(LvINFO, "refreshIPv6 error:", err, n)
|
||||
continue
|
||||
}
|
||||
gConf.setIPv6(string(buf[:n]))
|
||||
break
|
||||
}
|
||||
defer r.Body.Close()
|
||||
buf := make([]byte, 1024)
|
||||
n, err := r.Body.Read(buf)
|
||||
if n <= 0 {
|
||||
gLog.Println(LvINFO, "refreshIPv6 error:", err, n)
|
||||
return
|
||||
}
|
||||
pn.config.publicIPv6 = string(buf[:n])
|
||||
|
||||
}
|
||||
|
||||
func (pn *P2PNetwork) requestPeerInfo(config *AppConfig) error {
|
||||
// request peer info
|
||||
pn.write(MsgQuery, MsgQueryPeerInfoReq, &QueryPeerInfoReq{config.peerToken, config.PeerNode})
|
||||
head, body := pn.read("", MsgQuery, MsgQueryPeerInfoRsp, ClientAPITimeout)
|
||||
if head == nil {
|
||||
return ErrNetwork // network error, should not be ErrPeerOffline
|
||||
}
|
||||
rsp := QueryPeerInfoRsp{}
|
||||
if err := json.Unmarshal(body, &rsp); err != nil {
|
||||
return ErrMsgFormat
|
||||
}
|
||||
if rsp.Online == 0 {
|
||||
return ErrPeerOffline
|
||||
}
|
||||
if compareVersion(rsp.Version, LeastSupportVersion) == LESS {
|
||||
return ErrVersionNotCompatible
|
||||
}
|
||||
config.peerVersion = rsp.Version
|
||||
config.hasIPv4 = rsp.HasIPv4
|
||||
config.peerIP = rsp.IPv4
|
||||
config.peerIPv6 = rsp.IPv6
|
||||
config.hasUPNPorNATPMP = rsp.HasUPNPorNATPMP
|
||||
config.peerNatType = rsp.NatType
|
||||
///
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
@@ -17,7 +18,6 @@ type P2PTunnel struct {
|
||||
conn underlay
|
||||
hbTime time.Time
|
||||
hbMtx sync.Mutex
|
||||
hbTimeRelay time.Time
|
||||
config AppConfig
|
||||
la *net.UDPAddr // local hole address
|
||||
ra *net.UDPAddr // remote hole address
|
||||
@@ -29,60 +29,26 @@ type P2PTunnel struct {
|
||||
coneLocalPort int
|
||||
coneNatPort int
|
||||
linkModeWeb string // use config.linkmode
|
||||
punchTs uint64
|
||||
}
|
||||
|
||||
func (t *P2PTunnel) requestPeerInfo() error {
|
||||
// request peer info
|
||||
t.pn.write(MsgQuery, MsgQueryPeerInfoReq, &QueryPeerInfoReq{t.config.peerToken, t.config.PeerNode})
|
||||
head, body := t.pn.read("", MsgQuery, MsgQueryPeerInfoRsp, time.Second*10)
|
||||
if head == nil {
|
||||
return ErrPeerOffline
|
||||
}
|
||||
rsp := QueryPeerInfoRsp{}
|
||||
err := json.Unmarshal(body, &rsp)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong QueryPeerInfoRsp:%s", err)
|
||||
return ErrMsgFormat
|
||||
}
|
||||
if rsp.Online == 0 {
|
||||
return ErrPeerOffline
|
||||
}
|
||||
if compareVersion(rsp.Version, LeastSupportVersion) == LESS {
|
||||
return ErrVersionNotCompatible
|
||||
}
|
||||
t.config.peerVersion = rsp.Version
|
||||
t.config.hasIPv4 = rsp.HasIPv4
|
||||
t.config.peerIP = rsp.IPv4
|
||||
t.config.peerIPv6 = rsp.IPv6
|
||||
t.config.hasUPNPorNATPMP = rsp.HasUPNPorNATPMP
|
||||
t.config.peerNatType = rsp.NatType
|
||||
///
|
||||
return nil
|
||||
}
|
||||
func (t *P2PTunnel) initPort() {
|
||||
t.running = true
|
||||
t.hbMtx.Lock()
|
||||
t.hbTime = time.Now()
|
||||
t.hbMtx.Unlock()
|
||||
t.hbTimeRelay = time.Now().Add(time.Second * 600) // TODO: test fake time
|
||||
localPort := int(rand.Uint32()%15000 + 50000) // if the process has bug, will add many upnp port. use specify p2p port by param
|
||||
if t.config.linkMode == LinkModeTCP6 {
|
||||
t.pn.refreshIPv6(false)
|
||||
}
|
||||
localPort := int(rand.Uint32()%15000 + 50000) // if the process has bug, will add many upnp port. use specify p2p port by param
|
||||
if t.config.linkMode == LinkModeTCP6 || t.config.linkMode == LinkModeTCP4 {
|
||||
t.coneLocalPort = t.pn.config.TCPPort
|
||||
t.coneNatPort = t.pn.config.TCPPort // symmetric doesn't need coneNatPort
|
||||
}
|
||||
if t.config.linkMode == LinkModeUDPPunch {
|
||||
// prepare one random cone hole
|
||||
// prepare one random cone hole manually
|
||||
_, natPort, _ := natTest(t.pn.config.ServerHost, t.pn.config.UDPPort1, localPort)
|
||||
t.coneLocalPort = localPort
|
||||
t.coneNatPort = natPort
|
||||
}
|
||||
if t.config.linkMode == LinkModeTCPPunch {
|
||||
// prepare one random cone hole
|
||||
_, natPort := natTCP(t.pn.config.ServerHost, IfconfigPort1, localPort)
|
||||
t.coneLocalPort = localPort
|
||||
// prepare one random cone hole by system automatically
|
||||
_, natPort, localPort2 := natTCP(t.pn.config.ServerHost, IfconfigPort1)
|
||||
t.coneLocalPort = localPort2
|
||||
t.coneNatPort = natPort
|
||||
}
|
||||
t.la = &net.UDPAddr{IP: net.ParseIP(t.pn.config.localIP), Port: t.coneLocalPort}
|
||||
@@ -100,26 +66,25 @@ func (t *P2PTunnel) connect() error {
|
||||
ConeNatPort: t.coneNatPort,
|
||||
NatType: t.pn.config.natType,
|
||||
HasIPv4: t.pn.config.hasIPv4,
|
||||
IPv6: t.pn.config.publicIPv6,
|
||||
IPv6: gConf.IPv6(),
|
||||
HasUPNPorNATPMP: t.pn.config.hasUPNPorNATPMP,
|
||||
ID: t.id,
|
||||
AppKey: appKey,
|
||||
Version: OpenP2PVersion,
|
||||
LinkMode: t.config.linkMode,
|
||||
IsUnderlayServer: t.config.isUnderlayServer ^ 1,
|
||||
IsUnderlayServer: t.config.isUnderlayServer ^ 1, // peer
|
||||
}
|
||||
if req.Token == 0 { // no relay token
|
||||
req.Token = t.pn.config.Token
|
||||
}
|
||||
t.pn.push(t.config.PeerNode, MsgPushConnectReq, req)
|
||||
head, body := t.pn.read(t.config.PeerNode, MsgPush, MsgPushConnectRsp, time.Second*10)
|
||||
head, body := t.pn.read(t.config.PeerNode, MsgPush, MsgPushConnectRsp, UnderlayConnectTimeout*3)
|
||||
if head == nil {
|
||||
return errors.New("connect error")
|
||||
}
|
||||
rsp := PushConnectRsp{}
|
||||
err := json.Unmarshal(body, &rsp)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong MsgPushConnectRsp:%s", err)
|
||||
if err := json.Unmarshal(body, &rsp); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(rsp), err)
|
||||
return err
|
||||
}
|
||||
// gLog.Println(LevelINFO, rsp)
|
||||
@@ -133,7 +98,8 @@ func (t *P2PTunnel) connect() error {
|
||||
t.config.peerVersion = rsp.Version
|
||||
t.config.peerConeNatPort = rsp.ConeNatPort
|
||||
t.config.peerIP = rsp.FromIP
|
||||
err = t.start()
|
||||
t.punchTs = rsp.PunchTs
|
||||
err := t.start()
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "handshake error:", err)
|
||||
err = ErrorHandshake
|
||||
@@ -154,20 +120,19 @@ func (t *P2PTunnel) setRun(running bool) {
|
||||
}
|
||||
|
||||
func (t *P2PTunnel) isActive() bool {
|
||||
if !t.isRuning() || t.conn == nil {
|
||||
return false
|
||||
}
|
||||
t.hbMtx.Lock()
|
||||
defer t.hbMtx.Unlock()
|
||||
return time.Now().Before(t.hbTime.Add(TunnelIdleTimeout))
|
||||
return time.Now().Before(t.hbTime.Add(TunnelHeartbeatTime * 2))
|
||||
}
|
||||
|
||||
func (t *P2PTunnel) checkActive() bool {
|
||||
hbt := time.Now()
|
||||
t.hbMtx.Lock()
|
||||
if t.hbTime.Before(time.Now().Add(-TunnelHeartbeatTime)) {
|
||||
t.hbMtx.Unlock()
|
||||
if !t.isActive() {
|
||||
return false
|
||||
}
|
||||
t.hbMtx.Unlock()
|
||||
// hbtime within TunnelHeartbeatTime, check it now
|
||||
hbt := time.Now()
|
||||
t.conn.WriteBytes(MsgP2P, MsgTunnelHeartbeat, nil)
|
||||
isActive := false
|
||||
// wait at most 5s
|
||||
@@ -179,6 +144,7 @@ func (t *P2PTunnel) checkActive() bool {
|
||||
t.hbMtx.Unlock()
|
||||
time.Sleep(time.Millisecond * 100)
|
||||
}
|
||||
gLog.Printf(LvINFO, "checkActive %t. hbtime=%d", isActive, t.hbTime)
|
||||
return isActive
|
||||
}
|
||||
|
||||
@@ -210,9 +176,15 @@ func (t *P2PTunnel) handshake() error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if compareVersion(t.config.peerVersion, SyncServerTimeVersion) == LESS {
|
||||
gLog.Printf(LvDEBUG, "peer version %s less than %s", t.config.peerVersion, SyncServerTimeVersion)
|
||||
} else {
|
||||
ts := time.Duration(int64(t.punchTs) + t.pn.dt + t.pn.ddtma*int64(time.Since(t.pn.hbTime)+PunchTsDelay)/int64(NetworkHeartbeatTime) - time.Now().UnixNano())
|
||||
gLog.Printf(LvDEBUG, "sleep %d ms", ts/time.Millisecond)
|
||||
time.Sleep(ts)
|
||||
}
|
||||
gLog.Println(LvDEBUG, "handshake to ", t.config.PeerNode)
|
||||
var err error
|
||||
// TODO: handle NATNone, nodes with public ip has no punching
|
||||
if t.pn.config.natType == NATCone && t.config.peerNatType == NATCone {
|
||||
err = handshakeC2C(t)
|
||||
} else if t.config.peerNatType == NATSymmetric && t.pn.config.natType == NATSymmetric {
|
||||
@@ -260,30 +232,30 @@ func (t *P2PTunnel) connectUnderlay() (err error) {
|
||||
func (t *P2PTunnel) connectUnderlayQuic() (c underlay, err error) {
|
||||
gLog.Println(LvINFO, "connectUnderlayQuic start")
|
||||
defer gLog.Println(LvINFO, "connectUnderlayQuic end")
|
||||
var qConn *underlayQUIC
|
||||
var ul *underlayQUIC
|
||||
if t.config.isUnderlayServer == 1 {
|
||||
time.Sleep(time.Millisecond * 10) // punching udp port will need some times in some env
|
||||
qConn, err = listenQuic(t.la.String(), TunnelIdleTimeout)
|
||||
ul, err = listenQuic(t.la.String(), TunnelIdleTimeout)
|
||||
if err != nil {
|
||||
gLog.Println(LvINFO, "listen quic error:", err, ", retry...")
|
||||
}
|
||||
t.pn.push(t.config.PeerNode, MsgPushUnderlayConnect, nil)
|
||||
err = qConn.Accept()
|
||||
err = ul.Accept()
|
||||
if err != nil {
|
||||
qConn.CloseListener()
|
||||
ul.CloseListener()
|
||||
return nil, fmt.Errorf("accept quic error:%s", err)
|
||||
}
|
||||
_, buff, err := qConn.ReadBuffer()
|
||||
_, buff, err := ul.ReadBuffer()
|
||||
if err != nil {
|
||||
qConn.listener.Close()
|
||||
ul.listener.Close()
|
||||
return nil, fmt.Errorf("read start msg error:%s", err)
|
||||
}
|
||||
if buff != nil {
|
||||
gLog.Println(LvDEBUG, string(buff))
|
||||
}
|
||||
qConn.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, []byte("OpenP2P,hello2"))
|
||||
ul.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, []byte("OpenP2P,hello2"))
|
||||
gLog.Println(LvDEBUG, "quic connection ok")
|
||||
return qConn, nil
|
||||
return ul, nil
|
||||
}
|
||||
|
||||
//else
|
||||
@@ -295,17 +267,17 @@ func (t *P2PTunnel) connectUnderlayQuic() (c underlay, err error) {
|
||||
return nil, fmt.Errorf("quic listen error:%s", e)
|
||||
}
|
||||
}
|
||||
t.pn.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, time.Second*5)
|
||||
t.pn.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, ReadMsgTimeout)
|
||||
gLog.Println(LvDEBUG, "quic dial to ", t.ra.String())
|
||||
qConn, e = dialQuic(conn, t.ra, TunnelIdleTimeout)
|
||||
ul, e = dialQuic(conn, t.ra, TunnelIdleTimeout)
|
||||
if e != nil {
|
||||
return nil, fmt.Errorf("quic dial to %s error:%s", t.ra.String(), e)
|
||||
}
|
||||
handshakeBegin := time.Now()
|
||||
qConn.WriteBytes(MsgP2P, MsgTunnelHandshake, []byte("OpenP2P,hello"))
|
||||
_, buff, err := qConn.ReadBuffer()
|
||||
ul.WriteBytes(MsgP2P, MsgTunnelHandshake, []byte("OpenP2P,hello"))
|
||||
_, buff, err := ul.ReadBuffer()
|
||||
if e != nil {
|
||||
qConn.listener.Close()
|
||||
ul.listener.Close()
|
||||
return nil, fmt.Errorf("read MsgTunnelHandshake error:%s", err)
|
||||
}
|
||||
if buff != nil {
|
||||
@@ -315,91 +287,92 @@ func (t *P2PTunnel) connectUnderlayQuic() (c underlay, err error) {
|
||||
gLog.Println(LvINFO, "rtt=", time.Since(handshakeBegin))
|
||||
gLog.Println(LvDEBUG, "quic connection ok")
|
||||
t.linkModeWeb = LinkModeUDPPunch
|
||||
return qConn, nil
|
||||
return ul, nil
|
||||
}
|
||||
|
||||
// websocket
|
||||
func (t *P2PTunnel) connectUnderlayTCP() (c underlay, err error) {
|
||||
gLog.Println(LvINFO, "connectUnderlayTCP start")
|
||||
defer gLog.Println(LvINFO, "connectUnderlayTCP end")
|
||||
var qConn *underlayTCP
|
||||
gLog.Println(LvDEBUG, "connectUnderlayTCP start")
|
||||
defer gLog.Println(LvDEBUG, "connectUnderlayTCP end")
|
||||
var ul *underlayTCP
|
||||
if t.config.isUnderlayServer == 1 {
|
||||
t.pn.push(t.config.PeerNode, MsgPushUnderlayConnect, nil)
|
||||
qConn, err = listenTCP(t.config.peerIP, t.config.peerConeNatPort, t.coneLocalPort, t.config.linkMode)
|
||||
ul, err = listenTCP(t.config.peerIP, t.config.peerConeNatPort, t.coneLocalPort, t.config.linkMode, t)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listen TCP error:%s", err)
|
||||
}
|
||||
|
||||
_, buff, err := qConn.ReadBuffer()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read start msg error:%s", err)
|
||||
}
|
||||
if buff != nil {
|
||||
gLog.Println(LvDEBUG, string(buff))
|
||||
}
|
||||
qConn.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, []byte("OpenP2P,hello2"))
|
||||
gLog.Println(LvINFO, "TCP connection ok")
|
||||
return qConn, nil
|
||||
return ul, nil
|
||||
}
|
||||
|
||||
//else
|
||||
t.pn.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, time.Second*5)
|
||||
gLog.Println(LvDEBUG, "TCP dial to ", t.config.peerIP, ":", t.config.peerConeNatPort)
|
||||
qConn, err = dialTCP(t.config.peerIP, t.config.peerConeNatPort, t.coneLocalPort, t.config.linkMode)
|
||||
// client side
|
||||
if t.config.linkMode == LinkModeTCP4 {
|
||||
t.pn.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, ReadMsgTimeout)
|
||||
} else { //tcp punch should sleep for punch the same time
|
||||
if compareVersion(t.config.peerVersion, SyncServerTimeVersion) == LESS {
|
||||
gLog.Printf(LvDEBUG, "peer version %s less than %s", t.config.peerVersion, SyncServerTimeVersion)
|
||||
} else {
|
||||
ts := time.Duration(int64(t.punchTs) + t.pn.dt + t.pn.ddtma*int64(time.Since(t.pn.hbTime)+PunchTsDelay)/int64(NetworkHeartbeatTime) - time.Now().UnixNano())
|
||||
gLog.Printf(LvDEBUG, "sleep %d ms", ts/time.Millisecond)
|
||||
time.Sleep(ts)
|
||||
}
|
||||
}
|
||||
ul, err = dialTCP(t.config.peerIP, t.config.peerConeNatPort, t.coneLocalPort, t.config.linkMode)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("TCP dial to %s:%d error:%s", t.config.peerIP, t.config.peerConeNatPort, err)
|
||||
}
|
||||
handshakeBegin := time.Now()
|
||||
qConn.WriteBytes(MsgP2P, MsgTunnelHandshake, []byte("OpenP2P,hello"))
|
||||
_, buff, err := qConn.ReadBuffer()
|
||||
tidBuff := new(bytes.Buffer)
|
||||
binary.Write(tidBuff, binary.LittleEndian, t.id)
|
||||
ul.WriteBytes(MsgP2P, MsgTunnelHandshake, tidBuff.Bytes()) // tunnelID
|
||||
_, buff, err := ul.ReadBuffer()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read MsgTunnelHandshake error:%s", err)
|
||||
}
|
||||
if buff != nil {
|
||||
gLog.Println(LvDEBUG, string(buff))
|
||||
gLog.Println(LvDEBUG, "hello ", string(buff))
|
||||
}
|
||||
|
||||
gLog.Println(LvINFO, "rtt=", time.Since(handshakeBegin))
|
||||
gLog.Println(LvINFO, "TCP connection ok")
|
||||
t.linkModeWeb = LinkModeIPv4
|
||||
return qConn, nil
|
||||
return ul, nil
|
||||
}
|
||||
|
||||
func (t *P2PTunnel) connectUnderlayTCP6() (c underlay, err error) {
|
||||
gLog.Println(LvINFO, "connectUnderlayTCP6 start")
|
||||
defer gLog.Println(LvINFO, "connectUnderlayTCP6 end")
|
||||
var qConn *underlayTCP6
|
||||
var ul *underlayTCP6
|
||||
if t.config.isUnderlayServer == 1 {
|
||||
t.pn.push(t.config.PeerNode, MsgPushUnderlayConnect, nil)
|
||||
qConn, err = listenTCP6(t.coneNatPort, TunnelIdleTimeout)
|
||||
ul, err = listenTCP6(t.coneNatPort, UnderlayConnectTimeout)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("listen TCP6 error:%s", err)
|
||||
}
|
||||
_, buff, err := qConn.ReadBuffer()
|
||||
_, buff, err := ul.ReadBuffer()
|
||||
if err != nil {
|
||||
qConn.listener.Close()
|
||||
ul.listener.Close()
|
||||
return nil, fmt.Errorf("read start msg error:%s", err)
|
||||
}
|
||||
if buff != nil {
|
||||
gLog.Println(LvDEBUG, string(buff))
|
||||
}
|
||||
qConn.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, []byte("OpenP2P,hello2"))
|
||||
ul.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, []byte("OpenP2P,hello2"))
|
||||
gLog.Println(LvDEBUG, "TCP6 connection ok")
|
||||
return qConn, nil
|
||||
return ul, nil
|
||||
}
|
||||
|
||||
//else
|
||||
t.pn.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, time.Second*5)
|
||||
t.pn.read(t.config.PeerNode, MsgPush, MsgPushUnderlayConnect, ReadMsgTimeout)
|
||||
gLog.Println(LvDEBUG, "TCP6 dial to ", t.config.peerIPv6)
|
||||
qConn, err = dialTCP6(t.config.peerIPv6, t.config.peerConeNatPort)
|
||||
ul, err = dialTCP6(t.config.peerIPv6, t.config.peerConeNatPort)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("TCP6 dial to %s:%d error:%s", t.config.peerIPv6, t.config.peerConeNatPort, err)
|
||||
}
|
||||
handshakeBegin := time.Now()
|
||||
qConn.WriteBytes(MsgP2P, MsgTunnelHandshake, []byte("OpenP2P,hello"))
|
||||
_, buff, err := qConn.ReadBuffer()
|
||||
ul.WriteBytes(MsgP2P, MsgTunnelHandshake, []byte("OpenP2P,hello"))
|
||||
_, buff, err := ul.ReadBuffer()
|
||||
if err != nil {
|
||||
qConn.listener.Close()
|
||||
ul.listener.Close()
|
||||
return nil, fmt.Errorf("read MsgTunnelHandshake error:%s", err)
|
||||
}
|
||||
if buff != nil {
|
||||
@@ -409,7 +382,7 @@ func (t *P2PTunnel) connectUnderlayTCP6() (c underlay, err error) {
|
||||
gLog.Println(LvINFO, "rtt=", time.Since(handshakeBegin))
|
||||
gLog.Println(LvDEBUG, "TCP6 connection ok")
|
||||
t.linkModeWeb = LinkModeIPv6
|
||||
return qConn, nil
|
||||
return ul, nil
|
||||
}
|
||||
|
||||
func (t *P2PTunnel) readLoop() {
|
||||
@@ -425,10 +398,12 @@ func (t *P2PTunnel) readLoop() {
|
||||
break
|
||||
}
|
||||
if head.MainType != MsgP2P {
|
||||
gLog.Printf(LvWARN, "%d head.MainType != MsgP2P", t.id)
|
||||
continue
|
||||
}
|
||||
switch head.SubType {
|
||||
case MsgTunnelHeartbeat:
|
||||
t.hbTime = time.Now()
|
||||
t.conn.WriteBytes(MsgP2P, MsgTunnelHeartbeatAck, nil)
|
||||
gLog.Printf(LvDEBUG, "%d read tunnel heartbeat", t.id)
|
||||
case MsgTunnelHeartbeatAck:
|
||||
@@ -438,10 +413,11 @@ func (t *P2PTunnel) readLoop() {
|
||||
gLog.Printf(LvDEBUG, "%d read tunnel heartbeat ack", t.id)
|
||||
case MsgOverlayData:
|
||||
if len(body) < overlayHeaderSize {
|
||||
gLog.Printf(LvWARN, "%d len(body) < overlayHeaderSize", t.id)
|
||||
continue
|
||||
}
|
||||
overlayID := binary.LittleEndian.Uint64(body[:8])
|
||||
gLog.Printf(LvDEBUG, "%d tunnel read overlay data %d", t.id, overlayID)
|
||||
gLog.Printf(LvDEBUG, "%d tunnel read overlay data %d bodylen=%d", t.id, overlayID, head.DataLen)
|
||||
s, ok := t.overlayConns.Load(overlayID)
|
||||
if !ok {
|
||||
// debug level, when overlay connection closed, always has some packet not found tunnel
|
||||
@@ -462,20 +438,19 @@ func (t *P2PTunnel) readLoop() {
|
||||
gLog.Println(LvERROR, "overlay write error:", err)
|
||||
}
|
||||
case MsgRelayData:
|
||||
gLog.Printf(LvDEBUG, "got relay data datalen=%d", head.DataLen)
|
||||
if len(body) < 8 {
|
||||
continue
|
||||
}
|
||||
tunnelID := binary.LittleEndian.Uint64(body[:8])
|
||||
t.pn.relay(tunnelID, body[8:])
|
||||
gLog.Printf(LvDEBUG, "relay data to %d, len=%d", tunnelID, head.DataLen-RelayHeaderSize)
|
||||
t.pn.relay(tunnelID, body[RelayHeaderSize:])
|
||||
case MsgRelayHeartbeat:
|
||||
req := RelayHeartbeat{}
|
||||
err := json.Unmarshal(body, &req)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong RelayHeartbeat:%s", err)
|
||||
if err := json.Unmarshal(body, &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
|
||||
continue
|
||||
}
|
||||
gLog.Printf(LvDEBUG, "got MsgRelayHeartbeat from %d:%d", req.RelayTunnelID, req.AppID)
|
||||
gLog.Printf(LvDEBUG, "read MsgRelayHeartbeat from rtid:%d,appid:%d", req.RelayTunnelID, req.AppID)
|
||||
relayHead := new(bytes.Buffer)
|
||||
binary.Write(relayHead, binary.LittleEndian, req.RelayTunnelID)
|
||||
msg, _ := newMessage(MsgP2P, MsgRelayHeartbeatAck, &req)
|
||||
@@ -488,13 +463,12 @@ func (t *P2PTunnel) readLoop() {
|
||||
gLog.Printf(LvERROR, "wrong RelayHeartbeat:%s", err)
|
||||
continue
|
||||
}
|
||||
gLog.Printf(LvDEBUG, "got MsgRelayHeartbeatAck to %d", req.AppID)
|
||||
gLog.Printf(LvDEBUG, "read MsgRelayHeartbeatAck to appid:%d", req.AppID)
|
||||
t.pn.updateAppHeartbeat(req.AppID)
|
||||
case MsgOverlayConnectReq:
|
||||
req := OverlayConnectReq{}
|
||||
err := json.Unmarshal(body, &req)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong MsgOverlayConnectReq:%s", err)
|
||||
if err := json.Unmarshal(body, &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
|
||||
continue
|
||||
}
|
||||
// app connect only accept token(not relay totp token), avoid someone using the share relay node's token
|
||||
@@ -504,7 +478,7 @@ func (t *P2PTunnel) readLoop() {
|
||||
}
|
||||
|
||||
overlayID := req.ID
|
||||
gLog.Printf(LvDEBUG, "App:%d overlayID:%d connect %+v", req.AppID, overlayID, req)
|
||||
gLog.Printf(LvDEBUG, "App:%d overlayID:%d connect %s:%d", req.AppID, overlayID, req.DstIP, req.DstPort)
|
||||
oConn := overlayConn{
|
||||
tunnel: t,
|
||||
id: overlayID,
|
||||
@@ -512,11 +486,13 @@ func (t *P2PTunnel) readLoop() {
|
||||
rtid: req.RelayTunnelID,
|
||||
appID: req.AppID,
|
||||
appKey: GetKey(req.AppID),
|
||||
running: true,
|
||||
}
|
||||
if req.Protocol == "udp" {
|
||||
oConn.connUDP, err = net.DialUDP("udp", nil, &net.UDPAddr{IP: net.ParseIP(req.DstIP), Port: req.DstPort})
|
||||
} else {
|
||||
oConn.connTCP, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", req.DstIP, req.DstPort), time.Second*5)
|
||||
oConn.connTCP, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", req.DstIP, req.DstPort), ReadMsgTimeout)
|
||||
|
||||
}
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, err)
|
||||
@@ -525,7 +501,7 @@ func (t *P2PTunnel) readLoop() {
|
||||
|
||||
// calc key bytes for encrypt
|
||||
if oConn.appKey != 0 {
|
||||
encryptKey := make([]byte, 16)
|
||||
encryptKey := make([]byte, AESKeySize)
|
||||
binary.LittleEndian.PutUint64(encryptKey, oConn.appKey)
|
||||
binary.LittleEndian.PutUint64(encryptKey[8:], oConn.appKey)
|
||||
oConn.appKeyBytes = encryptKey
|
||||
@@ -535,9 +511,8 @@ func (t *P2PTunnel) readLoop() {
|
||||
go oConn.run()
|
||||
case MsgOverlayDisconnectReq:
|
||||
req := OverlayDisconnectReq{}
|
||||
err := json.Unmarshal(body, &req)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "wrong OverlayDisconnectRequest:%s", err)
|
||||
if err := json.Unmarshal(body, &req); err != nil {
|
||||
gLog.Printf(LvERROR, "wrong %v:%s", reflect.TypeOf(req), err)
|
||||
continue
|
||||
}
|
||||
overlayID := req.ID
|
||||
@@ -545,7 +520,7 @@ func (t *P2PTunnel) readLoop() {
|
||||
i, ok := t.overlayConns.Load(overlayID)
|
||||
if ok {
|
||||
oConn := i.(*overlayConn)
|
||||
oConn.running = false
|
||||
oConn.Close()
|
||||
}
|
||||
default:
|
||||
}
|
||||
@@ -556,6 +531,9 @@ func (t *P2PTunnel) readLoop() {
|
||||
}
|
||||
|
||||
func (t *P2PTunnel) heartbeatLoop() {
|
||||
t.hbMtx.Lock()
|
||||
t.hbTime = time.Now() // init
|
||||
t.hbMtx.Unlock()
|
||||
tc := time.NewTicker(TunnelHeartbeatTime)
|
||||
defer tc.Stop()
|
||||
gLog.Printf(LvDEBUG, "%d tunnel heartbeatLoop start", t.id)
|
||||
@@ -589,12 +567,13 @@ func (t *P2PTunnel) listen() error {
|
||||
FromIP: t.pn.config.publicIP,
|
||||
ConeNatPort: t.coneNatPort,
|
||||
ID: t.id,
|
||||
PunchTs: uint64(time.Now().UnixNano() + int64(PunchTsDelay) - t.pn.dt),
|
||||
Version: OpenP2PVersion,
|
||||
}
|
||||
t.punchTs = rsp.PunchTs
|
||||
// only private node set ipv6
|
||||
if t.config.fromToken == t.pn.config.Token {
|
||||
t.pn.refreshIPv6(false)
|
||||
rsp.IPv6 = t.pn.config.publicIPv6
|
||||
rsp.IPv6 = gConf.IPv6()
|
||||
}
|
||||
|
||||
t.pn.push(t.config.PeerNode, MsgPushConnectRsp, rsp)
|
||||
@@ -607,14 +586,7 @@ func (t *P2PTunnel) closeOverlayConns(appID uint64) {
|
||||
t.overlayConns.Range(func(_, i interface{}) bool {
|
||||
oConn := i.(*overlayConn)
|
||||
if oConn.appID == appID {
|
||||
if oConn.connTCP != nil {
|
||||
oConn.connTCP.Close()
|
||||
oConn.connTCP = nil
|
||||
}
|
||||
if oConn.connUDP != nil {
|
||||
oConn.connUDP.Close()
|
||||
oConn.connUDP = nil
|
||||
}
|
||||
oConn.Close()
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
162
core/protocol.go
162
core/protocol.go
@@ -10,9 +10,12 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const OpenP2PVersion = "3.5.0"
|
||||
const ProducnName string = "openp2p"
|
||||
const OpenP2PVersion = "3.12.0"
|
||||
const ProductName string = "openp2p"
|
||||
const LeastSupportVersion = "3.0.0"
|
||||
const SyncServerTimeVersion = "3.9.0"
|
||||
const SymmetricSimultaneouslySendVersion = "3.10.7"
|
||||
const PublicIPVersion = "3.11.2"
|
||||
|
||||
const (
|
||||
IfconfigPort1 = 27180
|
||||
@@ -37,6 +40,8 @@ type PushHeader struct {
|
||||
|
||||
var PushHeaderSize = binary.Size(PushHeader{})
|
||||
|
||||
const RelayHeaderSize = 8
|
||||
|
||||
type overlayHeader struct {
|
||||
id uint64
|
||||
}
|
||||
@@ -96,6 +101,7 @@ const (
|
||||
MsgPushEditNode = 12
|
||||
MsgPushAPPKey = 13
|
||||
MsgPushReportLog = 14
|
||||
MsgPushDstNodeOnline = 15
|
||||
)
|
||||
|
||||
// MsgP2P sub type message
|
||||
@@ -131,24 +137,29 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
ReadBuffLen = 4096 // for UDP maybe not enough
|
||||
NetworkHeartbeatTime = time.Second * 30 // TODO: server no response hb, save flow
|
||||
TunnelHeartbeatTime = time.Second * 15
|
||||
ReadBuffLen = 4096 // for UDP maybe not enough
|
||||
NetworkHeartbeatTime = time.Second * 30
|
||||
TunnelHeartbeatTime = time.Second * 10 // some nat udp session expired time less than 15s. change to 10s
|
||||
TunnelIdleTimeout = time.Minute
|
||||
SymmetricHandshakeNum = 800 // 0.992379
|
||||
// SymmetricHandshakeNum = 1000 // 0.999510
|
||||
SymmetricHandshakeInterval = time.Millisecond
|
||||
SymmetricHandshakeAckTimeout = time.Second * 11
|
||||
PeerAddRelayTimeount = time.Second * 20
|
||||
CheckActiveTimeout = time.Second * 5
|
||||
PaddingSize = 16
|
||||
AESKeySize = 16
|
||||
MaxRetry = 10
|
||||
RetryInterval = time.Second * 30
|
||||
PublicIPEchoTimeout = time.Second * 1
|
||||
NatTestTimeout = time.Second * 10
|
||||
ClientAPITimeout = time.Second * 10
|
||||
MaxDirectTry = 3
|
||||
SymmetricHandshakeInterval = time.Millisecond
|
||||
HandshakeTimeout = time.Second * 7
|
||||
PunchTsDelay = time.Second * 3
|
||||
PeerAddRelayTimeount = time.Second * 30 // peer need times. S2C\TCP\TCP Punch\UDP Punch
|
||||
CheckActiveTimeout = time.Second * 5
|
||||
ReadMsgTimeout = time.Second * 5
|
||||
PaddingSize = 16
|
||||
AESKeySize = 16
|
||||
MaxRetry = 10
|
||||
Cone2ConeTCPPunchMaxRetry = 1
|
||||
Cone2ConeUDPPunchMaxRetry = 1
|
||||
PublicIPEchoTimeout = time.Second * 1
|
||||
NatTestTimeout = time.Second * 5
|
||||
UDPReadTimeout = time.Second * 5
|
||||
ClientAPITimeout = time.Second * 10
|
||||
UnderlayConnectTimeout = time.Second * 10
|
||||
MaxDirectTry = 3
|
||||
)
|
||||
|
||||
// NATNone has public ip
|
||||
@@ -223,6 +234,9 @@ type PushConnectReq struct {
|
||||
LinkMode string `json:"linkMode,omitempty"`
|
||||
IsUnderlayServer int `json:"isServer,omitempty"` // Requset spec peer is server
|
||||
}
|
||||
type PushDstNodeOnline struct {
|
||||
Node string `json:"node,omitempty"`
|
||||
}
|
||||
type PushConnectRsp struct {
|
||||
Error int `json:"error,omitempty"`
|
||||
From string `json:"from,omitempty"`
|
||||
@@ -235,6 +249,7 @@ type PushConnectRsp struct {
|
||||
ConeNatPort int `json:"coneNatPort,omitempty"` //it's not only cone, but also upnp or nat-pmp hole
|
||||
FromIP string `json:"fromIP,omitempty"`
|
||||
ID uint64 `json:"id,omitempty"`
|
||||
PunchTs uint64 `json:"punchts,omitempty"` // server timestamp
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
type PushRsp struct {
|
||||
@@ -341,9 +356,10 @@ type AppInfo struct {
|
||||
AppName string `json:"appName,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
Protocol string `json:"protocol,omitempty"`
|
||||
Whitelist string `json:"whitelist,omitempty"`
|
||||
SrcPort int `json:"srcPort,omitempty"`
|
||||
Protocol0 string `json:"protocol0,omitempty"`
|
||||
SrcPort0 int `json:"srcPort0,omitempty"`
|
||||
SrcPort0 int `json:"srcPort0,omitempty"` // srcport+protocol is uneque, use as old app id
|
||||
NatType int `json:"natType,omitempty"`
|
||||
PeerNode string `json:"peerNode,omitempty"`
|
||||
DstPort int `json:"dstPort,omitempty"`
|
||||
@@ -430,3 +446,113 @@ type QueryPeerInfoRsp struct {
|
||||
IPv6 string `json:"IPv6,omitempty"` // if public relay node, ipv6 not set
|
||||
HasUPNPorNATPMP int `json:"hasUPNPorNATPMP,omitempty"`
|
||||
}
|
||||
|
||||
const rootCA = `-----BEGIN CERTIFICATE-----
|
||||
MIIDhTCCAm0CFHm0cd8dnGCbUW/OcS56jf0gvRk7MA0GCSqGSIb3DQEBCwUAMH4x
|
||||
CzAJBgNVBAYTAkNOMQswCQYDVQQIDAJHRDETMBEGA1UECgwKb3BlbnAycC5jbjET
|
||||
MBEGA1UECwwKb3BlbnAycC5jbjETMBEGA1UEAwwKb3BlbnAycC5jbjEjMCEGCSqG
|
||||
SIb3DQEJARYUb3BlbnAycC5jbkBnbWFpbC5jb20wIBcNMjMwODAxMDkwMjMwWhgP
|
||||
MjEyMzA3MDgwOTAyMzBaMH4xCzAJBgNVBAYTAkNOMQswCQYDVQQIDAJHRDETMBEG
|
||||
A1UECgwKb3BlbnAycC5jbjETMBEGA1UECwwKb3BlbnAycC5jbjETMBEGA1UEAwwK
|
||||
b3BlbnAycC5jbjEjMCEGCSqGSIb3DQEJARYUb3BlbnAycC5jbkBnbWFpbC5jb20w
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDWg8wPy5hBLUaY4WOXayKu
|
||||
+magEz1LAY0krzXYSZaSCvGMwA0cervwAqgKfiiZEhho5UNA5iVOJ6bO1RL9H7Vp
|
||||
4HuW9BttDU/NQHguD8pyqx06Kaosz5LRw8USz1BCWWFdmi8Mv4I0omtd7m6lbWnY
|
||||
nrjQKLYPahPW481jUfJPqR6wUTnBuBMr2ZAGqmFR4Lhqs9B1P9GeBfDWNwVApJUC
|
||||
VEhbElukRJxdUvWeJ5+HMENKQcHCTTgmQbmDLMobHXs3Xf7fT9qC76wOe9LFHI6L
|
||||
dAww9gryQhxWauQl1NO8aGJTFu+3wgnKBdTMJmF/1iuZYXJOCR1solwqU1hCgBsj
|
||||
AgMBAAEwDQYJKoZIhvcNAQELBQADggEBADp153YNVN8p6/3PLnXxHBDeDViAfeQd
|
||||
VJmy8eH1LTq/xtUY71HGSpL7iIBNoQdDTHfsg3c6ZANBCxbO/7AhFAzPt1aK8eHy
|
||||
XuEiW0Z6R8np1Khh3alCOfD15tKcjok//Wxisbz+YItlbDus/eWRbLGB3HGrzn4l
|
||||
GB18jw+G7o4U3rGX8agHqVGQEd06gk1ZaprASpTGwSsv4A5ehosjT1d7re8Z5eD4
|
||||
RVtXS+DplMClQ5QSlv3StwcWOsjyiAimNfLEU5xoEfq17yOJUTU1OTL4YOt16QUc
|
||||
C1tnzFr3k/ioqFR7cnyzNrbjlfPOmO9l2WReEbMP3bvaSHm6EcpJKS8=
|
||||
-----END CERTIFICATE-----`
|
||||
|
||||
const ISRGRootX1 = `-----BEGIN CERTIFICATE-----
|
||||
MIIEJjCCAw6gAwIBAgISAztStWq026ej0RCsk3ErbUdPMA0GCSqGSIb3DQEBCwUA
|
||||
MDIxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1MZXQncyBFbmNyeXB0MQswCQYDVQQD
|
||||
EwJSMzAeFw0yMzA4MDQwODUyMjlaFw0yMzExMDIwODUyMjhaMBcxFTATBgNVBAMM
|
||||
DCoub3BlbnAycC5jbjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABPRdkgLV2FA+
|
||||
3g/GjcA9UcfDfIFYgofSTNbOCQFIiQVMXrTgAToF1/tWaS2LOuysZcCX6OE7SCeG
|
||||
lQ+0g+L2qvujggIaMIICFjAOBgNVHQ8BAf8EBAMCB4AwHQYDVR0lBBYwFAYIKwYB
|
||||
BQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFIdL5LNQC+X4
|
||||
8r6u+3NlM238Vmk5MB8GA1UdIwQYMBaAFBQusxe3WFbLrlAJQOYfr52LFMLGMFUG
|
||||
CCsGAQUFBwEBBEkwRzAhBggrBgEFBQcwAYYVaHR0cDovL3IzLm8ubGVuY3Iub3Jn
|
||||
MCIGCCsGAQUFBzAChhZodHRwOi8vcjMuaS5sZW5jci5vcmcvMCMGA1UdEQQcMBqC
|
||||
DCoub3BlbnAycC5jboIKb3BlbnAycC5jbjATBgNVHSAEDDAKMAgGBmeBDAECATCC
|
||||
AQQGCisGAQQB1nkCBAIEgfUEgfIA8AB2AHoyjFTYty22IOo44FIe6YQWcDIThU07
|
||||
0ivBOlejUutSAAABib/2fCgAAAQDAEcwRQIhAJzf9XNe0cu9CNYLLqtDCZZMqI6u
|
||||
qsHrnnXcFQW23ioZAiAgwKp5DwZw9RmF19KOjD6lYJfTxc+anJUuWAlMwu1HYQB2
|
||||
AK33vvp8/xDIi509nB4+GGq0Zyldz7EMJMqFhjTr3IKKAAABib/2fEEAAAQDAEcw
|
||||
RQIgKeI7DopyzFXPdRQZKZrHVqfXQ8OipvlKXd5xRnKFjH4CIQDMM+TU+LOux8xK
|
||||
1NlTiSs9DhQI/eU3ZXKxSQAqF50RnTANBgkqhkiG9w0BAQsFAAOCAQEATqZ+H2NT
|
||||
cv4FzArD/Krlnur1OTitvpubRWM+ClB9Cr6pvPVB7Dp0/ALxu35ZmCtrzdJWTfmp
|
||||
lHxU4nPXRPVjuPRNXooSyH//KTfHyf32919PQOi/qc/QEAuIzkGLJg0dIPKLxaNK
|
||||
CiTWU+2iAYSHBgCWulfLX/RYNbBZQ9w0xIm3XhuMjCF/omG8ofuz1DmiRVR+17JA
|
||||
nuDXQkxm7KhmbxSA4PsLwzvIWA8Wk44ZK7uncgRY3WIUXcVRELSFA5LuH67TOwag
|
||||
al6iG56KW1N2Yy9YmeG27SYvHZYkjmuJ8NEy7Ku+Mi6gwO4hs0CYr2wtUacPfjKF
|
||||
aYTGWSt6Pt8kmw==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFFjCCAv6gAwIBAgIRAJErCErPDBinU/bWLiWnX1owDQYJKoZIhvcNAQELBQAw
|
||||
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
||||
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMjAwOTA0MDAwMDAw
|
||||
WhcNMjUwOTE1MTYwMDAwWjAyMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNTGV0J3Mg
|
||||
RW5jcnlwdDELMAkGA1UEAxMCUjMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
|
||||
AoIBAQC7AhUozPaglNMPEuyNVZLD+ILxmaZ6QoinXSaqtSu5xUyxr45r+XXIo9cP
|
||||
R5QUVTVXjJ6oojkZ9YI8QqlObvU7wy7bjcCwXPNZOOftz2nwWgsbvsCUJCWH+jdx
|
||||
sxPnHKzhm+/b5DtFUkWWqcFTzjTIUu61ru2P3mBw4qVUq7ZtDpelQDRrK9O8Zutm
|
||||
NHz6a4uPVymZ+DAXXbpyb/uBxa3Shlg9F8fnCbvxK/eG3MHacV3URuPMrSXBiLxg
|
||||
Z3Vms/EY96Jc5lP/Ooi2R6X/ExjqmAl3P51T+c8B5fWmcBcUr2Ok/5mzk53cU6cG
|
||||
/kiFHaFpriV1uxPMUgP17VGhi9sVAgMBAAGjggEIMIIBBDAOBgNVHQ8BAf8EBAMC
|
||||
AYYwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBIGA1UdEwEB/wQIMAYB
|
||||
Af8CAQAwHQYDVR0OBBYEFBQusxe3WFbLrlAJQOYfr52LFMLGMB8GA1UdIwQYMBaA
|
||||
FHm0WeZ7tuXkAXOACIjIGlj26ZtuMDIGCCsGAQUFBwEBBCYwJDAiBggrBgEFBQcw
|
||||
AoYWaHR0cDovL3gxLmkubGVuY3Iub3JnLzAnBgNVHR8EIDAeMBygGqAYhhZodHRw
|
||||
Oi8veDEuYy5sZW5jci5vcmcvMCIGA1UdIAQbMBkwCAYGZ4EMAQIBMA0GCysGAQQB
|
||||
gt8TAQEBMA0GCSqGSIb3DQEBCwUAA4ICAQCFyk5HPqP3hUSFvNVneLKYY611TR6W
|
||||
PTNlclQtgaDqw+34IL9fzLdwALduO/ZelN7kIJ+m74uyA+eitRY8kc607TkC53wl
|
||||
ikfmZW4/RvTZ8M6UK+5UzhK8jCdLuMGYL6KvzXGRSgi3yLgjewQtCPkIVz6D2QQz
|
||||
CkcheAmCJ8MqyJu5zlzyZMjAvnnAT45tRAxekrsu94sQ4egdRCnbWSDtY7kh+BIm
|
||||
lJNXoB1lBMEKIq4QDUOXoRgffuDghje1WrG9ML+Hbisq/yFOGwXD9RiX8F6sw6W4
|
||||
avAuvDszue5L3sz85K+EC4Y/wFVDNvZo4TYXao6Z0f+lQKc0t8DQYzk1OXVu8rp2
|
||||
yJMC6alLbBfODALZvYH7n7do1AZls4I9d1P4jnkDrQoxB3UqQ9hVl3LEKQ73xF1O
|
||||
yK5GhDDX8oVfGKF5u+decIsH4YaTw7mP3GFxJSqv3+0lUFJoi5Lc5da149p90Ids
|
||||
hCExroL1+7mryIkXPeFM5TgO9r0rvZaBFOvV2z0gp35Z0+L4WPlbuEjN/lxPFin+
|
||||
HlUjr8gRsI3qfJOQFy/9rKIJR0Y/8Omwt/8oTWgy1mdeHmmjk7j1nYsvC9JSQ6Zv
|
||||
MldlTTKB3zhThV1+XWYp6rjd5JW1zbVWEkLNxE7GJThEUG3szgBVGP7pSWTUTsqX
|
||||
nLRbwHOoq7hHwg==
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
|
||||
TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
|
||||
cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
|
||||
WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
|
||||
ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
|
||||
MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
|
||||
h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
|
||||
0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
|
||||
A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
|
||||
T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
|
||||
B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
|
||||
B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
|
||||
KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
|
||||
OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
|
||||
jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
|
||||
qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
|
||||
rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
|
||||
HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
|
||||
hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
|
||||
ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
|
||||
3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
|
||||
NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
|
||||
ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
|
||||
TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
|
||||
jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
|
||||
oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
|
||||
4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
|
||||
mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
|
||||
emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
||||
-----END CERTIFICATE-----
|
||||
`
|
||||
|
||||
51
core/speedlimiter.go
Normal file
51
core/speedlimiter.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SpeedLimiter ...
|
||||
type SpeedLimiter struct {
|
||||
lastUpdate time.Time
|
||||
speed int // per second
|
||||
precision int // seconds
|
||||
freeCap int
|
||||
maxFreeCap int
|
||||
mtx sync.Mutex
|
||||
}
|
||||
|
||||
func newSpeedLimiter(speed int, precision int) *SpeedLimiter {
|
||||
return &SpeedLimiter{
|
||||
speed: speed,
|
||||
precision: precision,
|
||||
lastUpdate: time.Now(),
|
||||
maxFreeCap: speed * precision,
|
||||
freeCap: speed * precision,
|
||||
}
|
||||
}
|
||||
|
||||
// Add ...
|
||||
func (sl *SpeedLimiter) Add(increment int, wait bool) bool {
|
||||
if sl.speed <= 0 {
|
||||
return true
|
||||
}
|
||||
sl.mtx.Lock()
|
||||
defer sl.mtx.Unlock()
|
||||
sl.freeCap += int(time.Since(sl.lastUpdate) * time.Duration(sl.speed) / time.Second)
|
||||
if sl.freeCap > sl.maxFreeCap {
|
||||
sl.freeCap = sl.maxFreeCap
|
||||
}
|
||||
if !wait && sl.freeCap < increment {
|
||||
return false
|
||||
}
|
||||
sl.freeCap -= increment
|
||||
sl.lastUpdate = time.Now()
|
||||
if sl.freeCap < 0 {
|
||||
// sleep for the overflow
|
||||
fmt.Println("sleep ", time.Millisecond*time.Duration(-sl.freeCap*100)/time.Duration(sl.speed))
|
||||
time.Sleep(time.Millisecond * time.Duration(-sl.freeCap*1000) / time.Duration(sl.speed)) // sleep ms
|
||||
}
|
||||
return true
|
||||
}
|
||||
58
core/speedlimiter_test.go
Normal file
58
core/speedlimiter_test.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestBandwidth(t *testing.T) {
|
||||
speed := 10 * 1024 * 1024 / 8 // 10mbps
|
||||
speedl := newSpeedLimiter(speed, 1)
|
||||
oneBuffSize := 4096
|
||||
writeNum := 5000
|
||||
expectTime := oneBuffSize * writeNum / speed
|
||||
startTs := time.Now()
|
||||
for i := 0; i < writeNum; i++ {
|
||||
speedl.Add(oneBuffSize, true)
|
||||
}
|
||||
t.Logf("cost %ds, expect %ds", time.Since(startTs)/time.Second, expectTime)
|
||||
if time.Since(startTs) > time.Duration(expectTime+1)*time.Second || time.Since(startTs) < time.Duration(expectTime-1)*time.Second {
|
||||
t.Error("error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSymmetric(t *testing.T) {
|
||||
speed := 20000 / 180
|
||||
speedl := newSpeedLimiter(speed, 180)
|
||||
oneBuffSize := 300
|
||||
writeNum := 100
|
||||
expectTime := (oneBuffSize*writeNum - 20000) / speed
|
||||
startTs := time.Now()
|
||||
for i := 0; i < writeNum; i++ {
|
||||
speedl.Add(oneBuffSize, true)
|
||||
}
|
||||
t.Logf("cost %ds, expect %ds", time.Since(startTs)/time.Second, expectTime)
|
||||
if time.Since(startTs) > time.Duration(expectTime+1)*time.Second || time.Since(startTs) < time.Duration(expectTime-1)*time.Second {
|
||||
t.Error("error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSymmetric2(t *testing.T) {
|
||||
speed := 30000 / 180
|
||||
speedl := newSpeedLimiter(speed, 180)
|
||||
oneBuffSize := 800
|
||||
writeNum := 50
|
||||
expectTime := (oneBuffSize*writeNum - 30000) / speed
|
||||
startTs := time.Now()
|
||||
for i := 0; i < writeNum; {
|
||||
if speedl.Add(oneBuffSize, true) {
|
||||
i++
|
||||
} else {
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
}
|
||||
t.Logf("cost %ds, expect %ds", time.Since(startTs)/time.Second, expectTime)
|
||||
if time.Since(startTs) > time.Duration(expectTime+1)*time.Second || time.Since(startTs) < time.Duration(expectTime-1)*time.Second {
|
||||
t.Error("error")
|
||||
}
|
||||
}
|
||||
35
core/totp.go
35
core/totp.go
@@ -1,35 +0,0 @@
|
||||
// Time-based One-time Password
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
const TOTPStep = 30 // 30s
|
||||
func GenTOTP(token uint64, ts int64) uint64 {
|
||||
step := ts / TOTPStep
|
||||
tbuff := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(tbuff, token)
|
||||
mac := hmac.New(sha256.New, tbuff)
|
||||
b := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(b, uint64(step))
|
||||
mac.Write(b)
|
||||
num := binary.LittleEndian.Uint64(mac.Sum(nil)[:8])
|
||||
// fmt.Printf("%x\n", mac.Sum(nil))
|
||||
return num
|
||||
}
|
||||
|
||||
func VerifyTOTP(code uint64, token uint64, ts int64) bool {
|
||||
if code == 0 {
|
||||
return false
|
||||
}
|
||||
if code == token {
|
||||
return true
|
||||
}
|
||||
if code == GenTOTP(token, ts) || code == GenTOTP(token, ts-TOTPStep) || code == GenTOTP(token, ts+TOTPStep) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
// Time-based One-time Password
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestTOTP(t *testing.T) {
|
||||
for i := 0; i < 20; i++ {
|
||||
ts := time.Now().Unix()
|
||||
code := GenTOTP(13666999958022769123, ts)
|
||||
t.Log(code)
|
||||
if !VerifyTOTP(code, 13666999958022769123, ts) {
|
||||
t.Error("TOTP error")
|
||||
}
|
||||
if !VerifyTOTP(code, 13666999958022769123, ts-10) {
|
||||
t.Error("TOTP error")
|
||||
}
|
||||
if !VerifyTOTP(code, 13666999958022769123, ts+10) {
|
||||
t.Error("TOTP error")
|
||||
}
|
||||
if VerifyTOTP(code, 13666999958022769123, ts+60) {
|
||||
t.Error("TOTP error")
|
||||
}
|
||||
if VerifyTOTP(code, 13666999958022769124, ts+1) {
|
||||
t.Error("TOTP error")
|
||||
}
|
||||
if VerifyTOTP(code, 13666999958022769125, ts+1) {
|
||||
t.Error("TOTP error")
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
t.Log("round", i, " ", ts, " test ok")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -18,10 +18,9 @@ func UDPWrite(conn *net.UDPConn, dst net.Addr, mainType uint16, subType uint16,
|
||||
return conn.WriteTo(msg, dst)
|
||||
}
|
||||
|
||||
func UDPRead(conn *net.UDPConn, timeout int) (ra net.Addr, head *openP2PHeader, result []byte, len int, err error) {
|
||||
func UDPRead(conn *net.UDPConn, timeout time.Duration) (ra net.Addr, head *openP2PHeader, result []byte, len int, err error) {
|
||||
if timeout > 0 {
|
||||
deadline := time.Now().Add(time.Millisecond * time.Duration(timeout))
|
||||
err = conn.SetReadDeadline(deadline)
|
||||
err = conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "SetReadDeadline error")
|
||||
return nil, nil, nil, 0, err
|
||||
|
||||
@@ -1,16 +1,62 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
type underlay interface {
|
||||
Read([]byte) (int, error)
|
||||
Write([]byte) (int, error)
|
||||
ReadBuffer() (*openP2PHeader, []byte, error)
|
||||
WriteBytes(uint16, uint16, []byte) error
|
||||
WriteBuffer([]byte) error
|
||||
WriteMessage(uint16, uint16, interface{}) error
|
||||
Close() error
|
||||
WLock()
|
||||
WUnlock()
|
||||
SetReadDeadline(t time.Time) error
|
||||
SetWriteDeadline(t time.Time) error
|
||||
Protocol() string
|
||||
}
|
||||
|
||||
func DefaultReadBuffer(ul underlay) (*openP2PHeader, []byte, error) {
|
||||
headBuf := make([]byte, openP2PHeaderSize)
|
||||
_, err := io.ReadFull(ul, headBuf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
head, err := decodeHeader(headBuf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
dataBuf := make([]byte, head.DataLen)
|
||||
_, err = io.ReadFull(ul, dataBuf)
|
||||
return head, dataBuf, err
|
||||
}
|
||||
|
||||
func DefaultWriteBytes(ul underlay, mainType, subType uint16, data []byte) error {
|
||||
writeBytes := append(encodeHeader(mainType, subType, uint32(len(data))), data...)
|
||||
ul.WLock()
|
||||
_, err := ul.Write(writeBytes)
|
||||
ul.WUnlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func DefaultWriteBuffer(ul underlay, data []byte) error {
|
||||
ul.WLock()
|
||||
_, err := ul.Write(data)
|
||||
ul.WUnlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func DefaultWriteMessage(ul underlay, mainType uint16, subType uint16, packet interface{}) error {
|
||||
writeBytes, err := newMessage(mainType, subType, packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ul.WLock()
|
||||
_, err = ul.Write(writeBytes)
|
||||
ul.WUnlock()
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -6,19 +6,17 @@ import (
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lucas-clemente/quic-go"
|
||||
"github.com/quic-go/quic-go"
|
||||
)
|
||||
|
||||
//quic.DialContext do not support version 44,disable it
|
||||
// quic.DialContext do not support version 44,disable it
|
||||
var quicVersion []quic.VersionNumber
|
||||
|
||||
type underlayQUIC struct {
|
||||
@@ -33,46 +31,19 @@ func (conn *underlayQUIC) Protocol() string {
|
||||
}
|
||||
|
||||
func (conn *underlayQUIC) ReadBuffer() (*openP2PHeader, []byte, error) {
|
||||
headBuf := make([]byte, openP2PHeaderSize)
|
||||
_, err := io.ReadFull(conn, headBuf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
head, err := decodeHeader(headBuf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
dataBuf := make([]byte, head.DataLen)
|
||||
_, err = io.ReadFull(conn, dataBuf)
|
||||
return head, dataBuf, err
|
||||
return DefaultReadBuffer(conn)
|
||||
}
|
||||
|
||||
func (conn *underlayQUIC) WriteBytes(mainType uint16, subType uint16, data []byte) error {
|
||||
writeBytes := append(encodeHeader(mainType, subType, uint32(len(data))), data...)
|
||||
conn.writeMtx.Lock()
|
||||
_, err := conn.Write(writeBytes)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteBytes(conn, mainType, subType, data)
|
||||
}
|
||||
|
||||
func (conn *underlayQUIC) WriteBuffer(data []byte) error {
|
||||
conn.writeMtx.Lock()
|
||||
_, err := conn.Write(data)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteBuffer(conn, data)
|
||||
}
|
||||
|
||||
func (conn *underlayQUIC) WriteMessage(mainType uint16, subType uint16, packet interface{}) error {
|
||||
// TODO: call newMessage
|
||||
data, err := json.Marshal(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeBytes := append(encodeHeader(mainType, subType, uint32(len(data))), data...)
|
||||
conn.writeMtx.Lock()
|
||||
_, err = conn.Write(writeBytes)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteMessage(conn, mainType, subType, packet)
|
||||
}
|
||||
|
||||
func (conn *underlayQUIC) Close() error {
|
||||
@@ -80,6 +51,12 @@ func (conn *underlayQUIC) Close() error {
|
||||
conn.Connection.CloseWithError(0, "")
|
||||
return nil
|
||||
}
|
||||
func (conn *underlayQUIC) WLock() {
|
||||
conn.writeMtx.Lock()
|
||||
}
|
||||
func (conn *underlayQUIC) WUnlock() {
|
||||
conn.writeMtx.Unlock()
|
||||
}
|
||||
func (conn *underlayQUIC) CloseListener() {
|
||||
if conn.listener != nil {
|
||||
conn.listener.Close()
|
||||
@@ -87,7 +64,7 @@ func (conn *underlayQUIC) CloseListener() {
|
||||
}
|
||||
|
||||
func (conn *underlayQUIC) Accept() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), UnderlayConnectTimeout)
|
||||
defer cancel()
|
||||
sess, err := conn.listener.Accept(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -21,82 +20,82 @@ func (conn *underlayTCP) Protocol() string {
|
||||
}
|
||||
|
||||
func (conn *underlayTCP) ReadBuffer() (*openP2PHeader, []byte, error) {
|
||||
headBuf := make([]byte, openP2PHeaderSize)
|
||||
_, err := io.ReadFull(conn, headBuf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
head, err := decodeHeader(headBuf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
dataBuf := make([]byte, head.DataLen)
|
||||
_, err = io.ReadFull(conn, dataBuf)
|
||||
return head, dataBuf, err
|
||||
return DefaultReadBuffer(conn)
|
||||
}
|
||||
|
||||
func (conn *underlayTCP) WriteBytes(mainType uint16, subType uint16, data []byte) error {
|
||||
writeBytes := append(encodeHeader(mainType, subType, uint32(len(data))), data...)
|
||||
conn.writeMtx.Lock()
|
||||
_, err := conn.Write(writeBytes)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteBytes(conn, mainType, subType, data)
|
||||
}
|
||||
|
||||
func (conn *underlayTCP) WriteBuffer(data []byte) error {
|
||||
conn.writeMtx.Lock()
|
||||
_, err := conn.Write(data)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteBuffer(conn, data)
|
||||
}
|
||||
|
||||
func (conn *underlayTCP) WriteMessage(mainType uint16, subType uint16, packet interface{}) error {
|
||||
// TODO: call newMessage
|
||||
data, err := json.Marshal(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeBytes := append(encodeHeader(mainType, subType, uint32(len(data))), data...)
|
||||
conn.writeMtx.Lock()
|
||||
_, err = conn.Write(writeBytes)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteMessage(conn, mainType, subType, packet)
|
||||
}
|
||||
|
||||
func (conn *underlayTCP) Close() error {
|
||||
return conn.Conn.Close()
|
||||
}
|
||||
func (conn *underlayTCP) WLock() {
|
||||
conn.writeMtx.Lock()
|
||||
}
|
||||
func (conn *underlayTCP) WUnlock() {
|
||||
conn.writeMtx.Unlock()
|
||||
}
|
||||
|
||||
func listenTCP(host string, port int, localPort int, mode string) (*underlayTCP, error) {
|
||||
func listenTCP(host string, port int, localPort int, mode string, t *P2PTunnel) (*underlayTCP, error) {
|
||||
if mode == LinkModeTCPPunch {
|
||||
c, err := reuse.DialTimeout("tcp", fmt.Sprintf("0.0.0.0:%d", localPort), fmt.Sprintf("%s:%d", host, port), SymmetricHandshakeAckTimeout) // TODO: timeout
|
||||
if compareVersion(t.config.peerVersion, SyncServerTimeVersion) == LESS {
|
||||
gLog.Printf(LvDEBUG, "peer version %s less than %s", t.config.peerVersion, SyncServerTimeVersion)
|
||||
} else {
|
||||
ts := time.Duration(int64(t.punchTs) + t.pn.dt - time.Now().UnixNano())
|
||||
gLog.Printf(LvDEBUG, "sleep %d ms", ts/time.Millisecond)
|
||||
time.Sleep(ts)
|
||||
}
|
||||
gLog.Println(LvDEBUG, " send tcp punch: ", fmt.Sprintf("0.0.0.0:%d", localPort), "-->", fmt.Sprintf("%s:%d", host, port))
|
||||
c, err := reuse.DialTimeout("tcp", fmt.Sprintf("0.0.0.0:%d", localPort), fmt.Sprintf("%s:%d", host, port), CheckActiveTimeout)
|
||||
if err != nil {
|
||||
gLog.Println(LvDEBUG, "send tcp punch: ", err)
|
||||
return nil, err
|
||||
}
|
||||
return &underlayTCP{writeMtx: &sync.Mutex{}, Conn: c}, nil
|
||||
utcp := &underlayTCP{writeMtx: &sync.Mutex{}, Conn: c}
|
||||
_, buff, err := utcp.ReadBuffer()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read start msg error:%s", err)
|
||||
}
|
||||
if buff != nil {
|
||||
gLog.Println(LvDEBUG, string(buff))
|
||||
}
|
||||
utcp.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, buff)
|
||||
return utcp, nil
|
||||
}
|
||||
addr, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("0.0.0.0:%d", localPort))
|
||||
l, err := net.ListenTCP("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
t.pn.push(t.config.PeerNode, MsgPushUnderlayConnect, nil)
|
||||
tid := t.id
|
||||
if compareVersion(t.config.peerVersion, PublicIPVersion) == LESS { // old version
|
||||
ipBytes := net.ParseIP(t.config.peerIP).To4()
|
||||
tid = uint64(binary.BigEndian.Uint32(ipBytes))
|
||||
gLog.Println(LvDEBUG, "compatible with old client, use ip as key:", tid)
|
||||
}
|
||||
l.SetDeadline(time.Now().Add(SymmetricHandshakeAckTimeout))
|
||||
c, err := l.Accept()
|
||||
defer l.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
utcp := v4l.getUnderlayTCP(tid)
|
||||
if utcp == nil {
|
||||
return nil, ErrConnectPublicV4
|
||||
}
|
||||
return &underlayTCP{writeMtx: &sync.Mutex{}, Conn: c}, nil
|
||||
return utcp, nil
|
||||
}
|
||||
|
||||
func dialTCP(host string, port int, localPort int, mode string) (*underlayTCP, error) {
|
||||
var c net.Conn
|
||||
var err error
|
||||
if mode == LinkModeTCPPunch {
|
||||
c, err = reuse.DialTimeout("tcp", fmt.Sprintf("0.0.0.0:%d", localPort), fmt.Sprintf("%s:%d", host, port), SymmetricHandshakeAckTimeout)
|
||||
gLog.Println(LvDEBUG, " send tcp punch: ", fmt.Sprintf("0.0.0.0:%d", localPort), "-->", fmt.Sprintf("%s:%d", host, port))
|
||||
if c, err = reuse.DialTimeout("tcp", fmt.Sprintf("0.0.0.0:%d", localPort), fmt.Sprintf("%s:%d", host, port), CheckActiveTimeout); err != nil {
|
||||
gLog.Println(LvDEBUG, "send tcp punch: ", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
c, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", host, port), SymmetricHandshakeAckTimeout)
|
||||
c, err = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", host, port), CheckActiveTimeout)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -20,52 +18,30 @@ func (conn *underlayTCP6) Protocol() string {
|
||||
}
|
||||
|
||||
func (conn *underlayTCP6) ReadBuffer() (*openP2PHeader, []byte, error) {
|
||||
headBuf := make([]byte, openP2PHeaderSize)
|
||||
_, err := io.ReadFull(conn, headBuf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
head, err := decodeHeader(headBuf)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
dataBuf := make([]byte, head.DataLen)
|
||||
_, err = io.ReadFull(conn, dataBuf)
|
||||
return head, dataBuf, err
|
||||
return DefaultReadBuffer(conn)
|
||||
}
|
||||
|
||||
func (conn *underlayTCP6) WriteBytes(mainType uint16, subType uint16, data []byte) error {
|
||||
writeBytes := append(encodeHeader(mainType, subType, uint32(len(data))), data...)
|
||||
conn.writeMtx.Lock()
|
||||
_, err := conn.Write(writeBytes)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteBytes(conn, mainType, subType, data)
|
||||
}
|
||||
|
||||
func (conn *underlayTCP6) WriteBuffer(data []byte) error {
|
||||
conn.writeMtx.Lock()
|
||||
_, err := conn.Write(data)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteBuffer(conn, data)
|
||||
}
|
||||
|
||||
func (conn *underlayTCP6) WriteMessage(mainType uint16, subType uint16, packet interface{}) error {
|
||||
// TODO: call newMessage
|
||||
data, err := json.Marshal(packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
writeBytes := append(encodeHeader(mainType, subType, uint32(len(data))), data...)
|
||||
conn.writeMtx.Lock()
|
||||
_, err = conn.Write(writeBytes)
|
||||
conn.writeMtx.Unlock()
|
||||
return err
|
||||
return DefaultWriteMessage(conn, mainType, subType, packet)
|
||||
}
|
||||
|
||||
func (conn *underlayTCP6) Close() error {
|
||||
return conn.Conn.Close()
|
||||
}
|
||||
|
||||
func (conn *underlayTCP6) WLock() {
|
||||
conn.writeMtx.Lock()
|
||||
}
|
||||
func (conn *underlayTCP6) WUnlock() {
|
||||
conn.writeMtx.Unlock()
|
||||
}
|
||||
func listenTCP6(port int, idleTimeout time.Duration) (*underlayTCP6, error) {
|
||||
addr, _ := net.ResolveTCPAddr("tcp6", fmt.Sprintf("[::]:%d", port))
|
||||
l, err := net.ListenTCP("tcp6", addr)
|
||||
@@ -73,7 +49,7 @@ func listenTCP6(port int, idleTimeout time.Duration) (*underlayTCP6, error) {
|
||||
return nil, err
|
||||
}
|
||||
defer l.Close()
|
||||
l.SetDeadline(time.Now().Add(SymmetricHandshakeAckTimeout))
|
||||
l.SetDeadline(time.Now().Add(UnderlayConnectTimeout))
|
||||
c, err := l.Accept()
|
||||
defer l.Close()
|
||||
if err != nil {
|
||||
@@ -83,7 +59,7 @@ func listenTCP6(port int, idleTimeout time.Duration) (*underlayTCP6, error) {
|
||||
}
|
||||
|
||||
func dialTCP6(host string, port int) (*underlayTCP6, error) {
|
||||
c, err := net.DialTimeout("tcp6", fmt.Sprintf("[%s]:%d", host, port), SymmetricHandshakeAckTimeout)
|
||||
c, err := net.DialTimeout("tcp6", fmt.Sprintf("[%s]:%d", host, port), UnderlayConnectTimeout)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "Dial %s:%d error:%s", host, port, err)
|
||||
return nil, err
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -16,50 +17,59 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
func update(host string, port int) {
|
||||
func update(host string, port int) error {
|
||||
gLog.Println(LvINFO, "update start")
|
||||
defer gLog.Println(LvINFO, "update end")
|
||||
caCertPool, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "Failed to load system root CAs:", err)
|
||||
} else {
|
||||
caCertPool = x509.NewCertPool()
|
||||
}
|
||||
caCertPool.AppendCertsFromPEM([]byte(rootCA))
|
||||
caCertPool.AppendCertsFromPEM([]byte(ISRGRootX1))
|
||||
|
||||
c := http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
TLSClientConfig: &tls.Config{RootCAs: caCertPool,
|
||||
InsecureSkipVerify: false},
|
||||
},
|
||||
Timeout: time.Second * 30,
|
||||
}
|
||||
goos := runtime.GOOS
|
||||
goarch := runtime.GOARCH
|
||||
rsp, err := c.Get(fmt.Sprintf("https://%s:%d/api/v1/update?fromver=%s&os=%s&arch=%s", host, port, OpenP2PVersion, goos, goarch))
|
||||
rsp, err := c.Get(fmt.Sprintf("https://%s:%d/api/v1/update?fromver=%s&os=%s&arch=%s&user=%s&node=%s", host, port, OpenP2PVersion, goos, goarch, gConf.Network.User, gConf.Network.Node))
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "update:query update list failed:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
defer rsp.Body.Close()
|
||||
if rsp.StatusCode != http.StatusOK {
|
||||
gLog.Println(LvERROR, "get update info error:", rsp.Status)
|
||||
return
|
||||
return err
|
||||
}
|
||||
rspBuf, err := ioutil.ReadAll(rsp.Body)
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "update:read update list failed:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
updateInfo := UpdateInfo{}
|
||||
err = json.Unmarshal(rspBuf, &updateInfo)
|
||||
if err != nil {
|
||||
if err = json.Unmarshal(rspBuf, &updateInfo); err != nil {
|
||||
gLog.Println(LvERROR, rspBuf, " update info decode error:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
if updateInfo.Error != 0 {
|
||||
gLog.Println(LvERROR, "update error:", updateInfo.Error, updateInfo.ErrorDetail)
|
||||
return
|
||||
return err
|
||||
}
|
||||
err = updateFile(updateInfo.Url, "", "openp2p")
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "update: download failed:", err)
|
||||
return
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// todo rollback on error
|
||||
func updateFile(url string, checksum string, dst string) error {
|
||||
gLog.Println(LvINFO, "download ", url)
|
||||
tmpFile := filepath.Dir(os.Args[0]) + "/openp2p.tmp"
|
||||
@@ -68,8 +78,18 @@ func updateFile(url string, checksum string, dst string) error {
|
||||
gLog.Printf(LvERROR, "OpenFile %s error:%s", tmpFile, err)
|
||||
return err
|
||||
}
|
||||
caCertPool, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
gLog.Println(LvERROR, "Failed to load system root CAs:", err)
|
||||
} else {
|
||||
caCertPool = x509.NewCertPool()
|
||||
}
|
||||
caCertPool.AppendCertsFromPEM([]byte(rootCA))
|
||||
caCertPool.AppendCertsFromPEM([]byte(ISRGRootX1))
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
TLSClientConfig: &tls.Config{
|
||||
RootCAs: caCertPool,
|
||||
InsecureSkipVerify: false},
|
||||
}
|
||||
client := &http.Client{Transport: tr}
|
||||
response, err := client.Get(url)
|
||||
@@ -89,17 +109,22 @@ func updateFile(url string, checksum string, dst string) error {
|
||||
output.Close()
|
||||
gLog.Println(LvINFO, "download ", url, " ok")
|
||||
gLog.Printf(LvINFO, "size: %d bytes", n)
|
||||
|
||||
err = os.Rename(os.Args[0], os.Args[0]+"0")
|
||||
if err != nil && os.IsExist(err) {
|
||||
gLog.Printf(LvINFO, " rename %s error:%s", os.Args[0], err)
|
||||
backupFile := os.Args[0] + "0"
|
||||
err = os.Rename(os.Args[0], backupFile) // the old daemon process was using the 0 file, so it will prevent override it
|
||||
if err != nil {
|
||||
gLog.Printf(LvINFO, " rename %s error:%s, retry 1", os.Args[0], err)
|
||||
backupFile = os.Args[0] + "1"
|
||||
err = os.Rename(os.Args[0], backupFile)
|
||||
if err != nil {
|
||||
gLog.Printf(LvINFO, " rename %s error:%s", os.Args[0], err)
|
||||
}
|
||||
}
|
||||
// extract
|
||||
gLog.Println(LvINFO, "extract files")
|
||||
err = extract(filepath.Dir(os.Args[0]), tmpFile)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "extract error:%s. revert rename", err)
|
||||
os.Rename(os.Args[0]+"0", os.Args[0])
|
||||
os.Rename(backupFile, os.Args[0])
|
||||
return err
|
||||
}
|
||||
os.Remove(tmpFile)
|
||||
@@ -191,3 +216,18 @@ func extractTgz(dst, src string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanTempFiles() {
|
||||
tmpFile := os.Args[0] + "0"
|
||||
if _, err := os.Stat(tmpFile); err == nil {
|
||||
if err := os.Remove(tmpFile); err != nil {
|
||||
gLog.Printf(LvDEBUG, " remove %s error:%s", tmpFile, err)
|
||||
}
|
||||
}
|
||||
tmpFile = os.Args[0] + "1"
|
||||
if _, err := os.Stat(tmpFile); err == nil {
|
||||
if err := os.Remove(tmpFile); err != nil {
|
||||
gLog.Printf(LvDEBUG, " remove %s error:%s", tmpFile, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
15
core/upnp.go
15
core/upnp.go
@@ -5,6 +5,7 @@ package openp2p
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -181,7 +182,12 @@ func localIPv4() string { // TODO: multi nic will wrong
|
||||
}
|
||||
|
||||
func getServiceURL(rootURL string) (url, urnDomain string, err error) {
|
||||
r, err := http.Get(rootURL)
|
||||
client := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
},
|
||||
Timeout: time.Second * 3}
|
||||
r, err := client.Get(rootURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -352,11 +358,6 @@ func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: check response to see if the port was forwarded
|
||||
// log.Println(message, response)
|
||||
// JAE:
|
||||
// body, err := ioutil.ReadAll(response.Body)
|
||||
// fmt.Println(string(body), err)
|
||||
mappedExternalPort = externalPort
|
||||
_ = response
|
||||
return
|
||||
@@ -378,8 +379,6 @@ func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: check response to see if the port was deleted
|
||||
// log.Println(message, response)
|
||||
_ = response
|
||||
return
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ func setRLimit() error {
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
limit.Cur = 10240
|
||||
limit.Cur = 65536
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
69
core/util_freebsd.go
Normal file
69
core/util_freebsd.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultInstallPath = "/usr/local/openp2p"
|
||||
defaultBinName = "openp2p"
|
||||
)
|
||||
|
||||
func getOsName() (osName string) {
|
||||
var sysnamePath string
|
||||
sysnamePath = "/etc/redhat-release"
|
||||
_, err := os.Stat(sysnamePath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
str := "PRETTY_NAME="
|
||||
f, err := os.Open("/etc/os-release")
|
||||
if err == nil {
|
||||
buf := bufio.NewReader(f)
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err == nil {
|
||||
line = strings.TrimSpace(line)
|
||||
pos := strings.Count(line, str)
|
||||
if pos > 0 {
|
||||
len1 := len([]rune(str)) + 1
|
||||
rs := []rune(line)
|
||||
osName = string(rs[len1 : (len(rs))-1])
|
||||
break
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
buff, err := ioutil.ReadFile(sysnamePath)
|
||||
if err == nil {
|
||||
osName = string(bytes.TrimSpace(buff))
|
||||
}
|
||||
}
|
||||
if osName == "" {
|
||||
osName = "FreeBSD"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setRLimit() error {
|
||||
var limit syscall.Rlimit
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
limit.Max = 65536
|
||||
limit.Cur = limit.Max
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setFirewall() {
|
||||
}
|
||||
@@ -64,7 +64,7 @@ func setRLimit() error {
|
||||
if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
}
|
||||
limit.Max = 1024 * 1024
|
||||
limit.Max = 65536
|
||||
limit.Cur = limit.Max
|
||||
if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
|
||||
return err
|
||||
|
||||
@@ -45,9 +45,9 @@ func setFirewall() {
|
||||
}
|
||||
if isXP {
|
||||
exec.Command("cmd.exe", `/c`, fmt.Sprintf(`netsh firewall del allowedprogram "%s"`, fullPath)).Run()
|
||||
exec.Command("cmd.exe", `/c`, fmt.Sprintf(`netsh firewall add allowedprogram "%s" "%s" ENABLE`, ProducnName, fullPath)).Run()
|
||||
exec.Command("cmd.exe", `/c`, fmt.Sprintf(`netsh firewall add allowedprogram "%s" "%s" ENABLE`, ProductName, fullPath)).Run()
|
||||
} else { // win7 or later
|
||||
exec.Command("cmd.exe", `/c`, fmt.Sprintf(`netsh advfirewall firewall del rule name="%s"`, ProducnName)).Run()
|
||||
exec.Command("cmd.exe", `/c`, fmt.Sprintf(`netsh advfirewall firewall add rule name="%s" dir=in action=allow program="%s" enable=yes`, ProducnName, fullPath)).Run()
|
||||
exec.Command("cmd.exe", `/c`, fmt.Sprintf(`netsh advfirewall firewall del rule name="%s"`, ProductName)).Run()
|
||||
exec.Command("cmd.exe", `/c`, fmt.Sprintf(`netsh advfirewall firewall add rule name="%s" dir=in action=allow program="%s" enable=yes`, ProductName, fullPath)).Run()
|
||||
}
|
||||
}
|
||||
|
||||
82
core/v4listener.go
Normal file
82
core/v4listener.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package openp2p
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type v4Listener struct {
|
||||
conns sync.Map
|
||||
port int
|
||||
acceptCh chan bool
|
||||
}
|
||||
|
||||
func (vl *v4Listener) start() error {
|
||||
v4l.acceptCh = make(chan bool, 10)
|
||||
for {
|
||||
vl.listen()
|
||||
time.Sleep(time.Second * 5)
|
||||
}
|
||||
}
|
||||
|
||||
func (vl *v4Listener) listen() error {
|
||||
gLog.Printf(LvINFO, "listen %d start", vl.port)
|
||||
defer gLog.Printf(LvINFO, "listen %d end", vl.port)
|
||||
addr, _ := net.ResolveTCPAddr("tcp", fmt.Sprintf("0.0.0.0:%d", vl.port))
|
||||
l, err := net.ListenTCP("tcp", addr)
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "listen %d error:", vl.port, err)
|
||||
return err
|
||||
}
|
||||
defer l.Close()
|
||||
for {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
go vl.handleConnection(c)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (vl *v4Listener) handleConnection(c net.Conn) {
|
||||
gLog.Println(LvDEBUG, "v4Listener accept connection: ", c.RemoteAddr().String())
|
||||
utcp := &underlayTCP{writeMtx: &sync.Mutex{}, Conn: c}
|
||||
utcp.SetReadDeadline(time.Now().Add(time.Second * 5))
|
||||
_, buff, err := utcp.ReadBuffer()
|
||||
if err != nil {
|
||||
gLog.Printf(LvERROR, "utcp.ReadBuffer error:", err)
|
||||
}
|
||||
utcp.WriteBytes(MsgP2P, MsgTunnelHandshakeAck, buff)
|
||||
var tid uint64
|
||||
if string(buff) == "OpenP2P,hello" { // old client
|
||||
// save remoteIP as key
|
||||
remoteAddr := c.RemoteAddr().(*net.TCPAddr).IP
|
||||
ipBytes := remoteAddr.To4()
|
||||
tid = uint64(binary.BigEndian.Uint32(ipBytes)) // bytes not enough for uint64
|
||||
gLog.Println(LvDEBUG, "hello ", string(buff))
|
||||
} else {
|
||||
if len(buff) < 8 {
|
||||
return
|
||||
}
|
||||
tid = binary.LittleEndian.Uint64(buff[:8])
|
||||
gLog.Println(LvDEBUG, "hello ", tid)
|
||||
}
|
||||
vl.conns.Store(tid, utcp)
|
||||
vl.acceptCh <- true
|
||||
}
|
||||
|
||||
func (vl *v4Listener) getUnderlayTCP(tid uint64) *underlayTCP {
|
||||
for i := 0; i < 100; i++ {
|
||||
select {
|
||||
case <-time.After(time.Millisecond * 50):
|
||||
case <-vl.acceptCh:
|
||||
}
|
||||
if u, ok := vl.conns.LoadAndDelete(tid); ok {
|
||||
return u.(*underlayTCP)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
12
docker/Dockerfile
Executable file
12
docker/Dockerfile
Executable file
@@ -0,0 +1,12 @@
|
||||
FROM alpine:3.18.2
|
||||
|
||||
# Replace the default Alpine repositories with Aliyun mirrors
|
||||
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && \
|
||||
apk add --no-cache ca-certificates && \
|
||||
rm -rf /tmp/* /var/tmp/* /var/cache/apk/* /var/cache/distfiles/*
|
||||
|
||||
COPY get-client.sh /
|
||||
ARG DOCKER_VER="latest"
|
||||
RUN echo $TARGETPLATFORM && chmod +x /get-client.sh && ./get-client.sh
|
||||
|
||||
ENTRYPOINT ["/openp2p"]
|
||||
45
docker/get-client.sh
Executable file
45
docker/get-client.sh
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/bin/sh
|
||||
|
||||
|
||||
echo "Building version:${DOCKER_VER}"
|
||||
echo "Running on platform: $TARGETPLATFORM"
|
||||
# TARGETPLATFORM=$(echo $TARGETPLATFORM | tr ',' '/')
|
||||
echo "Running on platform: $TARGETPLATFORM"
|
||||
sysType="linux-amd64"
|
||||
archType=$(uname -m)
|
||||
if [[ $archType == aarch64 ]] ;
|
||||
then
|
||||
sysType="linux-arm64"
|
||||
elif [[ $archType == arm* ]] ;
|
||||
then
|
||||
sysType="linux-arm"
|
||||
elif [[ $archType == i*86 ]] ;
|
||||
then
|
||||
sysType="linux-386"
|
||||
elif [[ $archType == mips ]] ;
|
||||
then
|
||||
sysType="linux-mipsle"
|
||||
ls /lib |grep mipsel
|
||||
if [[ $? -ne 0 ]]; then
|
||||
# mipsel not found, it's mipseb
|
||||
sysType="linux-mipsbe"
|
||||
fi
|
||||
fi
|
||||
url="https://openp2p.cn/download/v1/${DOCKER_VER}/openp2p-latest.$sysType.tar.gz"
|
||||
echo "download $url start"
|
||||
|
||||
if [ -f /usr/bin/curl ]; then
|
||||
curl -k -o openp2p.tar.gz $url
|
||||
else
|
||||
wget --no-check-certificate -O openp2p.tar.gz $url
|
||||
fi
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "download error $?"
|
||||
exit 9
|
||||
fi
|
||||
echo "download ok"
|
||||
tar -xzvf openp2p.tar.gz
|
||||
chmod +x openp2p
|
||||
pwd
|
||||
ls -l
|
||||
exit 0
|
||||
33
go.mod
33
go.mod
@@ -3,26 +3,27 @@ module openp2p
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/emirpasic/gods v1.18.1
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/kardianos/service v1.2.0
|
||||
github.com/lucas-clemente/quic-go v0.27.0
|
||||
github.com/openp2p-cn/go-reuseport v0.3.2
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f
|
||||
github.com/openp2p-cn/service v1.0.0
|
||||
github.com/openp2p-cn/totp v0.0.0-20230102121327-8e02f6b392ed
|
||||
github.com/quic-go/quic-go v0.34.0
|
||||
golang.org/x/sys v0.5.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/cheekybits/genny v1.0.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
||||
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
|
||||
github.com/marten-seemann/qtls-go1-17 v0.1.1 // indirect
|
||||
github.com/marten-seemann/qtls-go1-18 v0.1.1 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/onsi/ginkgo v1.16.4 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
|
||||
golang.org/x/mobile v0.0.0-20221020085226-b36e6246172e // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
|
||||
golang.org/x/tools v0.1.12 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
github.com/golang/mock v1.6.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
|
||||
github.com/kardianos/service v1.2.2 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.2.0 // indirect
|
||||
github.com/quic-go/qtls-go1-19 v0.3.2 // indirect
|
||||
github.com/quic-go/qtls-go1-20 v0.2.2 // indirect
|
||||
golang.org/x/crypto v0.4.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
|
||||
golang.org/x/mod v0.6.0 // indirect
|
||||
golang.org/x/net v0.7.0 // indirect
|
||||
golang.org/x/tools v0.2.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user