-
Notifications
You must be signed in to change notification settings - Fork 1.1k
This issue was moved to a discussion.
You can continue the conversation there. Go to discussion →
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
EnableHolePunching not working! #2878
Comments
ping... @MarcoPolo @wlynxg |
Take a look at our tests for hole punching at (WIP as there is some failure between rust<->go nodes): libp2p/test-plans#381. Specifically this folder https://github.com/libp2p/test-plans/tree/marco/go-holepunch-interop/hole-punch-interop. go-libp2p<->go-libp2p passes the tests. Compare your code with what's in there. The test is using a rust-libp2p relay, but that shouldn't matter, and it should be relatively easy to swap out a go-libp2p relay as well. |
@MarcoPolo HostA's console:
HostB's console:
How can I ensure that hole punching is working properly? |
Can you provide a reproducible test case that fails? Use the hole-punch interop as your base. |
You will definitely fail to hole punch because you haven't interacted with enough nodes.
@MarcoPolo I'm using the sample code from the link you gave. I commented out the Redis-related code and passed in the relay server and peer ID via command line parameters. package main
import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"log"
	"os"
	"os/signal"
	"time"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/event"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/p2p/protocol/identify"
	"github.com/libp2p/go-libp2p/p2p/protocol/ping"
	libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
	"github.com/libp2p/go-libp2p/p2p/transport/tcp"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)
var listenClientPeerID = flag.String("peer", "", "")
var relayAddr = flag.String("relay", "", "")
// const redisAddr = "redis:6379"
type resultInfo struct {
RttToHolePunchedPeerMillis int `json:"rtt_to_holepunched_peer_millis"`
}
// main runs one side of a hole-punching test. TRANSPORT ("tcp"|"quic") and
// MODE ("listen"|"dial") come from the environment; the relay multiaddr and
// (in dial mode) the listener's peer ID come from flags.
func main() {
	flag.Parse()

	tpt := os.Getenv("TRANSPORT")
	switch tpt {
	case "tcp", "quic":
	default:
		log.Fatal("invalid transport")
	}
	mode := os.Getenv("MODE")
	switch mode {
	case "listen", "dial":
	default:
		log.Fatal("invalid mode")
	}

	testTimeout := 3 * time.Minute
	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
	defer cancel()

	// NewMultiaddr reports malformed input as an error; StringCast would
	// panic on a bad -relay flag.
	relayMaddr, err := multiaddr.NewMultiaddr(*relayAddr)
	if err != nil {
		log.Fatal(err)
	}
	ai, err := peer.AddrInfoFromP2pAddr(relayMaddr)
	if err != nil {
		log.Fatal(err)
	}

	// EnableAutoRelayWithStaticRelays was previously added a second time in
	// listen mode; applying it once here covers both modes.
	opts := []libp2p.Option{
		libp2p.EnableAutoRelayWithStaticRelays([]peer.AddrInfo{*ai}),
		libp2p.EnableHolePunching(),
		libp2p.ForceReachabilityPrivate(),
		libp2p.NATPortMap(),
	}
	switch tpt {
	case "tcp":
		opts = append(opts, libp2p.Transport(tcp.NewTCPTransport), libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0"))
	case "quic":
		opts = append(opts, libp2p.Transport(libp2pquic.NewTransport), libp2p.ListenAddrStrings("/ip4/0.0.0.0/udp/0/quic-v1"))
	}
	identify.ActivationThresh = 1 // We only have one relay, so we should activate immediately

	h, err := libp2p.New(opts...)
	if err != nil {
		log.Fatal(err)
	}
	waitToConnectToRelay(ctx, h, *ai)

	switch mode {
	case "listen":
		// Force the host to consider itself private so it requests a relay
		// reservation immediately.
		e, err := h.EventBus().Emitter(new(event.EvtLocalReachabilityChanged))
		if err != nil {
			log.Fatal(err)
		}
		err = e.Emit(event.EvtLocalReachabilityChanged{Reachability: network.ReachabilityPrivate})
		if err != nil {
			log.Fatal(err)
		}
		// Poll briefly until the host advertises at least one address.
		timeoutTime := time.Now().Add(2 * time.Second)
		for time.Now().Before(timeoutTime) {
			log.Printf("Listening on %s", h.Addrs())
			if len(h.Addrs()) > 0 {
				break
			}
			time.Sleep(500 * time.Millisecond)
		}
		time.Sleep(time.Second) // ? sometimes the relay doesn't have the reservation yet?
		fmt.Println("listen client ID:", h.ID())
		// Block until interrupted so the dialer can reach us.
		c := make(chan os.Signal, 1)
		signal.Notify(c, os.Interrupt)
		<-c
	case "dial":
		pid, err := peer.Decode(*listenClientPeerID)
		if err != nil {
			log.Fatal(err)
		}
		// Dial the listener via the relay; hole punching should then upgrade
		// this to a direct connection.
		circuitAddr := relayMaddr.Encapsulate(multiaddr.StringCast("/p2p-circuit/"))
		err = h.Connect(ctx, peer.AddrInfo{
			ID:    pid,
			Addrs: []multiaddr.Multiaddr{circuitAddr},
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("Connected to relayed peer %s", pid)
		// Wait for a direct conn
		s, err := h.NewStream(ctx, pid, ping.ID)
		if err != nil {
			log.Fatal(err)
		}
		defer s.Close()
		// Send a ping message. Implementing this ourselves since the ping protocol allows for pings over relay.
		buf := [32]byte{}
		if _, err := rand.Read(buf[:]); err != nil {
			log.Fatal(err)
		}
		start := time.Now()
		if _, err := s.Write(buf[:]); err != nil {
			log.Fatal(err)
		}
		log.Printf("Is conn limited? %v. %s", s.Conn().Stat().Limited, s.Conn().RemoteMultiaddr())
		// ReadFull: a single Read may legally return fewer than 32 bytes,
		// which would make the comparison below fail spuriously.
		retBuf := [32]byte{}
		if _, err := io.ReadFull(s, retBuf[:]); err != nil {
			log.Fatal(err)
		}
		if !bytes.Equal(buf[:], retBuf[:]) {
			log.Fatal("Ping failed. Bytes did not match.")
		}
		result := resultInfo{
			RttToHolePunchedPeerMillis: int(time.Since(start).Milliseconds()),
		}
		b, err := json.Marshal(result)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(b))
	}
}
// func waitForRedis(ctx context.Context, rClient *redis.Client) {
// for {
// if ctx.Err() != nil {
// log.Fatal("timeout waiting for redis")
// }
// // Wait for redis to be ready
// _, err := rClient.Ping(ctx).Result()
// if err == nil {
// break
// }
// time.Sleep(100 * time.Millisecond)
// }
// }
func waitToConnectToRelay(ctx context.Context, h host.Host, relayInfo peer.AddrInfo) {
try := 0
for {
log.Printf("Attempting to connect to relay %s. Try #%d", relayInfo.ID, try)
try++
if ctx.Err() != nil {
log.Fatal("timeout waiting for relay")
}
err := h.Connect(ctx, relayInfo)
if err == nil {
log.Printf("Connected to relay %s", relayInfo.ID)
break
}
time.Sleep(500 * time.Millisecond)
}
} |
@wlynxg How many nodes are needed at least? I have a relay node and two regular nodes. These two nodes are behind NAT.
@MarcoPolo I have been working on this problem for over a week. Could you please take 5 minutes to run this code and see if the behavior is the same as mine? #2878 (comment) |
This issue was moved to a discussion.
You can continue the conversation there. Go to discussion →
I hope to get help.
I found similar issues, but my problem was not solved.
#2761
#2630
Below is the sample code I wrote and the running steps and results.
Run
./relay -op relay
on a server with a public IP. The terminal will output:
Using two hosts, Host A and Host B are behind different firewalls and both are Cone NAT.
They run simultaneously:
Host A and Host B, terminal Output:
The output addresses do not contain the addresses of the holepunching, they are all their own local addresses.
How should I modify this code to make hole punching work?
The text was updated successfully, but these errors were encountered: