mirror of
https://github.com/Control-D-Inc/ctrld.git
synced 2026-02-03 22:18:39 +00:00
The current transport setup uses a mutex lock for synchronization. This works fine on a normal device, but on low-capacity routers this high contention may affect performance, causing ctrld to hang. Using atomic operations for synchronization instead of a mutex lock yields better performance: - There's no lock, so other requests won't be blocked. And even if these requests use an old broken transport, it would be fine, because the client will retry them later. - The transport setup is now done once, on demand when the transport is accessed, or when re-bootstrapping is signaled. The first call to dohTransport will block others, but the transport is warmed up before ctrld starts serving requests, so client requests won't be affected. That helps ctrld handle requests better when running on a low-capacity device. Furthermore, the transport configuration is also tweaked for better default performance: - MaxIdleConnsPerHost is set to 100 (default is 2), which allows more connections to be reused, reducing the load of opening/closing connections on demand. See [1] for a real example. - Due to the increase of MaxIdleConnsPerHost, once the transport is GC-ed, it must explicitly close its idle connections. - The TLS client session cache is now enabled. Last but not least, the upstream ping process is also reworked. The DoH transport is an HTTP transport, so doing a HEAD request is enough to warm up the transport, instead of doing a full DNS query. [1]: https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/274
155 lines
3.9 KiB
Go
155 lines
3.9 KiB
Go
//go:build !qf
|
|
|
|
package ctrld
|
|
|
|
import (
|
|
"context"
|
|
"crypto/tls"
|
|
"errors"
|
|
"net"
|
|
"net/http"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/miekg/dns"
|
|
"github.com/quic-go/quic-go"
|
|
"github.com/quic-go/quic-go/http3"
|
|
|
|
ctrldnet "github.com/Control-D-Inc/ctrld/internal/net"
|
|
)
|
|
|
|
func (uc *UpstreamConfig) setupDOH3Transport() {
|
|
switch uc.IPStack {
|
|
case IpStackBoth, "":
|
|
uc.http3RoundTripper = uc.newDOH3Transport(uc.bootstrapIPs)
|
|
case IpStackV4:
|
|
uc.http3RoundTripper = uc.newDOH3Transport(uc.bootstrapIPs4)
|
|
case IpStackV6:
|
|
uc.http3RoundTripper = uc.newDOH3Transport(uc.bootstrapIPs6)
|
|
case IpStackSplit:
|
|
uc.http3RoundTripper4 = uc.newDOH3Transport(uc.bootstrapIPs4)
|
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
|
defer cancel()
|
|
if ctrldnet.IPv6Available(ctx) {
|
|
uc.http3RoundTripper6 = uc.newDOH3Transport(uc.bootstrapIPs6)
|
|
} else {
|
|
uc.http3RoundTripper6 = uc.http3RoundTripper4
|
|
}
|
|
uc.http3RoundTripper = uc.newDOH3Transport(uc.bootstrapIPs)
|
|
}
|
|
}
|
|
|
|
func (uc *UpstreamConfig) newDOH3Transport(addrs []string) http.RoundTripper {
|
|
rt := &http3.RoundTripper{}
|
|
rt.TLSClientConfig = &tls.Config{RootCAs: uc.certPool}
|
|
rt.Dial = func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
|
|
domain := addr
|
|
_, port, _ := net.SplitHostPort(addr)
|
|
// if we have a bootstrap ip set, use it to avoid DNS lookup
|
|
if uc.BootstrapIP != "" {
|
|
addr = net.JoinHostPort(uc.BootstrapIP, port)
|
|
ProxyLog.Debug().Msgf("sending doh3 request to: %s", addr)
|
|
udpConn, err := net.ListenUDP("udp", nil)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
remoteAddr, err := net.ResolveUDPAddr("udp", addr)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return quic.DialEarlyContext(ctx, udpConn, remoteAddr, domain, tlsCfg, cfg)
|
|
}
|
|
dialAddrs := make([]string, len(addrs))
|
|
for i := range addrs {
|
|
dialAddrs[i] = net.JoinHostPort(addrs[i], port)
|
|
}
|
|
pd := &quicParallelDialer{}
|
|
conn, err := pd.Dial(ctx, domain, dialAddrs, tlsCfg, cfg)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
ProxyLog.Debug().Msgf("sending doh3 request to: %s", conn.RemoteAddr())
|
|
return conn, err
|
|
}
|
|
return rt
|
|
}
|
|
|
|
func (uc *UpstreamConfig) doh3Transport(dnsType uint16) http.RoundTripper {
|
|
uc.transportOnce.Do(func() {
|
|
uc.SetupTransport()
|
|
})
|
|
if uc.rebootstrap.CompareAndSwap(true, false) {
|
|
uc.SetupTransport()
|
|
}
|
|
switch uc.IPStack {
|
|
case IpStackBoth, IpStackV4, IpStackV6:
|
|
return uc.http3RoundTripper
|
|
case IpStackSplit:
|
|
switch dnsType {
|
|
case dns.TypeA:
|
|
return uc.http3RoundTripper4
|
|
default:
|
|
return uc.http3RoundTripper6
|
|
}
|
|
}
|
|
return uc.http3RoundTripper
|
|
}
|
|
|
|
// Putting the code for the quic parallel dialer here:
//
// - the quic dialer differs from net.Dialer, so it cannot reuse the
//   generic parallel dialer
// - simplification for the quic free version

// parallelDialerResult carries the outcome of a single dial attempt:
// an established QUIC connection on success, or the error that occurred.
type parallelDialerResult struct {
	conn quic.EarlyConnection // non-nil only when err is nil
	err  error                // non-nil when the dial attempt failed
}
|
|
|
|
// quicParallelDialer dials a list of candidate addresses concurrently and
// returns the first QUIC connection that is established successfully.
type quicParallelDialer struct{}
|
|
|
|
// Dial performs parallel dialing to the given address list.
|
|
func (d *quicParallelDialer) Dial(ctx context.Context, domain string, addrs []string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
|
|
if len(addrs) == 0 {
|
|
return nil, errors.New("empty addresses")
|
|
}
|
|
ctx, cancel := context.WithCancel(ctx)
|
|
defer cancel()
|
|
|
|
ch := make(chan *parallelDialerResult, len(addrs))
|
|
var wg sync.WaitGroup
|
|
wg.Add(len(addrs))
|
|
go func() {
|
|
wg.Wait()
|
|
close(ch)
|
|
}()
|
|
|
|
udpConn, err := net.ListenUDP("udp", nil)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
for _, addr := range addrs {
|
|
go func(addr string) {
|
|
defer wg.Done()
|
|
remoteAddr, err := net.ResolveUDPAddr("udp", addr)
|
|
if err != nil {
|
|
ch <- ¶llelDialerResult{conn: nil, err: err}
|
|
return
|
|
}
|
|
|
|
conn, err := quic.DialEarlyContext(ctx, udpConn, remoteAddr, domain, tlsCfg, cfg)
|
|
ch <- ¶llelDialerResult{conn: conn, err: err}
|
|
}(addr)
|
|
}
|
|
|
|
errs := make([]error, 0, len(addrs))
|
|
for res := range ch {
|
|
if res.err == nil {
|
|
cancel()
|
|
return res.conn, res.err
|
|
}
|
|
errs = append(errs, res.err)
|
|
}
|
|
|
|
return nil, errors.Join(errs...)
|
|
}
|