Xray-core/transport/internet/splithttp/dialer.go

package splithttp

import (
	"context"
	gotls "crypto/tls"
	"net/http"
	"net/url"
	"strconv"
	"sync"
	"time"

	"github.com/quic-go/quic-go"
	"github.com/quic-go/quic-go/http3"
	"github.com/xtls/xray-core/common"
	"github.com/xtls/xray-core/common/buf"
	"github.com/xtls/xray-core/common/errors"
	"github.com/xtls/xray-core/common/net"
	"github.com/xtls/xray-core/common/signal/semaphore"
	"github.com/xtls/xray-core/common/uuid"
	"github.com/xtls/xray-core/transport/internet"
	"github.com/xtls/xray-core/transport/internet/browser_dialer"
	"github.com/xtls/xray-core/transport/internet/stat"
	"github.com/xtls/xray-core/transport/internet/tls"
	"github.com/xtls/xray-core/transport/pipe"
	"golang.org/x/net/http2"
)

// defines the maximum time an idle TCP session can survive in the tunnel, so
// it should be consistent across HTTP versions and with other transports.
const connIdleTimeout = 300 * time.Second

// consistent with quic-go
const h3KeepalivePeriod = 10 * time.Second

// consistent with chrome
const h2KeepalivePeriod = 45 * time.Second

type dialerConf struct {
	net.Destination
	*internet.MemoryStreamConfig
}
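
// Clients are cached per destination/stream-settings pair, so all connections
// to the same destination share one DialerClient and its transports.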
var (
	globalDialerMap    map[dialerConf]DialerClient
	globalDialerAccess sync.Mutex
)

func getHTTPClient(ctx context.Context, dest net.Destination, streamSettings *internet.MemoryStreamConfig) DialerClient {
	if browser_dialer.HasBrowserDialer() {
		return &BrowserDialerClient{}
	}

	tlsConfig := tls.ConfigFromStreamSettings(streamSettings)
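	// Infer the HTTP version from the configured ALPN list: anything other
	// than a bare "http/1.1" is treated as H2-capable, and a bare "h3"
	// selects HTTP/3.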
	isH2 := tlsConfig != nil && !(len(tlsConfig.NextProtocol) == 1 && tlsConfig.NextProtocol[0] == "http/1.1")
	isH3 := tlsConfig != nil && (len(tlsConfig.NextProtocol) == 1 && tlsConfig.NextProtocol[0] == "h3")

	globalDialerAccess.Lock()
	defer globalDialerAccess.Unlock()

	if globalDialerMap == nil {
		globalDialerMap = make(map[dialerConf]DialerClient)
	}
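
	// QUIC runs over UDP, so for HTTP/3 both the cache key and the system
	// dialer must see a UDP destination.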
	if isH3 {
		dest.Network = net.Network_UDP
	}

	if client, found := globalDialerMap[dialerConf{dest, streamSettings}]; found {
		return client
	}

	var gotlsConfig *gotls.Config

	if tlsConfig != nil {
		gotlsConfig = tlsConfig.GetTLSConfig(tls.WithDestination(dest))
	}
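
	// dialContext establishes the raw connection and, when TLS is configured,
	// completes the (u)TLS handshake; it is used by the HTTP/1.1 and HTTP/2
	// transports below and for raw upload connections.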
	dialContext := func(ctxInner context.Context) (net.Conn, error) {
		conn, err := internet.DialSystem(ctxInner, dest, streamSettings.SocketSettings)
		if err != nil {
			return nil, err
		}

		if gotlsConfig != nil {
			if fingerprint := tls.GetFingerprint(tlsConfig.Fingerprint); fingerprint != nil {
				conn = tls.UClient(conn, gotlsConfig, fingerprint)
				if err := conn.(*tls.UConn).HandshakeContext(ctxInner); err != nil {
					return nil, err
				}
			} else {
				conn = tls.Client(conn, gotlsConfig)
			}
		}

		return conn, nil
	}

	var downloadTransport http.RoundTripper
	var uploadTransport http.RoundTripper
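
	// Pick the transport implementation that matches the negotiated HTTP
	// version: HTTP/3 over QUIC, HTTP/2, or plain HTTP/1.1.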
	if isH3 {
		quicConfig := &quic.Config{
			MaxIdleTimeout: connIdleTimeout,

			// these two are defaults of quic-go/http3. the default of quic-go (no
			// http3) is different, so it is hardcoded here for clarity.
			// https://github.com/quic-go/quic-go/blob/b8ea5c798155950fb5bbfdd06cad1939c9355878/http3/client.go#L36-L39
			MaxIncomingStreams: -1,
			KeepAlivePeriod:    h3KeepalivePeriod,
		}
		roundTripper := &http3.RoundTripper{
			QUICConfig:      quicConfig,
			TLSClientConfig: gotlsConfig,
			Dial: func(ctx context.Context, addr string, tlsCfg *gotls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
				conn, err := internet.DialSystem(ctx, dest, streamSettings.SocketSettings)
				if err != nil {
					return nil, err
				}

				var udpConn net.PacketConn
				var udpAddr *net.UDPAddr
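
				// DialSystem may hand back different concrete types; unwrap
				// them into a net.PacketConn plus remote UDP address that
				// quic.DialEarly can use.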
				switch c := conn.(type) {
				case *internet.PacketConnWrapper:
					var ok bool
					udpConn, ok = c.Conn.(*net.UDPConn)
					if !ok {
						return nil, errors.New("PacketConnWrapper does not contain a UDP connection")
					}
					udpAddr, err = net.ResolveUDPAddr("udp", c.Dest.String())
					if err != nil {
						return nil, err
					}
				case *net.UDPConn:
					udpConn = c
					udpAddr, err = net.ResolveUDPAddr("udp", c.RemoteAddr().String())
					if err != nil {
						return nil, err
					}
				default:
					udpConn = &internet.FakePacketConn{c}
					udpAddr, err = net.ResolveUDPAddr("udp", c.RemoteAddr().String())
					if err != nil {
						return nil, err
					}
				}

				return quic.DialEarly(ctx, udpConn, udpAddr, tlsCfg, cfg)
			},
		}
		downloadTransport = roundTripper
		uploadTransport = roundTripper
	} else if isH2 {
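		// A single http2.Transport serves both directions: HTTP/2 multiplexes
		// requests over one connection, and ReadIdleTimeout makes the transport
		// ping the connection to keep it alive while idle.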
		downloadTransport = &http2.Transport{
			DialTLSContext: func(ctxInner context.Context, network string, addr string, cfg *gotls.Config) (net.Conn, error) {
				return dialContext(ctxInner)
			},
			IdleConnTimeout: connIdleTimeout,
			ReadIdleTimeout: h2KeepalivePeriod,
		}
		uploadTransport = downloadTransport
	} else {
		httpDialContext := func(ctxInner context.Context, network string, addr string) (net.Conn, error) {
			return dialContext(ctxInner)
		}

		downloadTransport = &http.Transport{
			DialTLSContext:  httpDialContext,
			DialContext:     httpDialContext,
			IdleConnTimeout: connIdleTimeout,
			// chunked transfer download with keepalives is buggy with
			// http.Client and our custom dial context.
			DisableKeepAlives: true,
		}

		// uploads over HTTP/1.1 are sent on raw connections taken from
		// uploadRawPool, so no upload RoundTripper is needed here.
		uploadTransport = nil
	}

	client := &DefaultDialerClient{
		transportConfig: streamSettings.ProtocolSettings.(*Config),
		download: &http.Client{
			Transport: downloadTransport,
		},
		upload: &http.Client{
			Transport: uploadTransport,
		},
		isH2:           isH2,
		isH3:           isH3,
		uploadRawPool:  &sync.Pool{},
		dialUploadConn: dialContext,
	}

	globalDialerMap[dialerConf{dest, streamSettings}] = client
	return client
}

func init() {
	common.Must(internet.RegisterTransportDialer(protocolName, Dial))
}
func Dial(ctx context.Context, dest net.Destination, streamSettings *internet.MemoryStreamConfig) (stat.Connection, error) {
	errors.LogInfo(ctx, "dialing splithttp to ", dest)

	var requestURL url.URL
	transportConfiguration := streamSettings.ProtocolSettings.(*Config)
	tlsConfig := tls.ConfigFromStreamSettings(streamSettings)

	scMaxConcurrentPosts := transportConfiguration.GetNormalizedScMaxConcurrentPosts()
	scMaxEachPostBytes := transportConfiguration.GetNormalizedScMaxEachPostBytes()
	scMinPostsIntervalMs := transportConfiguration.GetNormalizedScMinPostsIntervalMs()

	if tlsConfig != nil {
		requestURL.Scheme = "https"
	} else {
		requestURL.Scheme = "http"
	}

	requestURL.Host = transportConfiguration.Host
	if requestURL.Host == "" {
		requestURL.Host = dest.NetAddr()
	}

	sessionIdUuid := uuid.New()
	requestURL.Path = transportConfiguration.GetNormalizedPath() + sessionIdUuid.String()
	requestURL.RawQuery = transportConfiguration.GetNormalizedQuery()

	httpClient := getHTTPClient(ctx, dest, streamSettings)

	maxUploadSize := scMaxEachPostBytes.roll()
	// WithSizeLimit(0) will still allow single bytes to pass, and a lot of
	// code relies on this behavior. Subtract 1 so that together with
	// uploadWriter wrapper, exact size limits can be enforced
	uploadPipeReader, uploadPipeWriter := pipe.New(pipe.WithSizeLimit(maxUploadSize - 1))

	go func() {
		requestsLimiter := semaphore.New(int(scMaxConcurrentPosts.roll()))
		var requestCounter int64
		lastWrite := time.Now()

		// by offloading the uploads into a buffered pipe, multiple conn.Write
		// calls get automatically batched together into larger POST requests.
		// without batching, bandwidth is extremely limited.
		for {
			chunk, err := uploadPipeReader.ReadMultiBuffer()
			if err != nil {
				break
			}

			<-requestsLimiter.Wait()

			seq := requestCounter
			requestCounter += 1

			go func() {
				defer requestsLimiter.Signal()

				// this intentionally makes a shallow-copy of the struct so we
				// can reassign Path (potentially concurrently)
				url := requestURL
				url.Path += "/" + strconv.FormatInt(seq, 10)
				// reassign query to get different padding
				url.RawQuery = transportConfiguration.GetNormalizedQuery()

				err := httpClient.SendUploadRequest(
					context.WithoutCancel(ctx),
					url.String(),
					&buf.MultiBufferContainer{MultiBuffer: chunk},
					int64(chunk.Len()),
				)
				if err != nil {
					errors.LogInfoInner(ctx, err, "failed to send upload")
					uploadPipeReader.Interrupt()
				}
			}()
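
			// Optionally pace the uploads: enforce a randomized minimum
			// interval between consecutive upload requests.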
			if scMinPostsIntervalMs.From > 0 {
				roll := time.Duration(scMinPostsIntervalMs.roll()) * time.Millisecond
				if time.Since(lastWrite) < roll {
					time.Sleep(roll)
				}

				lastWrite = time.Now()
			}
		}
	}()

	lazyRawDownload, remoteAddr, localAddr, err := httpClient.OpenDownload(context.WithoutCancel(ctx), requestURL.String())
	if err != nil {
		return nil, err
	}

	reader := &stripOkReader{ReadCloser: lazyRawDownload}

	writer := uploadWriter{
		uploadPipeWriter,
		maxUploadSize,
	}
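
	// Tie the download stream and the upload pipe together into a single
	// full-duplex connection.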
	conn := splitConn{
		writer: writer,
		reader: reader,
		remoteAddr: remoteAddr,
		localAddr:  localAddr,
	}

	return stat.Connection(&conn), nil
}

// A wrapper around pipe that ensures the size limit is exactly honored.
//
// The MultiBuffer pipe accepts any single WriteMultiBuffer call even if that
// single MultiBuffer exceeds the size limit, and then starts blocking on the
// next WriteMultiBuffer call. This means that ReadMultiBuffer can return more
// bytes than the size limit. We work around this by splitting a potentially
// too large write up into multiple.
type uploadWriter struct {
	*pipe.Writer
	maxLen int32
}

func (w uploadWriter) Write(b []byte) (int, error) {
	capacity := int(w.maxLen - w.Len())
	if capacity > 0 && capacity < len(b) {
		b = b[:capacity]
	}

	buffer := buf.New()
	n, err := buffer.Write(b)
	if err != nil {
		return 0, err
	}

	err = w.WriteMultiBuffer([]*buf.Buffer{buffer})
	if err != nil {
		return 0, err
	}

	return n, nil
}