RPRX 2020-11-25 19:01:53 +08:00
parent 47d23e9972
commit c7f7c08ead
711 changed files with 82154 additions and 2 deletions

app/proxyman/inbound/always.go
@@ -0,0 +1,185 @@
package inbound
import (
"context"
"github.com/xtls/xray-core/v1/app/proxyman"
"github.com/xtls/xray-core/v1/common"
"github.com/xtls/xray-core/v1/common/dice"
"github.com/xtls/xray-core/v1/common/errors"
"github.com/xtls/xray-core/v1/common/mux"
"github.com/xtls/xray-core/v1/common/net"
"github.com/xtls/xray-core/v1/core"
"github.com/xtls/xray-core/v1/features/policy"
"github.com/xtls/xray-core/v1/features/stats"
"github.com/xtls/xray-core/v1/proxy"
"github.com/xtls/xray-core/v1/transport/internet"
)
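// getStatCounter looks up the uplink and downlink traffic counters for the
// given inbound tag, registering them on demand. Either counter may be nil
// when the tag is empty or the corresponding system stats policy is disabled.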
func getStatCounter(v *core.Instance, tag string) (stats.Counter, stats.Counter) {
var uplinkCounter stats.Counter
var downlinkCounter stats.Counter
policy := v.GetFeature(policy.ManagerType()).(policy.Manager)
if len(tag) > 0 && policy.ForSystem().Stats.InboundUplink {
statsManager := v.GetFeature(stats.ManagerType()).(stats.Manager)
name := "inbound>>>" + tag + ">>>traffic>>>uplink"
c, _ := stats.GetOrRegisterCounter(statsManager, name)
if c != nil {
uplinkCounter = c
}
}
if len(tag) > 0 && policy.ForSystem().Stats.InboundDownlink {
statsManager := v.GetFeature(stats.ManagerType()).(stats.Manager)
name := "inbound>>>" + tag + ">>>traffic>>>downlink"
c, _ := stats.GetOrRegisterCounter(statsManager, name)
if c != nil {
downlinkCounter = c
}
}
return uplinkCounter, downlinkCounter
}
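// A minimal sketch (not part of this commit) of how a caller could read the
// counters registered above, using only the stats.Manager interface already
// imported here:
//
//	func readUplink(sm stats.Manager, tag string) int64 {
//		if c := sm.GetCounter("inbound>>>" + tag + ">>>traffic>>>uplink"); c != nil {
//			return c.Value()
//		}
//		return 0
//	}

// AlwaysOnInboundHandler keeps a fixed set of workers listening for the whole
// lifetime of the handler.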
type AlwaysOnInboundHandler struct {
proxy proxy.Inbound
workers []worker
mux *mux.Server
tag string
}
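// NewAlwaysOnInboundHandler creates one TCP and/or UDP worker for every port
// in the receiver's port range, or a single Unix domain socket worker when no
// port range is configured.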
func NewAlwaysOnInboundHandler(ctx context.Context, tag string, receiverConfig *proxyman.ReceiverConfig, proxyConfig interface{}) (*AlwaysOnInboundHandler, error) {
rawProxy, err := common.CreateObject(ctx, proxyConfig)
if err != nil {
return nil, err
}
p, ok := rawProxy.(proxy.Inbound)
if !ok {
return nil, newError("not an inbound proxy.")
}
h := &AlwaysOnInboundHandler{
proxy: p,
mux: mux.NewServer(ctx),
tag: tag,
}
uplinkCounter, downlinkCounter := getStatCounter(core.MustFromContext(ctx), tag)
nl := p.Network()
pr := receiverConfig.PortRange
address := receiverConfig.Listen.AsAddress()
if address == nil {
address = net.AnyIP
}
mss, err := internet.ToMemoryStreamConfig(receiverConfig.StreamSettings)
if err != nil {
return nil, newError("failed to parse stream config").Base(err).AtWarning()
}
if receiverConfig.ReceiveOriginalDestination {
if mss.SocketSettings == nil {
mss.SocketSettings = &internet.SocketConfig{}
}
if mss.SocketSettings.Tproxy == internet.SocketConfig_Off {
mss.SocketSettings.Tproxy = internet.SocketConfig_Redirect
}
mss.SocketSettings.ReceiveOriginalDestAddress = true
}
if pr == nil {
if net.HasNetwork(nl, net.Network_UNIX) {
newError("creating unix domain socket worker on ", address).AtDebug().WriteToLog()
worker := &dsWorker{
address: address,
proxy: p,
stream: mss,
tag: tag,
dispatcher: h.mux,
sniffingConfig: receiverConfig.GetEffectiveSniffingSettings(),
uplinkCounter: uplinkCounter,
downlinkCounter: downlinkCounter,
ctx: ctx,
}
h.workers = append(h.workers, worker)
}
}
if pr != nil {
for port := pr.From; port <= pr.To; port++ {
if net.HasNetwork(nl, net.Network_TCP) {
newError("creating stream worker on ", address, ":", port).AtDebug().WriteToLog()
worker := &tcpWorker{
address: address,
port: net.Port(port),
proxy: p,
stream: mss,
recvOrigDest: receiverConfig.ReceiveOriginalDestination,
tag: tag,
dispatcher: h.mux,
sniffingConfig: receiverConfig.GetEffectiveSniffingSettings(),
uplinkCounter: uplinkCounter,
downlinkCounter: downlinkCounter,
ctx: ctx,
}
h.workers = append(h.workers, worker)
}
if net.HasNetwork(nl, net.Network_UDP) {
worker := &udpWorker{
tag: tag,
proxy: p,
address: address,
port: net.Port(port),
dispatcher: h.mux,
uplinkCounter: uplinkCounter,
downlinkCounter: downlinkCounter,
stream: mss,
}
h.workers = append(h.workers, worker)
}
}
}
return h, nil
}
// Start implements common.Runnable.
func (h *AlwaysOnInboundHandler) Start() error {
for _, worker := range h.workers {
if err := worker.Start(); err != nil {
return err
}
}
return nil
}
// Close implements common.Closable.
func (h *AlwaysOnInboundHandler) Close() error {
var errs []error
for _, worker := range h.workers {
errs = append(errs, worker.Close())
}
errs = append(errs, h.mux.Close())
if err := errors.Combine(errs...); err != nil {
return newError("failed to close all resources").Base(err)
}
return nil
}
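// GetRandomInboundProxy returns the proxy and port of a randomly picked
// worker. The constant 9999 stands in for the expiry (in minutes) reported by
// dynamic handlers; always-on workers effectively never expire.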
func (h *AlwaysOnInboundHandler) GetRandomInboundProxy() (interface{}, net.Port, int) {
if len(h.workers) == 0 {
return nil, 0, 0
}
w := h.workers[dice.Roll(len(h.workers))]
return w.Proxy(), w.Port(), 9999
}
func (h *AlwaysOnInboundHandler) Tag() string {
return h.tag
}
func (h *AlwaysOnInboundHandler) GetInbound() proxy.Inbound {
return h.proxy
}

app/proxyman/inbound/dynamic.go
@@ -0,0 +1,201 @@
package inbound
import (
"context"
"sync"
"time"
"github.com/xtls/xray-core/v1/app/proxyman"
"github.com/xtls/xray-core/v1/common/dice"
"github.com/xtls/xray-core/v1/common/mux"
"github.com/xtls/xray-core/v1/common/net"
"github.com/xtls/xray-core/v1/common/task"
"github.com/xtls/xray-core/v1/core"
"github.com/xtls/xray-core/v1/proxy"
"github.com/xtls/xray-core/v1/transport/internet"
)
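// DynamicInboundHandler periodically re-creates its workers on random ports
// picked from the configured port range.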
type DynamicInboundHandler struct {
tag string
v *core.Instance
proxyConfig interface{}
receiverConfig *proxyman.ReceiverConfig
streamSettings *internet.MemoryStreamConfig
portMutex sync.Mutex
portsInUse map[net.Port]bool
workerMutex sync.RWMutex
worker []worker
lastRefresh time.Time
mux *mux.Server
task *task.Periodic
ctx context.Context
}
func NewDynamicInboundHandler(ctx context.Context, tag string, receiverConfig *proxyman.ReceiverConfig, proxyConfig interface{}) (*DynamicInboundHandler, error) {
v := core.MustFromContext(ctx)
h := &DynamicInboundHandler{
tag: tag,
proxyConfig: proxyConfig,
receiverConfig: receiverConfig,
portsInUse: make(map[net.Port]bool),
mux: mux.NewServer(ctx),
v: v,
ctx: ctx,
}
mss, err := internet.ToMemoryStreamConfig(receiverConfig.StreamSettings)
if err != nil {
return nil, newError("failed to parse stream settings").Base(err).AtWarning()
}
if receiverConfig.ReceiveOriginalDestination {
if mss.SocketSettings == nil {
mss.SocketSettings = &internet.SocketConfig{}
}
if mss.SocketSettings.Tproxy == internet.SocketConfig_Off {
mss.SocketSettings.Tproxy = internet.SocketConfig_Redirect
}
mss.SocketSettings.ReceiveOriginalDestAddress = true
}
h.streamSettings = mss
h.task = &task.Periodic{
Interval: time.Minute * time.Duration(h.receiverConfig.AllocationStrategy.GetRefreshValue()),
Execute: h.refresh,
}
return h, nil
}
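// allocatePort draws random ports until it finds one not currently in use and
// marks it as taken. This assumes the port range is larger than the number of
// concurrent workers; otherwise the loop would never terminate.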
func (h *DynamicInboundHandler) allocatePort() net.Port {
from := int(h.receiverConfig.PortRange.From)
delta := int(h.receiverConfig.PortRange.To) - from + 1
h.portMutex.Lock()
defer h.portMutex.Unlock()
for {
r := dice.Roll(delta)
port := net.Port(from + r)
_, used := h.portsInUse[port]
if !used {
h.portsInUse[port] = true
return port
}
}
}
func (h *DynamicInboundHandler) closeWorkers(workers []worker) {
ports2Del := make([]net.Port, len(workers))
for idx, worker := range workers {
ports2Del[idx] = worker.Port()
if err := worker.Close(); err != nil {
newError("failed to close worker").Base(err).WriteToLog()
}
}
h.portMutex.Lock()
for _, port := range ports2Del {
delete(h.portsInUse, port)
}
h.portMutex.Unlock()
}
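// refresh builds a fresh batch of workers on newly allocated ports and
// schedules that batch to be closed after twice the refresh interval, so
// consecutive batches overlap by one full cycle.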
func (h *DynamicInboundHandler) refresh() error {
h.lastRefresh = time.Now()
timeout := time.Minute * time.Duration(h.receiverConfig.AllocationStrategy.GetRefreshValue()) * 2
concurrency := h.receiverConfig.AllocationStrategy.GetConcurrencyValue()
workers := make([]worker, 0, concurrency)
address := h.receiverConfig.Listen.AsAddress()
if address == nil {
address = net.AnyIP
}
uplinkCounter, downlinkCounter := getStatCounter(h.v, h.tag)
for i := uint32(0); i < concurrency; i++ {
port := h.allocatePort()
rawProxy, err := core.CreateObject(h.v, h.proxyConfig)
if err != nil {
newError("failed to create proxy instance").Base(err).AtWarning().WriteToLog()
continue
}
p := rawProxy.(proxy.Inbound)
nl := p.Network()
if net.HasNetwork(nl, net.Network_TCP) {
worker := &tcpWorker{
tag: h.tag,
address: address,
port: port,
proxy: p,
stream: h.streamSettings,
recvOrigDest: h.receiverConfig.ReceiveOriginalDestination,
dispatcher: h.mux,
sniffingConfig: h.receiverConfig.GetEffectiveSniffingSettings(),
uplinkCounter: uplinkCounter,
downlinkCounter: downlinkCounter,
ctx: h.ctx,
}
if err := worker.Start(); err != nil {
newError("failed to create TCP worker").Base(err).AtWarning().WriteToLog()
continue
}
workers = append(workers, worker)
}
if net.HasNetwork(nl, net.Network_UDP) {
worker := &udpWorker{
tag: h.tag,
proxy: p,
address: address,
port: port,
dispatcher: h.mux,
uplinkCounter: uplinkCounter,
downlinkCounter: downlinkCounter,
stream: h.streamSettings,
}
if err := worker.Start(); err != nil {
newError("failed to create UDP worker").Base(err).AtWarning().WriteToLog()
continue
}
workers = append(workers, worker)
}
}
h.workerMutex.Lock()
h.worker = workers
h.workerMutex.Unlock()
time.AfterFunc(timeout, func() {
h.closeWorkers(workers)
})
return nil
}
func (h *DynamicInboundHandler) Start() error {
return h.task.Start()
}
func (h *DynamicInboundHandler) Close() error {
return h.task.Close()
}
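// GetRandomInboundProxy returns a random active worker's proxy and port,
// along with the minutes remaining until the current batch is refreshed.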
func (h *DynamicInboundHandler) GetRandomInboundProxy() (interface{}, net.Port, int) {
h.workerMutex.RLock()
defer h.workerMutex.RUnlock()
if len(h.worker) == 0 {
return nil, 0, 0
}
w := h.worker[dice.Roll(len(h.worker))]
expire := h.receiverConfig.AllocationStrategy.GetRefreshValue() - uint32(time.Since(h.lastRefresh)/time.Minute)
return w.Proxy(), w.Port(), int(expire)
}
func (h *DynamicInboundHandler) Tag() string {
return h.tag
}

app/proxyman/inbound/errors.generated.go
@@ -0,0 +1,9 @@
package inbound
import "github.com/xtls/xray-core/v1/common/errors"
type errPathObjHolder struct{}
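// newError wraps errors.New so every error raised in this package carries the
// package's path object for logging. Usage sketch, mirroring call sites
// elsewhere in this commit:
//
//	newError("failed to listen TCP on ", port).AtWarning().Base(err)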
func newError(values ...interface{}) *errors.Error {
return errors.New(values...).WithPathObj(errPathObjHolder{})
}

app/proxyman/inbound/inbound.go
@@ -0,0 +1,178 @@
package inbound
//go:generate go run github.com/xtls/xray-core/v1/common/errors/errorgen
import (
"context"
"sync"
"github.com/xtls/xray-core/v1/app/proxyman"
"github.com/xtls/xray-core/v1/common"
"github.com/xtls/xray-core/v1/common/serial"
"github.com/xtls/xray-core/v1/common/session"
"github.com/xtls/xray-core/v1/core"
"github.com/xtls/xray-core/v1/features/inbound"
)
// Manager is to manage all inbound handlers.
type Manager struct {
access sync.RWMutex
untaggedHandler []inbound.Handler
taggedHandlers map[string]inbound.Handler
running bool
}
// New returns a new Manager for inbound handlers.
func New(ctx context.Context, config *proxyman.InboundConfig) (*Manager, error) {
m := &Manager{
taggedHandlers: make(map[string]inbound.Handler),
}
return m, nil
}
// Type implements common.HasType.
func (*Manager) Type() interface{} {
return inbound.ManagerType()
}
// AddHandler implements inbound.Manager.
func (m *Manager) AddHandler(ctx context.Context, handler inbound.Handler) error {
m.access.Lock()
defer m.access.Unlock()
tag := handler.Tag()
if len(tag) > 0 {
m.taggedHandlers[tag] = handler
} else {
m.untaggedHandler = append(m.untaggedHandler, handler)
}
if m.running {
return handler.Start()
}
return nil
}
// GetHandler implements inbound.Manager.
func (m *Manager) GetHandler(ctx context.Context, tag string) (inbound.Handler, error) {
m.access.RLock()
defer m.access.RUnlock()
handler, found := m.taggedHandlers[tag]
if !found {
return nil, newError("handler not found: ", tag)
}
return handler, nil
}
// RemoveHandler implements inbound.Manager.
func (m *Manager) RemoveHandler(ctx context.Context, tag string) error {
if tag == "" {
return common.ErrNoClue
}
m.access.Lock()
defer m.access.Unlock()
if handler, found := m.taggedHandlers[tag]; found {
if err := handler.Close(); err != nil {
newError("failed to close handler ", tag).Base(err).AtWarning().WriteToLog(session.ExportIDToError(ctx))
}
delete(m.taggedHandlers, tag)
return nil
}
return common.ErrNoClue
}
// Start implements common.Runnable.
func (m *Manager) Start() error {
m.access.Lock()
defer m.access.Unlock()
m.running = true
for _, handler := range m.taggedHandlers {
if err := handler.Start(); err != nil {
return err
}
}
for _, handler := range m.untaggedHandler {
if err := handler.Start(); err != nil {
return err
}
}
return nil
}
// Close implements common.Closable.
func (m *Manager) Close() error {
m.access.Lock()
defer m.access.Unlock()
m.running = false
var errors []interface{}
for _, handler := range m.taggedHandlers {
if err := handler.Close(); err != nil {
errors = append(errors, err)
}
}
for _, handler := range m.untaggedHandler {
if err := handler.Close(); err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
return newError("failed to close all handlers").Base(newError(serial.Concat(errors...)))
}
return nil
}
// NewHandler creates a new inbound.Handler based on the given config.
func NewHandler(ctx context.Context, config *core.InboundHandlerConfig) (inbound.Handler, error) {
rawReceiverSettings, err := config.ReceiverSettings.GetInstance()
if err != nil {
return nil, err
}
proxySettings, err := config.ProxySettings.GetInstance()
if err != nil {
return nil, err
}
tag := config.Tag
receiverSettings, ok := rawReceiverSettings.(*proxyman.ReceiverConfig)
if !ok {
return nil, newError("not a ReceiverConfig").AtError()
}
streamSettings := receiverSettings.StreamSettings
if streamSettings != nil && streamSettings.SocketSettings != nil {
ctx = session.ContextWithSockopt(ctx, &session.Sockopt{
Mark: streamSettings.SocketSettings.Mark,
})
}
allocStrategy := receiverSettings.AllocationStrategy
if allocStrategy == nil || allocStrategy.Type == proxyman.AllocationStrategy_Always {
return NewAlwaysOnInboundHandler(ctx, tag, receiverSettings, proxySettings)
}
if allocStrategy.Type == proxyman.AllocationStrategy_Random {
return NewDynamicInboundHandler(ctx, tag, receiverSettings, proxySettings)
}
return nil, newError("unknown allocation strategy: ", receiverSettings.AllocationStrategy.Type).AtError()
}
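// init registers both config types so that core can build the inbound manager
// and individual handlers from parsed configs via common.CreateObject.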
func init() {
common.Must(common.RegisterConfig((*proxyman.InboundConfig)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {
return New(ctx, config.(*proxyman.InboundConfig))
}))
common.Must(common.RegisterConfig((*core.InboundHandlerConfig)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {
return NewHandler(ctx, config.(*core.InboundHandlerConfig))
}))
}

app/proxyman/inbound/worker.go
@@ -0,0 +1,483 @@
package inbound
import (
"context"
"sync"
"sync/atomic"
"time"
"github.com/xtls/xray-core/v1/app/proxyman"
"github.com/xtls/xray-core/v1/common"
"github.com/xtls/xray-core/v1/common/buf"
"github.com/xtls/xray-core/v1/common/net"
"github.com/xtls/xray-core/v1/common/serial"
"github.com/xtls/xray-core/v1/common/session"
"github.com/xtls/xray-core/v1/common/signal/done"
"github.com/xtls/xray-core/v1/common/task"
"github.com/xtls/xray-core/v1/features/routing"
"github.com/xtls/xray-core/v1/features/stats"
"github.com/xtls/xray-core/v1/proxy"
"github.com/xtls/xray-core/v1/transport/internet"
"github.com/xtls/xray-core/v1/transport/internet/tcp"
"github.com/xtls/xray-core/v1/transport/internet/udp"
"github.com/xtls/xray-core/v1/transport/pipe"
)
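// worker is the common interface of the per-port (or per-socket) listeners
// owned by an inbound handler.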
type worker interface {
Start() error
Close() error
Port() net.Port
Proxy() proxy.Inbound
}
type tcpWorker struct {
address net.Address
port net.Port
proxy proxy.Inbound
stream *internet.MemoryStreamConfig
recvOrigDest bool
tag string
dispatcher routing.Dispatcher
sniffingConfig *proxyman.SniffingConfig
uplinkCounter stats.Counter
downlinkCounter stats.Counter
hub internet.Listener
ctx context.Context
}
func getTProxyType(s *internet.MemoryStreamConfig) internet.SocketConfig_TProxyMode {
if s == nil || s.SocketSettings == nil {
return internet.SocketConfig_Off
}
return s.SocketSettings.Tproxy
}
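// callback handles one accepted TCP connection: it attaches session metadata
// (original destination for redirect/TProxy setups, source, gateway, tag and
// sniffing preferences), optionally wraps the connection with stats counters,
// and hands it to the proxy for processing.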
func (w *tcpWorker) callback(conn internet.Connection) {
ctx, cancel := context.WithCancel(w.ctx)
sid := session.NewID()
ctx = session.ContextWithID(ctx, sid)
if w.recvOrigDest {
var dest net.Destination
switch getTProxyType(w.stream) {
case internet.SocketConfig_Redirect:
d, err := tcp.GetOriginalDestination(conn)
if err != nil {
newError("failed to get original destination").Base(err).WriteToLog(session.ExportIDToError(ctx))
} else {
dest = d
}
case internet.SocketConfig_TProxy:
dest = net.DestinationFromAddr(conn.LocalAddr())
}
if dest.IsValid() {
ctx = session.ContextWithOutbound(ctx, &session.Outbound{
Target: dest,
})
}
}
ctx = session.ContextWithInbound(ctx, &session.Inbound{
Source: net.DestinationFromAddr(conn.RemoteAddr()),
Gateway: net.TCPDestination(w.address, w.port),
Tag: w.tag,
})
content := new(session.Content)
if w.sniffingConfig != nil {
content.SniffingRequest.Enabled = w.sniffingConfig.Enabled
content.SniffingRequest.OverrideDestinationForProtocol = w.sniffingConfig.DestinationOverride
}
ctx = session.ContextWithContent(ctx, content)
if w.uplinkCounter != nil || w.downlinkCounter != nil {
conn = &internet.StatCouterConnection{
Connection: conn,
ReadCounter: w.uplinkCounter,
WriteCounter: w.downlinkCounter,
}
}
if err := w.proxy.Process(ctx, net.Network_TCP, conn, w.dispatcher); err != nil {
newError("connection ends").Base(err).WriteToLog(session.ExportIDToError(ctx))
}
cancel()
if err := conn.Close(); err != nil {
newError("failed to close connection").Base(err).WriteToLog(session.ExportIDToError(ctx))
}
}
func (w *tcpWorker) Proxy() proxy.Inbound {
return w.proxy
}
func (w *tcpWorker) Start() error {
ctx := context.Background()
hub, err := internet.ListenTCP(ctx, w.address, w.port, w.stream, func(conn internet.Connection) {
go w.callback(conn)
})
if err != nil {
return newError("failed to listen TCP on ", w.port).AtWarning().Base(err)
}
w.hub = hub
return nil
}
func (w *tcpWorker) Close() error {
var errors []interface{}
if w.hub != nil {
if err := common.Close(w.hub); err != nil {
errors = append(errors, err)
}
if err := common.Close(w.proxy); err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
return newError("failed to close all resources").Base(newError(serial.Concat(errors...)))
}
return nil
}
func (w *tcpWorker) Port() net.Port {
return w.port
}
type udpConn struct {
lastActivityTime int64 // in seconds
reader buf.Reader
writer buf.Writer
output func([]byte) (int, error)
remote net.Addr
local net.Addr
done *done.Instance
uplink stats.Counter
downlink stats.Counter
}
func (c *udpConn) updateActivity() {
atomic.StoreInt64(&c.lastActivityTime, time.Now().Unix())
}
// ReadMultiBuffer implements buf.Reader
func (c *udpConn) ReadMultiBuffer() (buf.MultiBuffer, error) {
mb, err := c.reader.ReadMultiBuffer()
if err != nil {
return nil, err
}
c.updateActivity()
if c.uplink != nil {
c.uplink.Add(int64(mb.Len()))
}
return mb, nil
}
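// Read is required by net.Conn but is intentionally unimplemented: inbound
// proxies consume UDP payloads through ReadMultiBuffer instead.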
func (c *udpConn) Read(buf []byte) (int, error) {
panic("not implemented")
}
// Write implements io.Writer.
func (c *udpConn) Write(buf []byte) (int, error) {
n, err := c.output(buf)
if c.downlink != nil {
c.downlink.Add(int64(n))
}
if err == nil {
c.updateActivity()
}
return n, err
}
func (c *udpConn) Close() error {
common.Must(c.done.Close())
common.Must(common.Close(c.writer))
return nil
}
func (c *udpConn) RemoteAddr() net.Addr {
return c.remote
}
func (c *udpConn) LocalAddr() net.Addr {
return c.local
}
func (*udpConn) SetDeadline(time.Time) error {
return nil
}
func (*udpConn) SetReadDeadline(time.Time) error {
return nil
}
func (*udpConn) SetWriteDeadline(time.Time) error {
return nil
}
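// connID identifies a logical UDP session by its source and, for setups that
// receive the original destination, its target, so datagrams from the same
// peer are routed to the same udpConn.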
type connID struct {
src net.Destination
dest net.Destination
}
type udpWorker struct {
sync.RWMutex
proxy proxy.Inbound
hub *udp.Hub
address net.Address
port net.Port
tag string
stream *internet.MemoryStreamConfig
dispatcher routing.Dispatcher
uplinkCounter stats.Counter
downlinkCounter stats.Counter
checker *task.Periodic
activeConn map[connID]*udpConn
}
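// getConnection returns the udpConn tracked for id, creating one backed by a
// 16 KiB overflow-discarding pipe if necessary. The boolean reports whether
// the connection already existed.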
func (w *udpWorker) getConnection(id connID) (*udpConn, bool) {
w.Lock()
defer w.Unlock()
if conn, found := w.activeConn[id]; found && !conn.done.Done() {
return conn, true
}
pReader, pWriter := pipe.New(pipe.DiscardOverflow(), pipe.WithSizeLimit(16*1024))
conn := &udpConn{
reader: pReader,
writer: pWriter,
output: func(b []byte) (int, error) {
return w.hub.WriteTo(b, id.src)
},
remote: &net.UDPAddr{
IP: id.src.Address.IP(),
Port: int(id.src.Port),
},
local: &net.UDPAddr{
IP: w.address.IP(),
Port: int(w.port),
},
done: done.New(),
uplink: w.uplinkCounter,
downlink: w.downlinkCounter,
}
w.activeConn[id] = conn
conn.updateActivity()
return conn, false
}
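// callback routes one inbound datagram to its udpConn and, for sessions seen
// for the first time, spawns a goroutine that runs the proxy until the
// session ends.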
func (w *udpWorker) callback(b *buf.Buffer, source net.Destination, originalDest net.Destination) {
id := connID{
src: source,
}
if originalDest.IsValid() {
id.dest = originalDest
}
conn, existing := w.getConnection(id)
// The payload is discarded if the pipe is full.
conn.writer.WriteMultiBuffer(buf.MultiBuffer{b})
if !existing {
common.Must(w.checker.Start())
go func() {
ctx := context.Background()
sid := session.NewID()
ctx = session.ContextWithID(ctx, sid)
if originalDest.IsValid() {
ctx = session.ContextWithOutbound(ctx, &session.Outbound{
Target: originalDest,
})
}
ctx = session.ContextWithInbound(ctx, &session.Inbound{
Source: source,
Gateway: net.UDPDestination(w.address, w.port),
Tag: w.tag,
})
if err := w.proxy.Process(ctx, net.Network_UDP, conn, w.dispatcher); err != nil {
newError("connection ends").Base(err).WriteToLog(session.ExportIDToError(ctx))
}
conn.Close()
w.removeConn(id)
}()
}
}
func (w *udpWorker) removeConn(id connID) {
w.Lock()
delete(w.activeConn, id)
w.Unlock()
}
func (w *udpWorker) handlePackets() {
receive := w.hub.Receive()
for payload := range receive {
w.callback(payload.Payload, payload.Source, payload.Target)
}
}
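// clean closes UDP sessions idle for more than 8 seconds; once no sessions
// remain, it returns an error, which stops the periodic checker until a new
// session restarts it.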
func (w *udpWorker) clean() error {
nowSec := time.Now().Unix()
w.Lock()
defer w.Unlock()
if len(w.activeConn) == 0 {
return newError("no more connections. stopping...")
}
for addr, conn := range w.activeConn {
if nowSec-atomic.LoadInt64(&conn.lastActivityTime) > 8 { // TODO Timeout too small
delete(w.activeConn, addr)
conn.Close()
}
}
if len(w.activeConn) == 0 {
w.activeConn = make(map[connID]*udpConn, 16)
}
return nil
}
func (w *udpWorker) Start() error {
w.activeConn = make(map[connID]*udpConn, 16)
ctx := context.Background()
h, err := udp.ListenUDP(ctx, w.address, w.port, w.stream, udp.HubCapacity(256))
if err != nil {
return err
}
w.checker = &task.Periodic{
Interval: time.Second * 16,
Execute: w.clean,
}
w.hub = h
go w.handlePackets()
return nil
}
func (w *udpWorker) Close() error {
w.Lock()
defer w.Unlock()
var errors []interface{}
if w.hub != nil {
if err := w.hub.Close(); err != nil {
errors = append(errors, err)
}
}
if w.checker != nil {
if err := w.checker.Close(); err != nil {
errors = append(errors, err)
}
}
if err := common.Close(w.proxy); err != nil {
errors = append(errors, err)
}
if len(errors) > 0 {
return newError("failed to close all resources").Base(newError(serial.Concat(errors...)))
}
return nil
}
func (w *udpWorker) Port() net.Port {
return w.port
}
func (w *udpWorker) Proxy() proxy.Inbound {
return w.proxy
}
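// dsWorker is the Unix domain socket counterpart of tcpWorker; it has no
// meaningful port, so Port always returns 0.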
type dsWorker struct {
address net.Address
proxy proxy.Inbound
stream *internet.MemoryStreamConfig
tag string
dispatcher routing.Dispatcher
sniffingConfig *proxyman.SniffingConfig
uplinkCounter stats.Counter
downlinkCounter stats.Counter
hub internet.Listener
ctx context.Context
}
func (w *dsWorker) callback(conn internet.Connection) {
ctx, cancel := context.WithCancel(w.ctx)
sid := session.NewID()
ctx = session.ContextWithID(ctx, sid)
ctx = session.ContextWithInbound(ctx, &session.Inbound{
Source: net.DestinationFromAddr(conn.RemoteAddr()),
Gateway: net.UnixDestination(w.address),
Tag: w.tag,
})
content := new(session.Content)
if w.sniffingConfig != nil {
content.SniffingRequest.Enabled = w.sniffingConfig.Enabled
content.SniffingRequest.OverrideDestinationForProtocol = w.sniffingConfig.DestinationOverride
}
ctx = session.ContextWithContent(ctx, content)
if w.uplinkCounter != nil || w.downlinkCounter != nil {
conn = &internet.StatCouterConnection{
Connection: conn,
ReadCounter: w.uplinkCounter,
WriteCounter: w.downlinkCounter,
}
}
if err := w.proxy.Process(ctx, net.Network_UNIX, conn, w.dispatcher); err != nil {
newError("connection ends").Base(err).WriteToLog(session.ExportIDToError(ctx))
}
cancel()
if err := conn.Close(); err != nil {
newError("failed to close connection").Base(err).WriteToLog(session.ExportIDToError(ctx))
}
}
func (w *dsWorker) Proxy() proxy.Inbound {
return w.proxy
}
func (w *dsWorker) Port() net.Port {
return net.Port(0)
}
func (w *dsWorker) Start() error {
ctx := context.Background()
hub, err := internet.ListenUnix(ctx, w.address, w.stream, func(conn internet.Connection) {
go w.callback(conn)
})
if err != nil {
return newError("failed to listen Unix Domain Socket on ", w.address).AtWarning().Base(err)
}
w.hub = hub
return nil
}
func (w *dsWorker) Close() error {
var errors []interface{}
if w.hub != nil {
if err := common.Close(w.hub); err != nil {
errors = append(errors, err)
}
if err := common.Close(w.proxy); err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
return newError("failed to close all resources").Base(newError(serial.Concat(errors...)))
}
return nil
}