fix: solve the problem that DNS dialer not perform switching & udp traffic stucks #782

Merged · 5 commits · May 23, 2025
Changes from all commits
35 changes: 26 additions & 9 deletions control/dns_control.go
@@ -14,6 +14,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/daeuniverse/dae/common/consts"
@@ -79,7 +80,12 @@ type DnsController struct {
 	dnsCacheMu          sync.Mutex
 	dnsCache            map[string]*DnsCache
 	dnsForwarderCacheMu sync.Mutex
-	dnsForwarderCache   map[string]DnsForwarder
+	dnsForwarderCache   map[dnsForwarderKey]DnsForwarder
 }
+
+type handlingState struct {
+	mu  sync.Mutex
+	ref uint32
+}
 
 func parseIpVersionPreference(prefer int) (uint16, error) {
@@ -117,7 +123,7 @@ func NewDnsController(routing *dns.Dns, option *DnsControllerOption) (c *DnsCont
 		dnsCacheMu:          sync.Mutex{},
 		dnsCache:            make(map[string]*DnsCache),
 		dnsForwarderCacheMu: sync.Mutex{},
-		dnsForwarderCache:   make(map[string]DnsForwarder),
+		dnsForwarderCache:   make(map[dnsForwarderKey]DnsForwarder),
 	}, nil
 }

@@ -346,6 +352,11 @@ type dialArgument struct {
 	mptcp bool
 }
 
+type dnsForwarderKey struct {
+	upstream     string
+	dialArgument dialArgument
+}
+
 func (c *DnsController) Handle_(dnsMessage *dnsmessage.Msg, req *udpRequest) (err error) {
 	if c.log.IsLevelEnabled(logrus.TraceLevel) && len(dnsMessage.Question) > 0 {
 		q := dnsMessage.Question[0]
@@ -448,11 +459,17 @@ func (c *DnsController) handle_(
 	}
 
 	// No parallel for the same lookup.
-	_mu, _ := c.handling.LoadOrStore(cacheKey, new(sync.Mutex))
-	mu := _mu.(*sync.Mutex)
-	mu.Lock()
-	defer mu.Unlock()
-	defer c.handling.Delete(cacheKey)
+	handlingState_, _ := c.handling.LoadOrStore(cacheKey, new(handlingState))
+	handlingState := handlingState_.(*handlingState)
+	atomic.AddUint32(&handlingState.ref, 1)
+	handlingState.mu.Lock()
+	defer func() {
+		handlingState.mu.Unlock()
+		atomic.AddUint32(&handlingState.ref, ^uint32(0))
+		if atomic.LoadUint32(&handlingState.ref) == 0 {
+			c.handling.Delete(cacheKey)
+		}
+	}()
 
 	if resp := c.LookupDnsRespCache_(dnsMessage, cacheKey, false); resp != nil {
 		// Send cache to client directly.
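
For readers skimming the hunk above: the old code kept one bare sync.Mutex per cache key and deleted it from c.handling as soon as the first holder returned, so a goroutine still queued on that mutex could coexist with a newcomer that re-created the entry, and two lookups for the same key could end up running in parallel anyway. The new handlingState adds a reference count so the entry is only dropped once the last waiter has left. A minimal, self-contained sketch of that pattern follows; serializePerKey and the other names here are illustrative, not dae API:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type handlingEntry struct {
	mu  sync.Mutex
	ref uint32
}

var handling sync.Map // cache key -> *handlingEntry

// serializePerKey makes calls that share a key run one at a time and removes
// the per-key entry only once the last registered user has finished.
func serializePerKey(key string, fn func()) {
	e, _ := handling.LoadOrStore(key, new(handlingEntry))
	entry := e.(*handlingEntry)
	atomic.AddUint32(&entry.ref, 1) // register before taking the lock
	entry.mu.Lock()
	defer func() {
		entry.mu.Unlock()
		atomic.AddUint32(&entry.ref, ^uint32(0)) // decrement
		if atomic.LoadUint32(&entry.ref) == 0 {
			handling.Delete(key) // last user out drops the entry
		}
	}()
	fn()
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			serializePerKey("example.com. A", func() { fmt.Println("resolving, worker", i) })
		}(i)
	}
	wg.Wait()
}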
Expand Down Expand Up @@ -562,14 +579,14 @@ func (c *DnsController) dialSend(invokingDepth int, req *udpRequest, data []byte

// get forwarder from cache
c.dnsForwarderCacheMu.Lock()
forwarder, ok := c.dnsForwarderCache[upstreamName]
forwarder, ok := c.dnsForwarderCache[dnsForwarderKey{upstream: upstream.String(), dialArgument: *dialArgument}]
if !ok {
forwarder, err = newDnsForwarder(upstream, *dialArgument)
if err != nil {
c.dnsForwarderCacheMu.Unlock()
return err
}
c.dnsForwarderCache[upstreamName] = forwarder
c.dnsForwarderCache[dnsForwarderKey{upstream: upstream.String(), dialArgument: *dialArgument}] = forwarder
}
c.dnsForwarderCacheMu.Unlock()

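The forwarder-cache hunks are the dialer-switching half of the fix: keyed only by upstream name, the first DnsForwarder built for an upstream kept being reused even after routing selected a different dialer, so DNS queries stayed on the old path. Keying the cache by the full (upstream, dialArgument) pair makes a changed dial argument miss the cache and build a fresh forwarder; it also relies on dialArgument remaining a comparable struct, since it is now part of a map key. A rough sketch of the lookup shape, with stubbed types and illustrative names (only dnsForwarderKey mirrors the diff):

package main

import (
	"fmt"
	"sync"
)

// Stubs for illustration; in dae these carry the real dialer/outbound,
// L4 protocol, DNS transport type, and target address.
type dialArgument struct {
	l4proto  string
	dialerID int
}

type DnsForwarder struct{ id int }

type dnsForwarderKey struct {
	upstream     string
	dialArgument dialArgument
}

var (
	mu      sync.Mutex
	cache   = map[dnsForwarderKey]*DnsForwarder{}
	created int
)

func getForwarder(upstream string, da dialArgument) *DnsForwarder {
	mu.Lock()
	defer mu.Unlock()
	key := dnsForwarderKey{upstream: upstream, dialArgument: da}
	fw, ok := cache[key]
	if !ok {
		// A changed dialArgument (e.g. routing switched the dialer) is a new
		// key, so a fresh forwarder is built instead of reusing the stale one.
		created++
		fw = &DnsForwarder{id: created}
		cache[key] = fw
	}
	return fw
}

func main() {
	a := getForwarder("dns.example.com:53", dialArgument{"udp", 1})
	b := getForwarder("dns.example.com:53", dialArgument{"udp", 2}) // dialer switched
	fmt.Println(a.id, b.id) // 1 2: same upstream, new forwarder
}

Running it prints 1 2: once the dial argument changes, the same upstream resolves through a newly built forwarder rather than the cached one.
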
80 changes: 25 additions & 55 deletions control/udp_task_pool.go
@@ -1,15 +1,11 @@
 package control
 
 import (
+	"context"
 	"sync"
-	"sync/atomic"
 	"time"
-
-	ants "github.com/panjf2000/ants/v2"
 )
 
-var isTest = false
-
 const UdpTaskQueueLength = 128
 
 type UdpTask = func()
@@ -21,35 +17,19 @@ type UdpTaskQueue struct {
 	ch        chan UdpTask
 	timer     *time.Timer
 	agingTime time.Duration
-	closed    atomic.Bool
-	freed     chan struct{}
-}
-
-func (q *UdpTaskQueue) Push(task UdpTask) {
-	q.timer.Reset(q.agingTime)
-	q.ch <- task
+	ctx       context.Context
+	closed    chan struct{}
 }
 
 func (q *UdpTaskQueue) convoy() {
 	for {
-		if q.closed.Load() {
-		clearloop:
-			for {
-				select {
-				case t := <-q.ch:
-					// Emit it back due to closed q.
-					ReemitWorkers.Submit(func() {
-						q.p.EmitTask(q.key, t)
-					})
-				default:
-					break clearloop
-				}
-			}
-			close(q.freed)
+		select {
+		case <-q.ctx.Done():
+			close(q.closed)
 			return
-		} else {
-			t := <-q.ch
-			t()
+		case task := <-q.ch:
+			task()
+			q.timer.Reset(q.agingTime)
 		}
 	}
 }
@@ -78,49 +58,39 @@ func (p *UdpTaskPool) EmitTask(key string, task UdpTask) {
 	q, ok := p.m[key]
 	if !ok {
 		ch := p.queueChPool.Get().(chan UdpTask)
+		ctx, cancel := context.WithCancel(context.Background())
 		q = &UdpTaskQueue{
 			key:       key,
 			p:         p,
 			ch:        ch,
 			timer:     nil,
 			agingTime: DefaultNatTimeout,
-			closed:    atomic.Bool{},
-			freed:     make(chan struct{}),
+			ctx:       ctx,
+			closed:    make(chan struct{}),
 		}
 		q.timer = time.AfterFunc(q.agingTime, func() {
-			// This func may be invoked twice due to concurrent Reset.
-			if !q.closed.CompareAndSwap(false, true) {
-				return
-			}
-			if isTest {
-				time.Sleep(3 * time.Microsecond)
-			}
 			// if timer executed, there should no task in queue.
 			// q.closed should not blocking things.
 			p.mu.Lock()
-			defer p.mu.Unlock()
-			if p.m[key] == q {
-				delete(p.m, key)
+			cancel()
+			delete(p.m, key)
+			p.mu.Unlock()
+			<-q.closed
+			if len(ch) == 0 { // Otherwise let it be GCed
+				p.queueChPool.Put(ch)
 			}
-			// Trigger next loop in func convoy
-			q.ch <- func() {}
-			<-q.freed
-			p.queueChPool.Put(ch)
 		})
 		p.m[key] = q
 		go q.convoy()
 	}
 	p.mu.Unlock()
-	q.Push(task)
+	// if task cannot be executed within 180s(DefaultNatTimeout), GC may be triggered, so skip the task when GC occurs
+	select {
+	case q.ch <- task:
+	case <-q.ctx.Done():
+	}
 }
 
 var (
 	DefaultUdpTaskPool = NewUdpTaskPool()
-	ReemitWorkers      *ants.Pool
 )
-
-func init() {
-	var err error
-	ReemitWorkers, err = ants.NewPool(UdpTaskQueueLength/2, ants.WithExpiryDuration(AnyfromTimeout))
-	if err != nil {
-		panic(err)
-	}
-}
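
Taken together with the convoy rewrite above, the EmitTask hunk changes queue teardown from "re-emit leftovers through an ants worker pool" into "cancel a context, unmap the queue, and wait for the consumer to exit", and the final enqueue selects on ctx.Done() so a task aimed at a queue that is being garbage-collected is dropped instead of blocking, which appears to be where UDP traffic could previously get stuck on the unconditional q.ch <- task. Below is a compressed, self-contained sketch of that emit/GC interplay; names, buffer sizes, and timings are illustrative, and the channel recycling through a sync.Pool is omitted:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type task = func()

type queue struct {
	ch     chan task
	ctx    context.Context
	closed chan struct{}
	timer  *time.Timer
}

type pool struct {
	mu sync.Mutex
	m  map[string]*queue
}

func (p *pool) emit(key string, t task, aging time.Duration) {
	p.mu.Lock()
	q, ok := p.m[key]
	if !ok {
		ctx, cancel := context.WithCancel(context.Background())
		q = &queue{ch: make(chan task, 128), ctx: ctx, closed: make(chan struct{})}
		q.timer = time.AfterFunc(aging, func() {
			// GC path: cancel the context, unmap the queue under the pool
			// lock, then wait until the consumer goroutine has fully exited.
			p.mu.Lock()
			cancel()
			delete(p.m, key)
			p.mu.Unlock()
			<-q.closed
		})
		p.m[key] = q
		go func() {
			for {
				select {
				case <-q.ctx.Done():
					close(q.closed)
					return
				case tk := <-q.ch:
					tk()
					q.timer.Reset(aging) // stay alive while tasks keep coming
				}
			}
		}()
	}
	p.mu.Unlock()
	// Never block the datapath: if this queue is being GCed, drop the task.
	select {
	case q.ch <- t:
	case <-q.ctx.Done():
	}
}

func main() {
	p := &pool{m: map[string]*queue{}}
	p.emit("10.0.0.1:53", func() { fmt.Println("udp task ran") }, 200*time.Millisecond)
	time.Sleep(500 * time.Millisecond) // idle long enough for the aging timer to GC the queue
	p.emit("10.0.0.1:53", func() { fmt.Println("fresh queue after GC") }, 200*time.Millisecond)
	time.Sleep(100 * time.Millisecond)
}

The second emit in main shows the key point: once the aging timer has torn the queue down, a later task transparently gets a new queue instead of a send on a dead channel.
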
10 changes: 6 additions & 4 deletions control/udp_task_pool_test.go
@@ -13,18 +13,20 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+// Should run successfully in less than 3.2 seconds.
 func TestUdpTaskPool(t *testing.T) {
-	isTest = true
 	c, err := cpu.Times(false)
 	require.NoError(t, err)
 	t.Log(c)
 	DefaultNatTimeout = 1000 * time.Microsecond
 	for i := 0; i < 100; i++ {
-		DefaultUdpTaskPool.EmitTask("testkey", func() {
-		})
+		DefaultUdpTaskPool.EmitTask("testkey", func() { time.Sleep(100 * time.Microsecond) })
 		time.Sleep(99 * time.Microsecond)
 	}
-	time.Sleep(5 * time.Second)
+	time.Sleep(1 * time.Second)
+	DefaultUdpTaskPool.EmitTask("testkey", func() { time.Sleep(100 * time.Second) })
+	time.Sleep(2 * time.Second)
+	DefaultUdpTaskPool.EmitTask("testkey", func() { time.Sleep(100 * time.Second) })
 	c, err = cpu.Times(false)
 	require.NoError(t, err)
 	t.Log(c)
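The reworked test first hammers one key with short tasks while DefaultNatTimeout is shrunk to 1 ms, then emits two long-running tasks after idle gaps long enough for the aging timer to fire, exercising the enqueue-versus-GC race that EmitTask now tolerates; the new comment pins the expected wall time at under roughly 3.2 seconds. Assuming the package layout shown in the diff, running go test -race -run TestUdpTaskPool ./control/ from the repository root should exercise it locally.
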
1 change: 0 additions & 1 deletion go.mod
@@ -18,7 +18,6 @@ require (
 	github.com/miekg/dns v1.1.61
 	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
 	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
-	github.com/panjf2000/ants/v2 v2.0.0
 	github.com/safchain/ethtool v0.4.1
 	github.com/shirou/gopsutil/v4 v4.24.6
 	github.com/sirupsen/logrus v1.9.3
2 changes: 0 additions & 2 deletions go.sum
@@ -150,8 +150,6 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
 github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
-github.com/panjf2000/ants/v2 v2.0.0 h1:MvUd+EfTcLl9l8Mh6nQkMQaE4cLAewd3bv97ajOyldQ=
-github.com/panjf2000/ants/v2 v2.0.0/go.mod h1:1GFm8bV8nyCQvU5K4WvBCTG1/YBFOD2VzjffD8fV55A=
 github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
 github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
 github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=