headscale/integration/embedded_derp_test.go
Kristoffer Dalby bca6e6334d integration: add custom subnet support and fix exit node tests
Add NetworkSpec struct with optional Subnet field to ScenarioSpec.Networks.
When Subnet is set, the Docker network is created with that specific CIDR
instead of Docker's auto-assigned RFC1918 range.
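
For illustration, a minimal sketch of what a spec with a pinned network can
look like, assuming Subnet is a CIDR string (the field's exact type is not
shown in this excerpt, and the CIDR is a placeholder):

spec := ScenarioSpec{
	NodesPerUser: 1,
	Users:        []string{"user1"},
	Networks: map[string]NetworkSpec{
		// With Subnet set, the Docker network is created with exactly
		// this CIDR; left empty, Docker picks an RFC1918 range.
		"usernet1": {Users: []string{"user1"}, Subnet: "10.33.0.0/16"},
	},
}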

Fix all exit node integration tests to use curl + traceroute. Tailscale
exit nodes strip locally-connected subnets from their forwarding filter
(shrinkDefaultRoute + localInterfaceRoutes), so exit nodes cannot
forward to IPs on their Docker network via the default route alone.
This is by design: exit nodes provide internet access, not LAN access.
To also get LAN access, the subnet must be explicitly advertised as a
route — matching real-world Tailscale deployment requirements.
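
In these tests that means the exit node has to advertise its own Docker
subnet. A minimal sketch of the shape of that call inside a test body,
assuming the harness's client handle exposes an Execute helper that runs a
command in the container (the CIDR is a placeholder):

// Advertise the default route (exit node) and the locally-connected
// Docker subnet; the latter is what shrinkDefaultRoute would otherwise
// strip from the forwarding filter.
_, _, err := client.Execute([]string{
	"tailscale", "set",
	"--advertise-exit-node",
	"--advertise-routes=10.33.0.0/16", // placeholder usernet1 CIDR
})
require.NoError(t, err)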

- TestSubnetRouterMultiNetworkExitNode: advertise usernet1 subnet
  alongside exit route, upgraded from ping to curl + traceroute
  (see the sketch after this list)
- TestGrantViaExitNodeSteering: usernet1 subnet in via grants and
  auto-approvers alongside autogroup:internet
- TestGrantViaMixedSteering: externet subnet in auto-approvers and
  route advertisement for exit traffic
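
A rough sketch of the curl + traceroute pattern referenced above, as it
might appear in a test body (target and exit-node addresses and the Execute
helper are assumptions, not taken from the diff):

// Fetch through the exit node to prove end-to-end forwarding works.
body, _, err := client.Execute([]string{"curl", "--silent", "--max-time", "10", "http://10.33.0.5/"})
require.NoError(t, err)
require.NotEmpty(t, body)

// Traceroute confirms traffic actually transits the exit node: its
// Tailscale address should appear as the first hop.
trace, _, err := client.Execute([]string{"traceroute", "-n", "10.33.0.5"})
require.NoError(t, err)
assert.Contains(t, trace, "100.64.0.1") // placeholder exit-node address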

Updates #2180
2026-04-01 14:10:42 +01:00


package integration

import (
	"testing"
	"time"

	"github.com/juanfont/headscale/integration/hsic"
	"github.com/juanfont/headscale/integration/integrationutil"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"tailscale.com/tailcfg"
	"tailscale.com/types/key"
)

type ClientsSpec struct {
	Plain         int
	WebsocketDERP int
}
func TestDERPServerScenario(t *testing.T) {
	spec := ScenarioSpec{
		NodesPerUser: 1,
		Users:        []string{"user1", "user2", "user3"},
		Networks: map[string]NetworkSpec{
			"usernet1": {Users: []string{"user1"}},
			"usernet2": {Users: []string{"user2"}},
			"usernet3": {Users: []string{"user3"}},
		},
	}

	derpServerScenario(t, spec, "derp-tcp", false, func(scenario *Scenario) {
		allClients, err := scenario.ListTailscaleClients()
		requireNoErrListClients(t, err)

		t.Logf("checking %d clients for websocket connections", len(allClients))
		for _, client := range allClients {
			if didClientUseWebsocketForDERP(t, client) {
				t.Logf(
					"client %q used a websocket connection, but was not expected to",
					client.Hostname(),
				)
				t.Fail()
			}
		}

		hsServer, err := scenario.Headscale()
		requireNoErrGetHeadscale(t, err)

		derpRegion := tailcfg.DERPRegion{
			RegionCode: "test-derpverify",
			RegionName: "TestDerpVerify",
			Nodes: []*tailcfg.DERPNode{
				{
					Name:             "TestDerpVerify",
					RegionID:         900,
					HostName:         hsServer.GetHostname(),
					STUNPort:         3478,
					STUNOnly:         false,
					DERPPort:         443,
					InsecureForTests: true,
				},
			},
		}
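		// fakeKey is a node key Headscale has never registered; with
		// HEADSCALE_DERP_SERVER_VERIFY_CLIENTS enabled, the embedded
		// DERP server is expected to reject it.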
		fakeKey := key.NewNode()
		DERPVerify(t, fakeKey, derpRegion, false)
	})
}
func TestDERPServerWebsocketScenario(t *testing.T) {
	spec := ScenarioSpec{
		NodesPerUser: 1,
		Users:        []string{"user1", "user2", "user3"},
		Networks: map[string]NetworkSpec{
			"usernet1": {Users: []string{"user1"}},
			"usernet2": {Users: []string{"user2"}},
			"usernet3": {Users: []string{"user3"}},
		},
	}

	derpServerScenario(t, spec, "derp-ws", true, func(scenario *Scenario) {
		allClients, err := scenario.ListTailscaleClients()
		requireNoErrListClients(t, err)

		t.Logf("checking %d clients for websocket connections", len(allClients))
		for _, client := range allClients {
			if !didClientUseWebsocketForDERP(t, client) {
				t.Logf(
					"client %q does not seem to have used a websocket connection, even though it was expected to do so",
					client.Hostname(),
				)
				t.Fail()
			}
		}
	})
}
// This function implements the common parts of a DERP scenario.
// We *want* it to show up in stack traces, so marking it as a test
// helper would be counterproductive.
//
//nolint:thelper
func derpServerScenario(
	t *testing.T,
	spec ScenarioSpec,
	testName string,
	websocket bool,
	furtherAssertions ...func(*Scenario),
) {
	IntegrationSkip(t)

	scenario, err := NewScenario(spec)
	require.NoError(t, err)
	defer scenario.ShutdownAssertNoPanics(t)

	err = scenario.CreateHeadscaleEnv(
		[]tsic.Option{
			tsic.WithWebsocketDERP(websocket),
		},
		hsic.WithTestName(testName),
		// Expose STUN port for DERP NAT traversal.
		hsic.WithExtraPorts([]string{"3478/udp"}),
		// DERP clients expect the server on the standard HTTPS port.
		hsic.WithPort(443),
		hsic.WithConfigEnv(map[string]string{
			"HEADSCALE_DERP_AUTO_UPDATE_ENABLED":   "true",
			"HEADSCALE_DERP_UPDATE_FREQUENCY":      "10s",
			"HEADSCALE_LISTEN_ADDR":                "0.0.0.0:443",
			"HEADSCALE_DERP_SERVER_VERIFY_CLIENTS": "true",
		}),
	)
	requireNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	requireNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
	requireNoErrSync(t, err)

	allHostnames, err := scenario.ListTailscaleClientsFQDNs()
	requireNoErrListFQDN(t, err)

	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP", client.Hostname())
			}
		}, integrationutil.ScaledTimeout(30*time.Second), 2*time.Second)
	}

	success := pingDerpAllHelper(t, allClients, allHostnames)
	if len(allHostnames)*len(allClients) > success {
		t.FailNow()
	}
	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay after first run", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP after first run", client.Hostname())
			}
		}, integrationutil.ScaledTimeout(30*time.Second), 2*time.Second)
	}

	t.Logf("Run 1: %d successful pings out of %d", success, len(allClients)*len(allHostnames))

	// Let the DERP updater run a couple of times to ensure it does not
	// break the DERPMap. The updater runs on a 10s interval by default.
	//nolint:forbidigo // Intentional delay: must wait for DERP updater to run multiple times (interval-based)
	time.Sleep(30 * time.Second)

	success = pingDerpAllHelper(t, allClients, allHostnames)
	if len(allHostnames)*len(allClients) > success {
		t.Fail()
	}

	for _, client := range allClients {
		assert.EventuallyWithT(t, func(ct *assert.CollectT) {
			status, err := client.Status()
			assert.NoError(ct, err, "Failed to get status for client %s", client.Hostname())
			for _, health := range status.Health {
				assert.NotContains(ct, health, "could not connect to any relay server",
					"Client %s should be connected to DERP relay after second run", client.Hostname())
				assert.NotContains(ct, health, "could not connect to the 'Headscale Embedded DERP' relay server.",
					"Client %s should be connected to Headscale Embedded DERP after second run", client.Hostname())
			}
		}, integrationutil.ScaledTimeout(30*time.Second), 2*time.Second)
	}
t.Logf("Run2: %d successful pings out of %d", success, len(allClients)*len(allHostnames))
for _, check := range furtherAssertions {
check(scenario)
}
}