Mirror of https://github.com/juanfont/headscale.git (synced 2025-10-31)

feat: add autogroup:self (#2789)
.github/workflows/test-integration.yaml (vendored)
@@ -23,6 +23,7 @@ jobs:
           - TestPolicyUpdateWhileRunningWithCLIInDatabase
           - TestACLAutogroupMember
           - TestACLAutogroupTagged
+          - TestACLAutogroupSelf
           - TestAuthKeyLogoutAndReloginSameUser
           - TestAuthKeyLogoutAndReloginNewUser
           - TestAuthKeyLogoutAndReloginSameUserExpiredKey
@@ -82,6 +83,7 @@ jobs:
           - TestSSHNoSSHConfigured
           - TestSSHIsBlockedInACL
           - TestSSHUserOnlyIsolation
+          - TestSSHAutogroupSelf
     uses: ./.github/workflows/integration-test-template.yml
     with:
       test: ${{ matrix.test }}
---
@@ -95,6 +95,8 @@ upstream is changed.
   [#2764](https://github.com/juanfont/headscale/pull/2764)
 - Add FAQ entry on how to recover from an invalid policy in the database
   [#2776](https://github.com/juanfont/headscale/pull/2776)
+- EXPERIMENTAL: Add support for `autogroup:self`
+  [#2789](https://github.com/juanfont/headscale/pull/2789)
 
 ## 0.26.1 (2025-06-06)
 
@@ -252,6 +254,7 @@ working in v1 and not tested might be broken in v2 (and vice versa).
 - Add documentation for routes
   [#2496](https://github.com/juanfont/headscale/pull/2496)
 
+
 ## 0.25.1 (2025-02-25)
 
 ### Changes
---
@@ -23,7 +23,7 @@ provides on overview of Headscale's feature and compatibility with the Tailscale
 - [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
     - [x] ACL management via API
     - [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
-      `autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`
+      `autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`, `autogroup:self`
     - [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
       routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
       nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
---
@@ -194,13 +194,93 @@ Here are the ACL's to implement the same permissions as above:
       "dst": ["tag:dev-app-servers:80,443"]
     },
 
-    // We still have to allow internal users communications since nothing guarantees that each user have
-    // their own users.
-    { "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
-    { "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
-    { "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
-    { "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
-    { "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
+    // Allow users to access their own devices using autogroup:self (see below for more details about performance impact)
+    {
+      "action": "accept",
+      "src": ["autogroup:member"],
+      "dst": ["autogroup:self:*"]
+    }
   ]
 }
 ```
+
+## Autogroups
+
+Headscale supports several autogroups that automatically include users, destinations, or devices with specific properties. Autogroups provide a convenient way to write ACL rules without manually listing individual users or devices.
+
+### `autogroup:internet`
+
+Allows access to the internet through [exit nodes](routes.md#exit-node). Can only be used in ACL destinations.
+
+```json
+{
+  "action": "accept",
+  "src": ["group:users"],
+  "dst": ["autogroup:internet:*"]
+}
+```
+
+### `autogroup:member`
+
+Includes all users who are direct members of the tailnet. Does not include users from shared devices.
+
+```json
+{
+  "action": "accept",
+  "src": ["autogroup:member"],
+  "dst": ["tag:prod-app-servers:80,443"]
+}
+```
+
+### `autogroup:tagged`
+
+Includes all devices that have at least one tag.
+
+```json
+{
+  "action": "accept",
+  "src": ["autogroup:tagged"],
+  "dst": ["tag:monitoring:9090"]
+}
+```
+
+### `autogroup:self` (EXPERIMENTAL)
+
+!!! warning "The current implementation of `autogroup:self` is inefficient"
+
+    Using `autogroup:self` may cause performance degradation on the Headscale
+    coordinator server in large deployments, as filter rules must be compiled
+    per-node rather than once globally.
+
+Includes devices where the same user is authenticated on both the source and destination. Does not include tagged devices. Can only be used in ACL destinations.
+
+```json
+{
+  "action": "accept",
+  "src": ["autogroup:member"],
+  "dst": ["autogroup:self:*"]
+}
+```
+
+If you experience performance issues, consider using more specific ACL rules or limiting the use of `autogroup:self`, for example by allowing each user access to their own nodes explicitly:
+
+```json
+{
+  "acls": [
+    // Allow each user to reach their own nodes without autogroup:self.
+    { "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
+    { "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
+    { "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
+    { "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
+    { "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
+  ]
+}
+```
+
|  |  | ||||||
|  | ### `autogroup:nonroot` | ||||||
|  |  | ||||||
|  | Used in Tailscale SSH rules to allow access to any user except root. Can only be used in the `users` field of SSH rules. | ||||||
|  |  | ||||||
|  | ```json | ||||||
|  | { | ||||||
|  |   "action": "accept", | ||||||
|  |   "src": ["autogroup:member"], | ||||||
|  |   "dst": ["autogroup:self"], | ||||||
|  |   "users": ["autogroup:nonroot"] | ||||||
|  | } | ||||||
|  | ``` | ||||||
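
To make the per-node semantics above concrete, here is a minimal, hypothetical Go sketch; the `Node` type and its fields are illustrative stand-ins for Headscale's real `types.NodeView`, not its API:

```go
package main

import "fmt"

// Node is an illustrative stand-in for a Headscale device; only the
// properties that autogroup:self depends on are modeled.
type Node struct {
	Name   string
	UserID uint
	Tagged bool
	IP     string
}

// selfSet returns what autogroup:self expands to for a target node: the
// untagged devices owned by the same user as the target.
func selfSet(target Node, all []Node) []string {
	var ips []string
	for _, n := range all {
		if n.UserID == target.UserID && !n.Tagged {
			ips = append(ips, n.IP)
		}
	}
	return ips
}

func main() {
	nodes := []Node{
		{"boss-laptop", 1, false, "100.64.0.1"},
		{"boss-phone", 1, false, "100.64.0.2"},
		{"dev1-laptop", 2, false, "100.64.0.3"},
		{"ci-runner", 1, true, "100.64.0.5"}, // tagged, so never part of self
	}
	fmt.Println(selfSet(nodes[0], nodes)) // [100.64.0.1 100.64.0.2]
}
```

Because the expansion depends on the target node's owner, the compiled filter differs per node; this is why the implementation changes below move from one global filter to per-node compilation.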
---
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/juanfont/headscale/hscontrol/policy"
+	"github.com/juanfont/headscale/hscontrol/policy/matcher"
 	"github.com/juanfont/headscale/hscontrol/types"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/views"
@@ -180,7 +181,11 @@ func (b *MapResponseBuilder) WithPacketFilters() *MapResponseBuilder {
 		return b
 	}
 
-	filter, _ := b.mapper.state.Filter()
+	filter, err := b.mapper.state.FilterForNode(node)
+	if err != nil {
+		b.addError(err)
+		return b
+	}
 
 	// CapVer 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)
 	// Currently, we do not send incremental package filters, however using the
@@ -226,7 +231,13 @@ func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) (
 		return nil, errors.New("node not found")
 	}
 
-	filter, matchers := b.mapper.state.Filter()
+	// Use per-node filter to handle autogroup:self
+	filter, err := b.mapper.state.FilterForNode(node)
+	if err != nil {
+		return nil, err
+	}
+
+	matchers := matcher.MatchesFromFilterRules(filter)
 
 	// If there are filter rules present, see if there are any nodes that cannot
 	// access each-other at all and remove them from the peers.
---
@@ -13,6 +13,8 @@ import (
 type PolicyManager interface {
 	// Filter returns the current filter rules for the entire tailnet and the associated matchers.
 	Filter() ([]tailcfg.FilterRule, []matcher.Match)
+	// FilterForNode returns filter rules for a specific node, handling autogroup:self
+	FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error)
 	SSHPolicy(types.NodeView) (*tailcfg.SSHPolicy, error)
 	SetPolicy([]byte) (bool, error)
 	SetUsers(users []types.User) (bool, error)
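
A caller-side sketch of the new interface method; the stand-in types below (`nodeView`, `policyManager`) are illustrative, not Headscale's real ones, and the helper `packetFilterFor` is hypothetical:

```go
package policyusage

import (
	"fmt"

	"tailscale.com/tailcfg"
)

// nodeView stands in for types.NodeView; policyManager mirrors the subset of
// the PolicyManager interface that this sketch needs.
type nodeView struct{ id uint64 }

type policyManager interface {
	FilterForNode(nodeView) ([]tailcfg.FilterRule, error)
}

// packetFilterFor shows the intended call pattern: always ask for the
// per-node filter. When the policy has no autogroup:self, FilterForNode is
// documented below to fall back to the cached global rules, so this is a
// drop-in replacement for the old tailnet-wide Filter() call.
func packetFilterFor(pm policyManager, n nodeView) ([]tailcfg.FilterRule, error) {
	rules, err := pm.FilterForNode(n)
	if err != nil {
		return nil, fmt.Errorf("compiling filter for node %d: %w", n.id, err)
	}
	return rules, nil
}
```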
---
@@ -82,6 +82,159 @@ func (pol *Policy) compileFilterRules(
 	return rules, nil
 }
 
+// compileFilterRulesForNode compiles filter rules for a specific node.
+func (pol *Policy) compileFilterRulesForNode(
+	users types.Users,
+	node types.NodeView,
+	nodes views.Slice[types.NodeView],
+) ([]tailcfg.FilterRule, error) {
+	if pol == nil {
+		return tailcfg.FilterAllowAll, nil
+	}
+
+	var rules []tailcfg.FilterRule
+
+	for _, acl := range pol.ACLs {
+		if acl.Action != ActionAccept {
+			return nil, ErrInvalidAction
+		}
+
+		rule, err := pol.compileACLWithAutogroupSelf(acl, users, node, nodes)
+		if err != nil {
+			log.Trace().Err(err).Msgf("compiling ACL")
+			continue
+		}
+
+		if rule != nil {
+			rules = append(rules, *rule)
+		}
+	}
+
+	return rules, nil
+}
+
+// compileACLWithAutogroupSelf compiles a single ACL rule, handling
+// autogroup:self per-node while supporting all other alias types normally.
+func (pol *Policy) compileACLWithAutogroupSelf(
+	acl ACL,
+	users types.Users,
+	node types.NodeView,
+	nodes views.Slice[types.NodeView],
+) (*tailcfg.FilterRule, error) {
+	// Check if any destination uses autogroup:self
+	hasAutogroupSelfInDst := false
+
+	for _, dest := range acl.Destinations {
+		if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+			hasAutogroupSelfInDst = true
+			break
+		}
+	}
+
+	var srcIPs netipx.IPSetBuilder
+
+	// Resolve sources to only include devices from the same user as the target node.
+	for _, src := range acl.Sources {
+		// autogroup:self is not allowed in sources
+		if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+			return nil, fmt.Errorf("autogroup:self cannot be used in sources")
+		}
+
+		ips, err := src.Resolve(pol, users, nodes)
+		if err != nil {
+			log.Trace().Err(err).Msgf("resolving source ips")
+			continue
+		}
+
+		if ips != nil {
+			if hasAutogroupSelfInDst {
+				// Instead of iterating all addresses (which could be millions),
+				// check each node's IPs against the source set
+				for _, n := range nodes.All() {
+					if n.User().ID == node.User().ID && !n.IsTagged() {
+						// Check if any of this node's IPs are in the source set
+						for _, nodeIP := range n.IPs() {
+							if ips.Contains(nodeIP) {
+								n.AppendToIPSet(&srcIPs)
+								break // Found this node, move to next
+							}
+						}
+					}
+				}
+			} else {
+				// No autogroup:self in destination, use all resolved sources
+				srcIPs.AddSet(ips)
+			}
+		}
+	}
+
+	srcSet, err := srcIPs.IPSet()
+	if err != nil {
+		return nil, err
+	}
+
+	if srcSet == nil || len(srcSet.Prefixes()) == 0 {
+		// No sources resolved, skip this rule
+		return nil, nil //nolint:nilnil
+	}
+
+	protocols, _ := acl.Protocol.parseProtocol()
+
+	var destPorts []tailcfg.NetPortRange
+
+	for _, dest := range acl.Destinations {
+		if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+			for _, n := range nodes.All() {
+				if n.User().ID == node.User().ID && !n.IsTagged() {
+					for _, port := range dest.Ports {
+						for _, ip := range n.IPs() {
+							pr := tailcfg.NetPortRange{
+								IP:    ip.String(),
+								Ports: port,
+							}
+							destPorts = append(destPorts, pr)
+						}
+					}
+				}
+			}
+		} else {
+			ips, err := dest.Resolve(pol, users, nodes)
+			if err != nil {
+				log.Trace().Err(err).Msgf("resolving destination ips")
+				continue
+			}
+
+			if ips == nil {
+				log.Debug().Msgf("destination resolved to nil ips: %v", dest)
+				continue
+			}
+
+			prefixes := ips.Prefixes()
+
+			for _, pref := range prefixes {
+				for _, port := range dest.Ports {
+					pr := tailcfg.NetPortRange{
+						IP:    pref.String(),
+						Ports: port,
+					}
+					destPorts = append(destPorts, pr)
+				}
+			}
+		}
+	}
+
+	if len(destPorts) == 0 {
+		// No destinations resolved, skip this rule
+		return nil, nil //nolint:nilnil
+	}
+
+	return &tailcfg.FilterRule{
+		SrcIPs:   ipSetToPrefixStringList(srcSet),
+		DstPorts: destPorts,
+		IPProto:  protocols,
+	}, nil
+}
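
The source-resolution loop above deliberately avoids enumerating every address in the resolved source set. A self-contained sketch of that membership test, using `go4.org/netipx` as the diff does; the `device` type is an illustrative stand-in for `types.NodeView`:

```go
package main

import (
	"fmt"
	"net/netip"

	"go4.org/netipx"
)

// device stands in for a node view; only ownership, tag status, and
// addresses matter for the autogroup:self source filter.
type device struct {
	userID uint
	tagged bool
	ips    []netip.Addr
}

// sameUserSourcesIn mirrors the core trick in compileACLWithAutogroupSelf:
// rather than iterating every address in the resolved source set (which can
// be millions for ranges like autogroup:member), walk the candidate devices
// of the target user and keep those whose addresses appear in the set.
func sameUserSourcesIn(srcs *netipx.IPSet, target device, all []device) []netip.Addr {
	var kept []netip.Addr
	for _, d := range all {
		if d.userID != target.userID || d.tagged {
			continue
		}
		for _, ip := range d.ips {
			if srcs.Contains(ip) {
				kept = append(kept, d.ips...)
				break // this device matched; move to the next one
			}
		}
	}
	return kept
}

func main() {
	var b netipx.IPSetBuilder
	b.AddPrefix(netip.MustParsePrefix("100.64.0.0/10")) // e.g. a broad member range
	set, _ := b.IPSet()

	u1a := device{1, false, []netip.Addr{netip.MustParseAddr("100.64.0.1")}}
	u1b := device{1, false, []netip.Addr{netip.MustParseAddr("100.64.0.2")}}
	u2 := device{2, false, []netip.Addr{netip.MustParseAddr("100.64.0.3")}}

	fmt.Println(sameUserSourcesIn(set, u1a, []device{u1a, u1b, u2}))
	// -> [100.64.0.1 100.64.0.2]: only user 1's untagged devices survive.
}
```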
+
 func sshAction(accept bool, duration time.Duration) tailcfg.SSHAction {
 	return tailcfg.SSHAction{
 		Reject:                    !accept,
@@ -107,14 +260,39 @@ func (pol *Policy) compileSSHPolicy(
 	var rules []*tailcfg.SSHRule
 
 	for index, rule := range pol.SSHs {
+		// Check if any destination uses autogroup:self
+		hasAutogroupSelfInDst := false
+		for _, dst := range rule.Destinations {
+			if ag, ok := dst.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+				hasAutogroupSelfInDst = true
+				break
+			}
+		}
+
+		// If autogroup:self is used, skip tagged nodes
+		if hasAutogroupSelfInDst && node.IsTagged() {
+			continue
+		}
+
 		var dest netipx.IPSetBuilder
 		for _, src := range rule.Destinations {
+			// Handle autogroup:self specially
+			if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+				// For autogroup:self, only include the target user's untagged devices
+				for _, n := range nodes.All() {
+					if n.User().ID == node.User().ID && !n.IsTagged() {
+						n.AppendToIPSet(&dest)
+					}
+				}
+			} else {
 				ips, err := src.Resolve(pol, users, nodes)
 				if err != nil {
 					log.Trace().Caller().Err(err).Msgf("resolving destination ips")
+					continue
 				}
 				dest.AddSet(ips)
 			}
+		}
 
 		destSet, err := dest.IPSet()
 		if err != nil {
@@ -142,6 +320,33 @@ func (pol *Policy) compileSSHPolicy(
 			continue // Skip this rule if we can't resolve sources
 		}
 
+		// If autogroup:self is in destinations, filter sources to same user only
+		if hasAutogroupSelfInDst {
+			var filteredSrcIPs netipx.IPSetBuilder
+			// Instead of iterating all addresses, check each node's IPs
+			for _, n := range nodes.All() {
+				if n.User().ID == node.User().ID && !n.IsTagged() {
+					// Check if any of this node's IPs are in the source set
+					for _, nodeIP := range n.IPs() {
+						if srcIPs.Contains(nodeIP) {
+							n.AppendToIPSet(&filteredSrcIPs)
+							break // Found this node, move to next
+						}
+					}
+				}
+			}
+
+			srcIPs, err = filteredSrcIPs.IPSet()
+			if err != nil {
+				return nil, err
+			}
+
+			if srcIPs == nil || len(srcIPs.Prefixes()) == 0 {
+				// No valid sources after filtering, skip this rule
+				continue
+			}
+		}
+
 		for addr := range util.IPSetAddrIter(srcIPs) {
 			principals = append(principals, &tailcfg.SSHPrincipal{
 				NodeIP: addr.String(),
---
@@ -3,6 +3,7 @@ package v2
 import (
 	"encoding/json"
 	"net/netip"
+	"strings"
 	"testing"
 	"time"
 
@@ -15,6 +16,14 @@ import (
 	"tailscale.com/tailcfg"
 )
 
+// aliasWithPorts creates an AliasWithPorts structure from an alias and ports.
+func aliasWithPorts(alias Alias, ports ...tailcfg.PortRange) AliasWithPorts {
+	return AliasWithPorts{
+		Alias: alias,
+		Ports: ports,
+	}
+}
+
 func TestParsing(t *testing.T) {
 	users := types.Users{
 		{Model: gorm.Model{ID: 1}, Name: "testuser"},
@@ -786,8 +795,548 @@ func TestSSHJSONSerialization(t *testing.T) {
 	assert.NotContains(t, string(jsonData), `"sshUsers": null`, "SSH users should not be null")
 }
 
+func TestCompileFilterRulesForNodeWithAutogroupSelf(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1"},
+		{Model: gorm.Model{ID: 2}, Name: "user2"},
+	}
+
+	nodes := types.Nodes{
+		{
+			User: users[0],
+			IPv4: ap("100.64.0.1"),
+		},
+		{
+			User: users[0],
+			IPv4: ap("100.64.0.2"),
+		},
+		{
+			User: users[1],
+			IPv4: ap("100.64.0.3"),
+		},
+		{
+			User: users[1],
+			IPv4: ap("100.64.0.4"),
+		},
+		// Tagged device for user1
+		{
+			User:       users[0],
+			IPv4:       ap("100.64.0.5"),
+			ForcedTags: []string{"tag:test"},
+		},
+		// Tagged device for user2
+		{
+			User:       users[1],
+			IPv4:       ap("100.64.0.6"),
+			ForcedTags: []string{"tag:test"},
+		},
+	}
+
+	// Test: Tailscale intended usage pattern (autogroup:member + autogroup:self)
+	policy2 := &Policy{
+		ACLs: []ACL{
+			{
+				Action:  "accept",
+				Sources: []Alias{agp("autogroup:member")},
+				Destinations: []AliasWithPorts{
+					aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
+				},
+			},
+		},
+	}
+
+	err := policy2.validate()
+	if err != nil {
+		t.Fatalf("policy validation failed: %v", err)
+	}
+
+	// Test compilation for user1's first node
+	node1 := nodes[0].View()
+
+	rules, err := policy2.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	if len(rules) != 1 {
+		t.Fatalf("expected 1 rule, got %d", len(rules))
+	}
+
+	// Check that the rule includes:
+	// - Sources: only user1's untagged devices (filtered by autogroup:self semantics)
+	// - Destinations: only user1's untagged devices (autogroup:self)
+	rule := rules[0]
+
+	// Sources should ONLY include user1's untagged devices (100.64.0.1, 100.64.0.2)
+	expectedSourceIPs := []string{"100.64.0.1", "100.64.0.2"}
+
+	for _, expectedIP := range expectedSourceIPs {
+		found := false
+
+		addr := netip.MustParseAddr(expectedIP)
+		for _, prefix := range rule.SrcIPs {
+			pref := netip.MustParsePrefix(prefix)
+			if pref.Contains(addr) {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			t.Errorf("expected source IP %s to be covered by generated prefixes %v", expectedIP, rule.SrcIPs)
+		}
+	}
+
+	// Verify that other users' devices and tagged devices are not included in sources
+	excludedSourceIPs := []string{"100.64.0.3", "100.64.0.4", "100.64.0.5", "100.64.0.6"}
+	for _, excludedIP := range excludedSourceIPs {
+		addr := netip.MustParseAddr(excludedIP)
+		for _, prefix := range rule.SrcIPs {
+			pref := netip.MustParsePrefix(prefix)
+			if pref.Contains(addr) {
+				t.Errorf("SECURITY VIOLATION: source IP %s should not be included but found in prefix %s", excludedIP, prefix)
+			}
+		}
+	}
+
+	expectedDestIPs := []string{"100.64.0.1", "100.64.0.2"}
+
+	actualDestIPs := make([]string, 0, len(rule.DstPorts))
+	for _, dst := range rule.DstPorts {
+		actualDestIPs = append(actualDestIPs, dst.IP)
+	}
+
+	for _, expectedIP := range expectedDestIPs {
+		found := false
+
+		for _, actualIP := range actualDestIPs {
+			if actualIP == expectedIP {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			t.Errorf("expected destination IP %s to be included, got: %v", expectedIP, actualDestIPs)
+		}
+	}
+
+	// Verify that other users' devices and tagged devices are not in destinations
+	excludedDestIPs := []string{"100.64.0.3", "100.64.0.4", "100.64.0.5", "100.64.0.6"}
+	for _, excludedIP := range excludedDestIPs {
+		for _, actualIP := range actualDestIPs {
+			if actualIP == excludedIP {
+				t.Errorf("SECURITY: destination IP %s should not be included but found in destinations", excludedIP)
+			}
+		}
+	}
+}
+func TestAutogroupSelfInSourceIsRejected(t *testing.T) {
+	// Test that autogroup:self cannot be used in sources (per Tailscale spec)
+	policy := &Policy{
+		ACLs: []ACL{
+			{
+				Action:  "accept",
+				Sources: []Alias{agp("autogroup:self")},
+				Destinations: []AliasWithPorts{
+					aliasWithPorts(agp("autogroup:member"), tailcfg.PortRangeAny),
+				},
+			},
+		},
+	}
+
+	err := policy.validate()
+	if err == nil {
+		// Fatal rather than Error: the err.Error() call below would panic on nil.
+		t.Fatal("expected validation error when using autogroup:self in sources")
+	}
+
+	if !strings.Contains(err.Error(), "autogroup:self") {
+		t.Errorf("expected error message to mention autogroup:self, got: %v", err)
+	}
+}
+
+// TestAutogroupSelfWithSpecificUserSource verifies that when autogroup:self is in
+// the destination and a specific user is in the source, only that user's devices
+// are allowed (and only if they match the target user).
+func TestAutogroupSelfWithSpecificUserSource(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1"},
+		{Model: gorm.Model{ID: 2}, Name: "user2"},
+	}
+
+	nodes := types.Nodes{
+		{User: users[0], IPv4: ap("100.64.0.1")},
+		{User: users[0], IPv4: ap("100.64.0.2")},
+		{User: users[1], IPv4: ap("100.64.0.3")},
+		{User: users[1], IPv4: ap("100.64.0.4")},
+	}
+
+	policy := &Policy{
+		ACLs: []ACL{
+			{
+				Action:  "accept",
+				Sources: []Alias{up("user1@")},
+				Destinations: []AliasWithPorts{
+					aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
+				},
+			},
+		},
+	}
+
+	err := policy.validate()
+	require.NoError(t, err)
+
+	// For user1's node: sources should be user1's devices
+	node1 := nodes[0].View()
+	rules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.Len(t, rules, 1)
+
+	expectedSourceIPs := []string{"100.64.0.1", "100.64.0.2"}
+	for _, expectedIP := range expectedSourceIPs {
+		found := false
+		addr := netip.MustParseAddr(expectedIP)
+
+		for _, prefix := range rules[0].SrcIPs {
+			pref := netip.MustParsePrefix(prefix)
+			if pref.Contains(addr) {
+				found = true
+				break
+			}
+		}
+
+		assert.True(t, found, "expected source IP %s to be present", expectedIP)
+	}
+
+	actualDestIPs := make([]string, 0, len(rules[0].DstPorts))
+	for _, dst := range rules[0].DstPorts {
+		actualDestIPs = append(actualDestIPs, dst.IP)
+	}
+
+	assert.ElementsMatch(t, expectedSourceIPs, actualDestIPs)
+
+	node2 := nodes[2].View()
+	rules2, err := policy.compileFilterRulesForNode(users, node2, nodes.ViewSlice())
+	require.NoError(t, err)
+	assert.Empty(t, rules2, "user2's node should have no rules (user1@ devices can't match user2's self)")
+}
+
+// TestAutogroupSelfWithGroupSource verifies that when a group is used as source
+// and autogroup:self as destination, only group members who are the same user
+// as the target are allowed.
+func TestAutogroupSelfWithGroupSource(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1"},
+		{Model: gorm.Model{ID: 2}, Name: "user2"},
+		{Model: gorm.Model{ID: 3}, Name: "user3"},
+	}
+
+	nodes := types.Nodes{
+		{User: users[0], IPv4: ap("100.64.0.1")},
+		{User: users[0], IPv4: ap("100.64.0.2")},
+		{User: users[1], IPv4: ap("100.64.0.3")},
+		{User: users[1], IPv4: ap("100.64.0.4")},
+		{User: users[2], IPv4: ap("100.64.0.5")},
+	}
+
+	policy := &Policy{
+		Groups: Groups{
+			Group("group:admins"): []Username{Username("user1@"), Username("user2@")},
+		},
+		ACLs: []ACL{
+			{
+				Action:  "accept",
+				Sources: []Alias{gp("group:admins")},
+				Destinations: []AliasWithPorts{
+					aliasWithPorts(agp("autogroup:self"), tailcfg.PortRangeAny),
+				},
+			},
+		},
+	}
+
+	err := policy.validate()
+	require.NoError(t, err)
+
+	// (group:admins has user1+user2, but autogroup:self filters to same user)
+	node1 := nodes[0].View()
+	rules, err := policy.compileFilterRulesForNode(users, node1, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.Len(t, rules, 1)
+
+	expectedSrcIPs := []string{"100.64.0.1", "100.64.0.2"}
+	for _, expectedIP := range expectedSrcIPs {
+		found := false
+		addr := netip.MustParseAddr(expectedIP)
+
+		for _, prefix := range rules[0].SrcIPs {
+			pref := netip.MustParsePrefix(prefix)
+			if pref.Contains(addr) {
+				found = true
+				break
+			}
+		}
+
+		assert.True(t, found, "expected source IP %s for user1", expectedIP)
+	}
+
+	node3 := nodes[4].View()
+	rules3, err := policy.compileFilterRulesForNode(users, node3, nodes.ViewSlice())
+	require.NoError(t, err)
+	assert.Empty(t, rules3, "user3 should have no rules")
+}
+
 // Helper function to create IP addresses for testing
 func createAddr(ip string) *netip.Addr {
 	addr, _ := netip.ParseAddr(ip)
 	return &addr
 }
+
+// TestSSHWithAutogroupSelfInDestination verifies that SSH policies work correctly
+// with autogroup:self in destinations
+func TestSSHWithAutogroupSelfInDestination(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1"},
+		{Model: gorm.Model{ID: 2}, Name: "user2"},
+	}
+
+	nodes := types.Nodes{
+		// User1's nodes
+		{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "user1-node1"},
+		{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "user1-node2"},
+		// User2's nodes
+		{User: users[1], IPv4: ap("100.64.0.3"), Hostname: "user2-node1"},
+		{User: users[1], IPv4: ap("100.64.0.4"), Hostname: "user2-node2"},
+		// Tagged node for user1 (should be excluded)
+		{User: users[0], IPv4: ap("100.64.0.5"), Hostname: "user1-tagged", ForcedTags: []string{"tag:server"}},
+	}
+
+	policy := &Policy{
+		SSHs: []SSH{
+			{
+				Action:       "accept",
+				Sources:      SSHSrcAliases{agp("autogroup:member")},
+				Destinations: SSHDstAliases{agp("autogroup:self")},
+				Users:        []SSHUser{"autogroup:nonroot"},
+			},
+		},
+	}
+
+	err := policy.validate()
+	require.NoError(t, err)
+
+	// Test for user1's first node
+	node1 := nodes[0].View()
+	sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.NotNil(t, sshPolicy)
+	require.Len(t, sshPolicy.Rules, 1)
+
+	rule := sshPolicy.Rules[0]
+
+	// Principals should only include user1's untagged devices
+	require.Len(t, rule.Principals, 2, "should have 2 principals (user1's 2 untagged nodes)")
+
+	principalIPs := make([]string, len(rule.Principals))
+	for i, p := range rule.Principals {
+		principalIPs[i] = p.NodeIP
+	}
+	assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
+
+	// Test for user2's first node
+	node3 := nodes[2].View()
+	sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.NotNil(t, sshPolicy2)
+	require.Len(t, sshPolicy2.Rules, 1)
+
+	rule2 := sshPolicy2.Rules[0]
+
+	// Principals should only include user2's untagged devices
+	require.Len(t, rule2.Principals, 2, "should have 2 principals (user2's 2 untagged nodes)")
+
+	principalIPs2 := make([]string, len(rule2.Principals))
+	for i, p := range rule2.Principals {
+		principalIPs2[i] = p.NodeIP
+	}
+	assert.ElementsMatch(t, []string{"100.64.0.3", "100.64.0.4"}, principalIPs2)
+
+	// Test for tagged node (should have no SSH rules)
+	node5 := nodes[4].View()
+	sshPolicy3, err := policy.compileSSHPolicy(users, node5, nodes.ViewSlice())
+	require.NoError(t, err)
+	if sshPolicy3 != nil {
+		assert.Empty(t, sshPolicy3.Rules, "tagged nodes should not get SSH rules with autogroup:self")
+	}
+}
+
+// TestSSHWithAutogroupSelfAndSpecificUser verifies that when a specific user
+// is in the source and autogroup:self in destination, only that user's devices
+// can SSH (and only if they match the target user)
+func TestSSHWithAutogroupSelfAndSpecificUser(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1"},
+		{Model: gorm.Model{ID: 2}, Name: "user2"},
+	}
+
+	nodes := types.Nodes{
+		{User: users[0], IPv4: ap("100.64.0.1")},
+		{User: users[0], IPv4: ap("100.64.0.2")},
+		{User: users[1], IPv4: ap("100.64.0.3")},
+		{User: users[1], IPv4: ap("100.64.0.4")},
+	}
+
+	policy := &Policy{
+		SSHs: []SSH{
+			{
+				Action:       "accept",
+				Sources:      SSHSrcAliases{up("user1@")},
+				Destinations: SSHDstAliases{agp("autogroup:self")},
+				Users:        []SSHUser{"ubuntu"},
+			},
+		},
+	}
+
+	err := policy.validate()
+	require.NoError(t, err)
+
+	// For user1's node: should allow SSH from user1's devices
+	node1 := nodes[0].View()
+	sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.NotNil(t, sshPolicy)
+	require.Len(t, sshPolicy.Rules, 1)
+
+	rule := sshPolicy.Rules[0]
+	require.Len(t, rule.Principals, 2, "user1 should have 2 principals")
+
+	principalIPs := make([]string, len(rule.Principals))
+	for i, p := range rule.Principals {
+		principalIPs[i] = p.NodeIP
+	}
+	assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
+
+	// For user2's node: should have no rules (user1's devices can't match user2's self)
+	node3 := nodes[2].View()
+	sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
+	require.NoError(t, err)
+	if sshPolicy2 != nil {
+		assert.Empty(t, sshPolicy2.Rules, "user2 should have no SSH rules since source is user1")
+	}
+}
+
+// TestSSHWithAutogroupSelfAndGroup verifies SSH with group sources and autogroup:self destinations
+func TestSSHWithAutogroupSelfAndGroup(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1"},
+		{Model: gorm.Model{ID: 2}, Name: "user2"},
+		{Model: gorm.Model{ID: 3}, Name: "user3"},
+	}
+
+	nodes := types.Nodes{
+		{User: users[0], IPv4: ap("100.64.0.1")},
+		{User: users[0], IPv4: ap("100.64.0.2")},
+		{User: users[1], IPv4: ap("100.64.0.3")},
+		{User: users[1], IPv4: ap("100.64.0.4")},
+		{User: users[2], IPv4: ap("100.64.0.5")},
+	}
+
+	policy := &Policy{
+		Groups: Groups{
+			Group("group:admins"): []Username{Username("user1@"), Username("user2@")},
+		},
+		SSHs: []SSH{
+			{
+				Action:       "accept",
+				Sources:      SSHSrcAliases{gp("group:admins")},
+				Destinations: SSHDstAliases{agp("autogroup:self")},
+				Users:        []SSHUser{"root"},
+			},
+		},
+	}
+
+	err := policy.validate()
+	require.NoError(t, err)
+
+	// For user1's node: should allow SSH from user1's devices only (not user2's)
+	node1 := nodes[0].View()
+	sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.NotNil(t, sshPolicy)
+	require.Len(t, sshPolicy.Rules, 1)
+
+	rule := sshPolicy.Rules[0]
+	require.Len(t, rule.Principals, 2, "user1 should have 2 principals (only user1's nodes)")
+
+	principalIPs := make([]string, len(rule.Principals))
+	for i, p := range rule.Principals {
+		principalIPs[i] = p.NodeIP
+	}
+	assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs)
+
+	// For user3's node: should have no rules (not in group:admins)
+	node5 := nodes[4].View()
+	sshPolicy2, err := policy.compileSSHPolicy(users, node5, nodes.ViewSlice())
+	require.NoError(t, err)
+	if sshPolicy2 != nil {
+		assert.Empty(t, sshPolicy2.Rules, "user3 should have no SSH rules (not in group)")
+	}
+}
+
+// TestSSHWithAutogroupSelfExcludesTaggedDevices verifies that tagged devices
+// are excluded from both sources and destinations when autogroup:self is used
+func TestSSHWithAutogroupSelfExcludesTaggedDevices(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1"},
+	}
+
+	nodes := types.Nodes{
+		{User: users[0], IPv4: ap("100.64.0.1"), Hostname: "untagged1"},
+		{User: users[0], IPv4: ap("100.64.0.2"), Hostname: "untagged2"},
+		{User: users[0], IPv4: ap("100.64.0.3"), Hostname: "tagged1", ForcedTags: []string{"tag:server"}},
+		{User: users[0], IPv4: ap("100.64.0.4"), Hostname: "tagged2", ForcedTags: []string{"tag:web"}},
+	}
+
+	policy := &Policy{
+		TagOwners: TagOwners{
+			Tag("tag:server"): Owners{up("user1@")},
+			Tag("tag:web"):    Owners{up("user1@")},
+		},
+		SSHs: []SSH{
+			{
+				Action:       "accept",
+				Sources:      SSHSrcAliases{agp("autogroup:member")},
+				Destinations: SSHDstAliases{agp("autogroup:self")},
+				Users:        []SSHUser{"admin"},
+			},
+		},
+	}
+
+	err := policy.validate()
+	require.NoError(t, err)
+
+	// For untagged node: should only get principals from other untagged nodes
+	node1 := nodes[0].View()
+	sshPolicy, err := policy.compileSSHPolicy(users, node1, nodes.ViewSlice())
+	require.NoError(t, err)
+	require.NotNil(t, sshPolicy)
+	require.Len(t, sshPolicy.Rules, 1)
+
+	rule := sshPolicy.Rules[0]
+	require.Len(t, rule.Principals, 2, "should only have 2 principals (untagged nodes)")
+
+	principalIPs := make([]string, len(rule.Principals))
+	for i, p := range rule.Principals {
+		principalIPs[i] = p.NodeIP
+	}
+	assert.ElementsMatch(t, []string{"100.64.0.1", "100.64.0.2"}, principalIPs,
+		"should only include untagged devices")
+
+	// For tagged node: should get no SSH rules
+	node3 := nodes[2].View()
+	sshPolicy2, err := policy.compileSSHPolicy(users, node3, nodes.ViewSlice())
+	require.NoError(t, err)
+	if sshPolicy2 != nil {
+		assert.Empty(t, sshPolicy2.Rules, "tagged node should get no SSH rules with autogroup:self")
+	}
+}
---
| @@ -38,6 +38,10 @@ type PolicyManager struct { | |||||||
|  |  | ||||||
| 	// Lazy map of SSH policies | 	// Lazy map of SSH policies | ||||||
| 	sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy | 	sshPolicyMap map[types.NodeID]*tailcfg.SSHPolicy | ||||||
|  |  | ||||||
|  | 	// Lazy map of per-node filter rules (when autogroup:self is used) | ||||||
|  | 	filterRulesMap    map[types.NodeID][]tailcfg.FilterRule | ||||||
|  | 	usesAutogroupSelf bool | ||||||
| } | } | ||||||
|  |  | ||||||
| // NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes. | // NewPolicyManager creates a new PolicyManager from a policy file and a list of users and nodes. | ||||||
| @@ -54,6 +58,8 @@ func NewPolicyManager(b []byte, users []types.User, nodes views.Slice[types.Node | |||||||
| 		users:             users, | 		users:             users, | ||||||
| 		nodes:             nodes, | 		nodes:             nodes, | ||||||
| 		sshPolicyMap:      make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()), | 		sshPolicyMap:      make(map[types.NodeID]*tailcfg.SSHPolicy, nodes.Len()), | ||||||
|  | 		filterRulesMap:    make(map[types.NodeID][]tailcfg.FilterRule, nodes.Len()), | ||||||
|  | 		usesAutogroupSelf: policy.usesAutogroupSelf(), | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	_, err = pm.updateLocked() | 	_, err = pm.updateLocked() | ||||||
| @@ -72,8 +78,17 @@ func (pm *PolicyManager) updateLocked() (bool, error) { | |||||||
| 	// policies for nodes that have changed. Particularly if the only difference is | 	// policies for nodes that have changed. Particularly if the only difference is | ||||||
| 	// that nodes has been added or removed. | 	// that nodes has been added or removed. | ||||||
| 	clear(pm.sshPolicyMap) | 	clear(pm.sshPolicyMap) | ||||||
|  | 	clear(pm.filterRulesMap) | ||||||
|  |  | ||||||
| 	filter, err := pm.pol.compileFilterRules(pm.users, pm.nodes) | 	// Check if policy uses autogroup:self | ||||||
|  | 	pm.usesAutogroupSelf = pm.pol.usesAutogroupSelf() | ||||||
|  |  | ||||||
|  | 	var filter []tailcfg.FilterRule | ||||||
|  |  | ||||||
|  | 	var err error | ||||||
|  |  | ||||||
|  | 	// Standard compilation for all policies | ||||||
|  | 	filter, err = pm.pol.compileFilterRules(pm.users, pm.nodes) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return false, fmt.Errorf("compiling filter rules: %w", err) | 		return false, fmt.Errorf("compiling filter rules: %w", err) | ||||||
| 	} | 	} | ||||||
| @@ -218,6 +233,35 @@ func (pm *PolicyManager) Filter() ([]tailcfg.FilterRule, []matcher.Match) { | |||||||
| 	return pm.filter, pm.matchers | 	return pm.filter, pm.matchers | ||||||
| } | } | ||||||
|  |  | ||||||
|  | // FilterForNode returns the filter rules for a specific node. | ||||||
|  | // If the policy uses autogroup:self, this returns node-specific rules for security. | ||||||
|  | // Otherwise, it returns the global filter rules for efficiency. | ||||||
|  | func (pm *PolicyManager) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) { | ||||||
|  | 	if pm == nil { | ||||||
|  | 		return nil, nil | ||||||
|  | 	} | ||||||
|  |  | ||||||
|  | 	pm.mu.Lock() | ||||||
|  | 	defer pm.mu.Unlock() | ||||||
|  |  | ||||||
|  | 	if !pm.usesAutogroupSelf { | ||||||
|  | 		return pm.filter, nil | ||||||
|  | 	} | ||||||
|  |  | ||||||
|  | 	if rules, ok := pm.filterRulesMap[node.ID()]; ok { | ||||||
|  | 		return rules, nil | ||||||
|  | 	} | ||||||
|  |  | ||||||
|  | 	rules, err := pm.pol.compileFilterRulesForNode(pm.users, node, pm.nodes) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, fmt.Errorf("compiling filter rules for node: %w", err) | ||||||
|  | 	} | ||||||
|  |  | ||||||
|  | 	pm.filterRulesMap[node.ID()] = rules | ||||||
|  |  | ||||||
|  | 	return rules, nil | ||||||
|  | } | ||||||
|  |  | ||||||
| // SetUsers updates the users in the policy manager and updates the filter rules. | // SetUsers updates the users in the policy manager and updates the filter rules. | ||||||
| func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) { | func (pm *PolicyManager) SetUsers(users []types.User) (bool, error) { | ||||||
| 	if pm == nil { | 	if pm == nil { | ||||||
| @@ -255,6 +299,20 @@ func (pm *PolicyManager) SetNodes(nodes views.Slice[types.NodeView]) (bool, erro | |||||||
|  |  | ||||||
| 	pm.mu.Lock() | 	pm.mu.Lock() | ||||||
| 	defer pm.mu.Unlock() | 	defer pm.mu.Unlock() | ||||||
|  |  | ||||||
|  | 	// Clear cache based on what actually changed | ||||||
|  | 	if pm.usesAutogroupSelf { | ||||||
|  | 		// For autogroup:self, we need granular invalidation since rules depend on: | ||||||
|  | 		// - User ownership (node.User().ID) | ||||||
|  | 		// - Tag status (node.IsTagged()) | ||||||
|  | 		// - IP addresses (node.IPs()) | ||||||
|  | 		// - Node existence (added/removed) | ||||||
|  | 		pm.invalidateAutogroupSelfCache(pm.nodes, nodes) | ||||||
|  | 	} else { | ||||||
|  | 		// For non-autogroup:self policies, we can clear everything | ||||||
|  | 		clear(pm.filterRulesMap) | ||||||
|  | 	} | ||||||
|  |  | ||||||
| 	pm.nodes = nodes | 	pm.nodes = nodes | ||||||
|  |  | ||||||
| 	return pm.updateLocked() | 	return pm.updateLocked() | ||||||
| @@ -399,3 +457,113 @@ func (pm *PolicyManager) DebugString() string { | |||||||
|  |  | ||||||
| 	return sb.String() | 	return sb.String() | ||||||
| } | } | ||||||
|  |  | ||||||
|  | // invalidateAutogroupSelfCache intelligently clears only the cache entries that need to be | ||||||
|  | // invalidated when using autogroup:self policies. This is much more efficient than clearing | ||||||
+// the entire cache.
+func (pm *PolicyManager) invalidateAutogroupSelfCache(oldNodes, newNodes views.Slice[types.NodeView]) {
+	// Build maps for efficient lookup
+	oldNodeMap := make(map[types.NodeID]types.NodeView)
+	for _, node := range oldNodes.All() {
+		oldNodeMap[node.ID()] = node
+	}
+
+	newNodeMap := make(map[types.NodeID]types.NodeView)
+	for _, node := range newNodes.All() {
+		newNodeMap[node.ID()] = node
+	}
+
+	// Track which users are affected by changes
+	affectedUsers := make(map[uint]struct{})
+
+	// Check for removed nodes
+	for nodeID, oldNode := range oldNodeMap {
+		if _, exists := newNodeMap[nodeID]; !exists {
+			affectedUsers[oldNode.User().ID] = struct{}{}
+		}
+	}
+
+	// Check for added nodes
+	for nodeID, newNode := range newNodeMap {
+		if _, exists := oldNodeMap[nodeID]; !exists {
+			affectedUsers[newNode.User().ID] = struct{}{}
+		}
+	}
+
+	// Check for modified nodes (user changes, tag changes, IP changes)
+	for nodeID, newNode := range newNodeMap {
+		if oldNode, exists := oldNodeMap[nodeID]; exists {
+			// Check if user changed
+			if oldNode.User().ID != newNode.User().ID {
+				affectedUsers[oldNode.User().ID] = struct{}{}
+				affectedUsers[newNode.User().ID] = struct{}{}
+			}
+
+			// Check if tag status changed
+			if oldNode.IsTagged() != newNode.IsTagged() {
+				affectedUsers[newNode.User().ID] = struct{}{}
+			}
+
+			// Check if the node's set of IPs changed
+			oldIPs := oldNode.IPs()
+			newIPs := newNode.IPs()
+			if len(oldIPs) != len(newIPs) {
+				affectedUsers[newNode.User().ID] = struct{}{}
+			} else {
+				// Same count: compare element-wise
+				for i, oldIP := range oldIPs {
+					if oldIP != newIPs[i] {
+						affectedUsers[newNode.User().ID] = struct{}{}
+						break
+					}
+				}
+			}
+		}
+	}
+
+	// Clear cache entries for affected users only. For autogroup:self,
+	// every node belonging to an affected user must be cleared, because
+	// autogroup:self rules depend on the user's entire device set.
+	for nodeID := range pm.filterRulesMap {
+		// Find the user for this cached node
+		var nodeUserID uint
+		found := false
+
+		// Check in new nodes first
+		for _, node := range newNodes.All() {
+			if node.ID() == nodeID {
+				nodeUserID = node.User().ID
+				found = true
+				break
+			}
+		}
+
+		// If not found in new nodes, check old nodes
+		if !found {
+			for _, node := range oldNodes.All() {
+				if node.ID() == nodeID {
+					nodeUserID = node.User().ID
+					found = true
+					break
+				}
+			}
+		}
+
+		if found {
+			// Clear this cache entry if its owner is affected
+			if _, affected := affectedUsers[nodeUserID]; affected {
+				delete(pm.filterRulesMap, nodeID)
+			}
+		} else {
+			// Node not found in either old or new list; clear the stale entry
+			delete(pm.filterRulesMap, nodeID)
+		}
+	}
+
+	if len(affectedUsers) > 0 {
+		log.Debug().
+			Int("affected_users", len(affectedUsers)).
+			Int("remaining_cache_entries", len(pm.filterRulesMap)).
+			Msg("Selectively cleared autogroup:self cache for affected users")
+	}
+}
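For intuition, the invalidation above reduces to one rule: a user is affected when one of their nodes is added, removed, or re-owned (with tag and IP changes handled the same way), and every cache entry belonging to an affected user is dropped. Below is a minimal, runnable sketch of that affected-user computation; the local `node` struct and plain slices are assumptions standing in for the real `types.NodeView` and `views.Slice`.

package main

import "fmt"

// node models just the fields the invalidation rule needs; the real type
// is types.NodeView.
type node struct {
	id     uint64
	userID uint
}

// affectedUsers mirrors the diffing above: removed, added, and
// owner-changed nodes mark their users as affected.
func affectedUsers(oldNodes, newNodes []node) map[uint]struct{} {
	oldByID := make(map[uint64]node, len(oldNodes))
	for _, n := range oldNodes {
		oldByID[n.id] = n
	}
	newByID := make(map[uint64]node, len(newNodes))
	for _, n := range newNodes {
		newByID[n.id] = n
	}

	affected := make(map[uint]struct{})
	for id, o := range oldByID {
		if n, ok := newByID[id]; !ok {
			affected[o.userID] = struct{}{} // node removed
		} else if n.userID != o.userID {
			affected[o.userID] = struct{}{} // node re-assigned: both owners affected
			affected[n.userID] = struct{}{}
		}
	}
	for id, n := range newByID {
		if _, ok := oldByID[id]; !ok {
			affected[n.userID] = struct{}{} // node added
		}
	}

	return affected
}

func main() {
	before := []node{{1, 1}, {2, 1}, {3, 2}}
	after := []node{{1, 1}, {2, 3}, {3, 2}}  // node 2 moved from user 1 to user 3
	fmt.Println(affectedUsers(before, after)) // map[1:{} 3:{}]
}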
@@ -66,3 +66,143 @@ func TestPolicyManager(t *testing.T) {
 		})
 	}
 }
+
+func TestInvalidateAutogroupSelfCache(t *testing.T) {
+	users := types.Users{
+		{Model: gorm.Model{ID: 1}, Name: "user1", Email: "user1@headscale.net"},
+		{Model: gorm.Model{ID: 2}, Name: "user2", Email: "user2@headscale.net"},
+		{Model: gorm.Model{ID: 3}, Name: "user3", Email: "user3@headscale.net"},
+	}
+
+	policy := `{
+		"acls": [
+			{
+				"action": "accept",
+				"src": ["autogroup:member"],
+				"dst": ["autogroup:self:*"]
+			}
+		]
+	}`
+
+	initialNodes := types.Nodes{
+		node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
+		node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
+		node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
+		node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
+	}
+
+	for i, n := range initialNodes {
+		n.ID = types.NodeID(i + 1)
+	}
+
+	pm, err := NewPolicyManager([]byte(policy), users, initialNodes.ViewSlice())
+	require.NoError(t, err)
+
+	// Populate the cache by calling FilterForNode for each node
+	for _, n := range initialNodes {
+		_, err := pm.FilterForNode(n.View())
+		require.NoError(t, err)
+	}
+
+	require.Equal(t, len(initialNodes), len(pm.filterRulesMap))
+
+	tests := []struct {
+		name            string
+		newNodes        types.Nodes
+		expectedCleared int
+		description     string
+	}{
+		{
+			name: "no_changes",
+			newNodes: types.Nodes{
+				node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
+				node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
+				node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
+				node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
+			},
+			expectedCleared: 0,
+			description:     "No changes should clear no cache entries",
+		},
+		{
+			name: "node_added",
+			newNodes: types.Nodes{
+				node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
+				node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
+				node("user1-node3", "100.64.0.5", "fd7a:115c:a1e0::5", users[0], nil), // New node
+				node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
+				node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
+			},
+			expectedCleared: 2, // user1's two existing nodes should be cleared
+			description:     "Adding a node should clear cache for that user's existing nodes",
+		},
+		{
+			name: "node_removed",
+			newNodes: types.Nodes{
+				node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
+				// user1-node2 removed
+				node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
+				node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
+			},
+			expectedCleared: 2, // user1's remaining node + the removed node's stale entry should be cleared
+			description:     "Removing a node should clear cache for that user's remaining nodes",
+		},
+		{
+			name: "user_changed",
+			newNodes: types.Nodes{
+				node("user1-node1", "100.64.0.1", "fd7a:115c:a1e0::1", users[0], nil),
+				node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[2], nil), // Changed to user3
+				node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
+				node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
+			},
+			expectedCleared: 3, // user1's two nodes (old owner) + user3's node (new owner) should be cleared
+			description:     "Changing a node's user should clear cache for both old and new users",
+		},
+		{
+			name: "ip_changed",
+			newNodes: types.Nodes{
+				node("user1-node1", "100.64.0.10", "fd7a:115c:a1e0::10", users[0], nil), // IP changed
+				node("user1-node2", "100.64.0.2", "fd7a:115c:a1e0::2", users[0], nil),
+				node("user2-node1", "100.64.0.3", "fd7a:115c:a1e0::3", users[1], nil),
+				node("user3-node1", "100.64.0.4", "fd7a:115c:a1e0::4", users[2], nil),
+			},
+			expectedCleared: 2, // user1's nodes should be cleared
+			description:     "Changing a node's IP should clear cache for that user's nodes",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Reuse the original IDs for surviving nodes so cache keys match
+			for i, n := range tt.newNodes {
+				found := false
+				for _, origNode := range initialNodes {
+					if n.Hostname == origNode.Hostname {
+						n.ID = origNode.ID
+						found = true
+						break
+					}
+				}
+				if !found {
+					n.ID = types.NodeID(len(initialNodes) + i + 1)
+				}
+			}
+
+			// Rebuild a full cache before measuring what gets cleared
+			pm.filterRulesMap = make(map[types.NodeID][]tailcfg.FilterRule)
+			for _, n := range initialNodes {
+				_, err := pm.FilterForNode(n.View())
+				require.NoError(t, err)
+			}
+
+			initialCacheSize := len(pm.filterRulesMap)
+			require.Equal(t, len(initialNodes), initialCacheSize)
+
+			pm.invalidateAutogroupSelfCache(initialNodes.ViewSlice(), tt.newNodes.ViewSlice())
+
+			// Verify the expected number of cache entries were cleared
+			finalCacheSize := len(pm.filterRulesMap)
+			clearedEntries := initialCacheSize - finalCacheSize
+			require.Equal(t, tt.expectedCleared, clearedEntries, tt.description)
+		})
+	}
+}
@@ -32,6 +32,8 @@ var policyJSONOpts = []json.Options{
 
 const Wildcard = Asterix(0)
 
+var ErrAutogroupSelfRequiresPerNodeResolution = errors.New("autogroup:self requires per-node resolution and cannot be resolved in this context")
+
 type Asterix int
 
 func (a Asterix) Validate() error {
@@ -485,8 +487,6 @@ const (
 	AutoGroupMember   AutoGroup = "autogroup:member"
 	AutoGroupNonRoot  AutoGroup = "autogroup:nonroot"
 	AutoGroupTagged   AutoGroup = "autogroup:tagged"
-
-	// These are not yet implemented.
 	AutoGroupSelf     AutoGroup = "autogroup:self"
 )
 
@@ -495,6 +495,7 @@ var autogroups = []AutoGroup{
 	AutoGroupMember,
 	AutoGroupNonRoot,
 	AutoGroupTagged,
+	AutoGroupSelf,
 }
 
 func (ag AutoGroup) Validate() error {
@@ -590,6 +591,12 @@ func (ag AutoGroup) Resolve(p *Policy, users types.Users, nodes views.Slice[type
 
 		return build.IPSet()
 
+	case AutoGroupSelf:
+		// autogroup:self represents all devices owned by the same user.
+		// This cannot be resolved in the general context and should be handled
+		// specially during policy compilation per-node for security.
+		return nil, ErrAutogroupSelfRequiresPerNodeResolution
+
 	default:
 		return nil, fmt.Errorf("unknown autogroup %q", ag)
 	}
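Since `Resolve` deliberately errors out for `autogroup:self`, expansion has to happen where the target node is known. The sketch below is a self-contained model of what per-node expansion means, assuming Tailscale's documented semantics (untagged devices of the same user); the `device` type and `selfSetFor` helper are illustrative, not headscale APIs.

package main

import (
	"fmt"
	"net/netip"
)

// device models just the fields per-node expansion needs.
type device struct {
	userID uint
	tagged bool
	ips    []netip.Addr
}

// selfSetFor expands autogroup:self for one target device: the untagged
// devices owned by the same user. Tagged devices carry no user identity,
// so they neither match nor are matched.
func selfSetFor(target device, all []device) []netip.Addr {
	if target.tagged {
		return nil
	}
	var out []netip.Addr
	for _, d := range all {
		if d.userID == target.userID && !d.tagged {
			out = append(out, d.ips...)
		}
	}
	return out
}

func main() {
	devs := []device{
		{userID: 1, tagged: false, ips: []netip.Addr{netip.MustParseAddr("100.64.0.1")}},
		{userID: 1, tagged: true, ips: []netip.Addr{netip.MustParseAddr("100.64.0.2")}},  // tagged: excluded
		{userID: 2, tagged: false, ips: []netip.Addr{netip.MustParseAddr("100.64.0.3")}}, // other user: excluded
	}
	fmt.Println(selfSetFor(devs[0], devs)) // [100.64.0.1]
}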
@@ -1586,11 +1593,11 @@ type Policy struct {
 var (
 	// TODO(kradalby): Add these checks for tagOwners and autoApprovers.
 	autogroupForSrc       = []AutoGroup{AutoGroupMember, AutoGroupTagged}
-	autogroupForDst       = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged}
+	autogroupForDst       = []AutoGroup{AutoGroupInternet, AutoGroupMember, AutoGroupTagged, AutoGroupSelf}
 	autogroupForSSHSrc    = []AutoGroup{AutoGroupMember, AutoGroupTagged}
-	autogroupForSSHDst    = []AutoGroup{AutoGroupMember, AutoGroupTagged}
+	autogroupForSSHDst    = []AutoGroup{AutoGroupMember, AutoGroupTagged, AutoGroupSelf}
 	autogroupForSSHUser   = []AutoGroup{AutoGroupNonRoot}
-	autogroupNotSupported = []AutoGroup{AutoGroupSelf}
+	autogroupNotSupported = []AutoGroup{}
 )
 
 func validateAutogroupSupported(ag *AutoGroup) error {
@@ -1614,6 +1621,10 @@ func validateAutogroupForSrc(src *AutoGroup) error {
 		return errors.New(`"autogroup:internet" used in source, it can only be used in ACL destinations`)
 	}
 
+	if src.Is(AutoGroupSelf) {
+		return errors.New(`"autogroup:self" used in source, it can only be used in ACL destinations`)
+	}
+
 	if !slices.Contains(autogroupForSrc, *src) {
 		return fmt.Errorf("autogroup %q is not supported for ACL sources, can be %v", *src, autogroupForSrc)
 	}
@@ -2112,3 +2123,40 @@ func validateProtocolPortCompatibility(protocol Protocol, destinations []AliasWi
 
 	return nil
 }
+
+// usesAutogroupSelf checks if the policy uses autogroup:self in any ACL or SSH rules.
+func (p *Policy) usesAutogroupSelf() bool {
+	if p == nil {
+		return false
+	}
+
+	// Check ACL rules
+	for _, acl := range p.ACLs {
+		for _, src := range acl.Sources {
+			if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+				return true
+			}
+		}
+		for _, dest := range acl.Destinations {
+			if ag, ok := dest.Alias.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+				return true
+			}
+		}
+	}
+
+	// Check SSH rules
+	for _, ssh := range p.SSHs {
+		for _, src := range ssh.Sources {
+			if ag, ok := src.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+				return true
+			}
+		}
+		for _, dest := range ssh.Destinations {
+			if ag, ok := dest.(*AutoGroup); ok && ag.Is(AutoGroupSelf) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
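`usesAutogroupSelf` is the gate between two compilation modes: without `autogroup:self`, one global filter serves every node; with it, rules must be compiled and cached per node. Below is a toy model of that dispatch; the `manager` type and `compile` callback are assumptions, while the real `FilterForNode` works with `types.NodeView` and `tailcfg.FilterRule`.

package main

import "fmt"

// manager caricatures the policy manager's two modes.
type manager struct {
	selfInPolicy bool                // result of usesAutogroupSelf
	globalRules  []string            // shared filter when autogroup:self is unused
	perNode      map[uint64][]string // per-node cache when it is used
}

func (m *manager) filterForNode(nodeID uint64, compile func(uint64) []string) []string {
	if !m.selfInPolicy {
		return m.globalRules // one filter fits all nodes
	}
	if r, ok := m.perNode[nodeID]; ok {
		return r // cache hit: the selective invalidation above keeps this safe
	}
	r := compile(nodeID)
	m.perNode[nodeID] = r
	return r
}

func main() {
	m := &manager{selfInPolicy: true, perNode: map[uint64][]string{}}
	compile := func(id uint64) []string {
		return []string{fmt.Sprintf("allow user(node %d) -> self", id)}
	}
	fmt.Println(m.filterForNode(7, compile)) // compiled
	fmt.Println(m.filterForNode(7, compile)) // served from cache
}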
@@ -459,7 +459,7 @@ func TestUnmarshalPolicy(t *testing.T) {
 	],
 }
 `,
-			wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged]`,
+			wantErr: `AutoGroup is invalid, got: "autogroup:invalid", must be one of [autogroup:internet autogroup:member autogroup:nonroot autogroup:tagged autogroup:self]`,
 		},
 		{
 			name: "undefined-hostname-errors-2490",
@@ -1881,6 +1881,38 @@ func TestResolvePolicy(t *testing.T) {
 				mp("100.100.101.7/32"), // Multiple forced tags
 			},
 		},
+		{
+			name:      "autogroup-self",
+			toResolve: ptr.To(AutoGroupSelf),
+			nodes: types.Nodes{
+				{
+					User: users["testuser"],
+					IPv4: ap("100.100.101.1"),
+				},
+				{
+					User: users["testuser2"],
+					IPv4: ap("100.100.101.2"),
+				},
+				{
+					User:       users["testuser"],
+					ForcedTags: []string{"tag:test"},
+					IPv4:       ap("100.100.101.3"),
+				},
+				{
+					User: users["testuser2"],
+					Hostinfo: &tailcfg.Hostinfo{
+						RequestTags: []string{"tag:test"},
+					},
+					IPv4: ap("100.100.101.4"),
+				},
+			},
+			pol: &Policy{
+				TagOwners: TagOwners{
+					Tag("tag:test"): Owners{ptr.To(Username("testuser@"))},
+				},
+			},
+			wantErr: "autogroup:self requires per-node resolution",
+		},
 		{
 			name:      "autogroup-invalid",
 			toResolve: ptr.To(AutoGroup("autogroup:invalid")),
@@ -793,6 +793,11 @@ func (s *State) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
 	return s.polMan.Filter()
 }
 
+// FilterForNode returns filter rules for a specific node, handling autogroup:self per node.
+func (s *State) FilterForNode(node types.NodeView) ([]tailcfg.FilterRule, error) {
+	return s.polMan.FilterForNode(node)
+}
+
 // NodeCanHaveTag checks if a node is allowed to have a specific tag.
 func (s *State) NodeCanHaveTag(node types.NodeView, tag string) bool {
 	return s.polMan.NodeCanHaveTag(node, tag)
@@ -1536,3 +1536,101 @@ func TestACLAutogroupTagged(t *testing.T) {
 		}
 	}
 }
+
+// TestACLAutogroupSelf verifies that a user's devices can reach each other but not devices owned by other users.
+func TestACLAutogroupSelf(t *testing.T) {
+	IntegrationSkip(t)
+
+	scenario := aclScenario(t,
+		&policyv2.Policy{
+			ACLs: []policyv2.ACL{
+				{
+					Action:  "accept",
+					Sources: []policyv2.Alias{ptr.To(policyv2.AutoGroupMember)},
+					Destinations: []policyv2.AliasWithPorts{
+						aliasWithPorts(ptr.To(policyv2.AutoGroupSelf), tailcfg.PortRangeAny),
+					},
+				},
+			},
+		},
+		2,
+	)
+	defer scenario.ShutdownAssertNoPanics(t)
+
+	// With autogroup:self, each node sees only its owner's other node as a peer
+	err := scenario.WaitForTailscaleSyncWithPeerCount(1, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
+	require.NoError(t, err)
+
+	user1Clients, err := scenario.GetClients("user1")
+	require.NoError(t, err)
+
+	user2Clients, err := scenario.GetClients("user2")
+	require.NoError(t, err)
+
+	// Test that user1's devices can access each other
+	for _, client := range user1Clients {
+		for _, peer := range user1Clients {
+			if client.Hostname() == peer.Hostname() {
+				continue
+			}
+
+			fqdn, err := peer.FQDN()
+			require.NoError(t, err)
+
+			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
+			t.Logf("url from %s (user1) to %s (user1)", client.Hostname(), fqdn)
+
+			result, err := client.Curl(url)
+			require.NoError(t, err)
+			assert.Len(t, result, 13)
+		}
+	}
+
+	// Test that user2's devices can access each other
+	for _, client := range user2Clients {
+		for _, peer := range user2Clients {
+			if client.Hostname() == peer.Hostname() {
+				continue
+			}
+
+			fqdn, err := peer.FQDN()
+			require.NoError(t, err)
+
+			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
+			t.Logf("url from %s (user2) to %s (user2)", client.Hostname(), fqdn)
+
+			result, err := client.Curl(url)
+			require.NoError(t, err)
+			assert.Len(t, result, 13)
+		}
+	}
+
+	// Test that devices from different users cannot access each other
+	for _, client := range user1Clients {
+		for _, peer := range user2Clients {
+			fqdn, err := peer.FQDN()
+			require.NoError(t, err)
+
+			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
+			t.Logf("url from %s (user1) to %s (user2) - should FAIL", client.Hostname(), fqdn)
+
+			result, err := client.Curl(url)
+			assert.Empty(t, result, "user1 should not be able to access user2's devices with autogroup:self")
+			assert.Error(t, err, "connection from user1 to user2 should fail")
+		}
+	}
+
+	for _, client := range user2Clients {
+		for _, peer := range user1Clients {
+			fqdn, err := peer.FQDN()
+			require.NoError(t, err)
+
+			url := fmt.Sprintf("http://%s/etc/hostname", fqdn)
+			t.Logf("url from %s (user2) to %s (user1) - should FAIL", client.Hostname(), fqdn)
+
+			result, err := client.Curl(url)
+			assert.Empty(t, result, "user2 should not be able to access user1's devices with autogroup:self")
+			assert.Error(t, err, "connection from user2 to user1 should fail")
+		}
+	}
+}
@@ -13,6 +13,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"tailscale.com/tailcfg"
+	"tailscale.com/types/ptr"
 )
 
 func isSSHNoAccessStdError(stderr string) bool {
@@ -458,3 +459,84 @@ func assertSSHNoAccessStdError(t *testing.T, err error, stderr string) {
 		t.Errorf("expected stderr output suggesting access denied, got: %s", stderr)
 	}
 }
+
+// TestSSHAutogroupSelf tests that SSH with autogroup:self works correctly:
+// - Users can SSH to their own devices
+// - Users cannot SSH to other users' devices
+func TestSSHAutogroupSelf(t *testing.T) {
+	IntegrationSkip(t)
+
+	scenario := sshScenario(t,
+		&policyv2.Policy{
+			ACLs: []policyv2.ACL{
+				{
+					Action:   "accept",
+					Protocol: "tcp",
+					Sources:  []policyv2.Alias{wildcard()},
+					Destinations: []policyv2.AliasWithPorts{
+						aliasWithPorts(wildcard(), tailcfg.PortRangeAny),
+					},
+				},
+			},
+			SSHs: []policyv2.SSH{
+				{
+					Action: "accept",
+					Sources: policyv2.SSHSrcAliases{
+						ptr.To(policyv2.AutoGroupMember),
+					},
+					Destinations: policyv2.SSHDstAliases{
+						ptr.To(policyv2.AutoGroupSelf),
+					},
+					Users: []policyv2.SSHUser{policyv2.SSHUser("ssh-it-user")},
+				},
+			},
+		},
+		2, // 2 clients per user
+	)
+	defer scenario.ShutdownAssertNoPanics(t)
+
+	user1Clients, err := scenario.ListTailscaleClients("user1")
+	assertNoErrListClients(t, err)
+
+	user2Clients, err := scenario.ListTailscaleClients("user2")
+	assertNoErrListClients(t, err)
+
+	err = scenario.WaitForTailscaleSync()
+	assertNoErrSync(t, err)
+
+	// Test that user1's devices can SSH to each other
+	for _, client := range user1Clients {
+		for _, peer := range user1Clients {
+			if client.Hostname() == peer.Hostname() {
+				continue
+			}
+
+			assertSSHHostname(t, client, peer)
+		}
+	}
+
+	// Test that user2's devices can SSH to each other
+	for _, client := range user2Clients {
+		for _, peer := range user2Clients {
+			if client.Hostname() == peer.Hostname() {
+				continue
+			}
+
+			assertSSHHostname(t, client, peer)
+		}
+	}
+
+	// Test that user1 cannot SSH to user2's devices
+	for _, client := range user1Clients {
+		for _, peer := range user2Clients {
+			assertSSHPermissionDenied(t, client, peer)
+		}
+	}
+
+	// Test that user2 cannot SSH to user1's devices
+	for _, client := range user2Clients {
+		for _, peer := range user1Clients {
+			assertSSHPermissionDenied(t, client, peer)
+		}
+	}
+}