diff --git a/ALPINE.txt b/ALPINE.txt index 318956c3d51e2..93a84c380075c 100644 --- a/ALPINE.txt +++ b/ALPINE.txt @@ -1 +1 @@ -3.19 \ No newline at end of file +3.22 \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index bd0f2840fddc1..c546cf6574abd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -71,10 +71,10 @@ RUN GOARCH=$TARGETARCH go install -ldflags="\ -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \ -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot -FROM alpine:3.19 +FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables -RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables -RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables +RUN ln -s /sbin/iptables-legacy /sbin/iptables +RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables COPY --from=build-env /go/bin/* /usr/local/bin/ # For compat with the previous run.sh, although ideally you should be diff --git a/Dockerfile.base b/Dockerfile.base index b7e79a43c6fdf..6c3c8ed084fce 100644 --- a/Dockerfile.base +++ b/Dockerfile.base @@ -1,12 +1,12 @@ # Copyright (c) Tailscale Inc & AUTHORS # SPDX-License-Identifier: BSD-3-Clause -FROM alpine:3.19 +FROM alpine:3.22 RUN apk add --no-cache ca-certificates iptables iptables-legacy iproute2 ip6tables iputils -# Alpine 3.19 replaces legacy iptables with nftables based implementation. We +# Alpine 3.19 replaced legacy iptables with nftables based implementation. We # can't be certain that all hosts that run Tailscale containers currently # suppport nftables, so link back to legacy for backwards compatibility reasons. # TODO(irbekrm): add some way how to determine if we still run on nodes that # don't support nftables, so that we can eventually remove these symlinks. -RUN rm /sbin/iptables && ln -s /sbin/iptables-legacy /sbin/iptables -RUN rm /sbin/ip6tables && ln -s /sbin/ip6tables-legacy /sbin/ip6tables +RUN ln -s /sbin/iptables-legacy /sbin/iptables +RUN ln -s /sbin/ip6tables-legacy /sbin/ip6tables diff --git a/Makefile b/Makefile index 532bded9413b9..b78ef046913a7 100644 --- a/Makefile +++ b/Makefile @@ -18,28 +18,36 @@ lint: ## Run golangci-lint updatedeps: ## Update depaware deps # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. 
- PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --vendor --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update -goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --goos=linux,darwin,windows,android,ios --vendor --internal \ tailscale.com/tsnet + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \ + tailscale.com/cmd/tailscaled + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \ + tailscale.com/cmd/tailscaled depaware: ## Run depaware checks # depaware (via x/tools/go/packages) shells back to "go", so make sure the "go" # it finds in its $$PATH is the right one. - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --vendor --internal \ tailscale.com/cmd/tailscaled \ tailscale.com/cmd/tailscale \ tailscale.com/cmd/derper \ tailscale.com/cmd/k8s-operator \ tailscale.com/cmd/stund \ tailscale.com/cmd/tsidp - PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --internal \ + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --goos=linux,darwin,windows,android,ios --vendor --internal \ tailscale.com/tsnet + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-minbox.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min --add=cli)" --vendor --internal \ + tailscale.com/cmd/tailscaled + PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check --file=depaware-min.txt --goos=linux --tags="$$(./tool/go run ./cmd/featuretags --min)" --vendor --internal \ + tailscale.com/cmd/tailscaled buildwindows: ## Build tailscale CLI for windows/amd64 GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled diff --git a/README.md b/README.md index 2c9713a6f339c..70b92d411b9de 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ not open source. ## Building -We always require the latest Go release, currently Go 1.23. (While we build +We always require the latest Go release, currently Go 1.25. (While we build releases with our [Go fork](https://github.com/tailscale/go/), its use is not required.) 
diff --git a/VERSION.txt b/VERSION.txt index 2d0226d9f3d99..604e786f2b495 100644 --- a/VERSION.txt +++ b/VERSION.txt @@ -1 +1 @@ -1.88.1 +1.90.3 diff --git a/appc/appconnector.go b/appc/appconnector.go index 89c6c9aeb9aa7..e7b5032f0edc4 100644 --- a/appc/appconnector.go +++ b/appc/appconnector.go @@ -12,19 +12,20 @@ package appc import ( "context" "fmt" + "maps" "net/netip" "slices" "strings" "sync" "time" - "golang.org/x/net/dns/dnsmessage" + "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/views" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/execqueue" - "tailscale.com/util/mak" "tailscale.com/util/slicesx" ) @@ -115,19 +116,6 @@ func metricStoreRoutes(rate, nRoutes int64) { recordMetric(nRoutes, metricStoreRoutesNBuckets, metricStoreRoutesN) } -// RouteInfo is a data structure used to persist the in memory state of an AppConnector -// so that we can know, even after a restart, which routes came from ACLs and which were -// learned from domains. -type RouteInfo struct { - // Control is the routes from the 'routes' section of an app connector acl. - Control []netip.Prefix `json:",omitempty"` - // Domains are the routes discovered by observing DNS lookups for configured domains. - Domains map[string][]netip.Addr `json:",omitempty"` - // Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards, - // its result is added to Domains. - Wildcards []string `json:",omitempty"` -} - // AppConnector is an implementation of an AppConnector that performs // its function as a subsystem inside of a tailscale node. At the control plane // side App Connector routing is configured in terms of domains rather than IP @@ -138,11 +126,17 @@ type RouteInfo struct { // routes not yet served by the AppConnector the local node configuration is // updated to advertise the new route. type AppConnector struct { + // These fields are immutable after initialization. logf logger.Logf + eventBus *eventbus.Bus routeAdvertiser RouteAdvertiser + pubClient *eventbus.Client + updatePub *eventbus.Publisher[appctype.RouteUpdate] + storePub *eventbus.Publisher[appctype.RouteInfo] - // storeRoutesFunc will be called to persist routes if it is not nil. - storeRoutesFunc func(*RouteInfo) error + // hasStoredRoutes records whether the connector was initialized with + // persisted route information. + hasStoredRoutes bool // mu guards the fields that follow mu sync.Mutex @@ -164,17 +158,50 @@ type AppConnector struct { writeRateDay *rateLogger } +// Config carries the settings for an [AppConnector]. +type Config struct { + // Logf is the logger to which debug logs from the connector will be sent. + // It must be non-nil. + Logf logger.Logf + + // EventBus receives events when the collection of routes maintained by the + // connector is updated. It must be non-nil. + EventBus *eventbus.Bus + + // RouteAdvertiser allows the connector to update the set of advertised routes. + RouteAdvertiser RouteAdvertiser + + // RouteInfo, if non-nil, is used as the initial set of routes for the + // connector. If nil, the connector starts empty. + RouteInfo *appctype.RouteInfo + + // HasStoredRoutes indicates that the connector should assume stored routes. + HasStoredRoutes bool +} + // NewAppConnector creates a new AppConnector. 
-func NewAppConnector(logf logger.Logf, routeAdvertiser RouteAdvertiser, routeInfo *RouteInfo, storeRoutesFunc func(*RouteInfo) error) *AppConnector { +func NewAppConnector(c Config) *AppConnector { + switch { + case c.Logf == nil: + panic("missing logger") + case c.EventBus == nil: + panic("missing event bus") + } + ec := c.EventBus.Client("appc.AppConnector") + ac := &AppConnector{ - logf: logger.WithPrefix(logf, "appc: "), - routeAdvertiser: routeAdvertiser, - storeRoutesFunc: storeRoutesFunc, + logf: logger.WithPrefix(c.Logf, "appc: "), + eventBus: c.EventBus, + pubClient: ec, + updatePub: eventbus.Publish[appctype.RouteUpdate](ec), + storePub: eventbus.Publish[appctype.RouteInfo](ec), + routeAdvertiser: c.RouteAdvertiser, + hasStoredRoutes: c.HasStoredRoutes, } - if routeInfo != nil { - ac.domains = routeInfo.Domains - ac.wildcards = routeInfo.Wildcards - ac.controlRoutes = routeInfo.Control + if c.RouteInfo != nil { + ac.domains = c.RouteInfo.Domains + ac.wildcards = c.RouteInfo.Wildcards + ac.controlRoutes = c.RouteInfo.Control } ac.writeRateMinute = newRateLogger(time.Now, time.Minute, func(c int64, s time.Time, l int64) { ac.logf("routeInfo write rate: %d in minute starting at %v (%d routes)", c, s, l) @@ -188,29 +215,26 @@ func NewAppConnector(logf logger.Logf, routeAdvertiser RouteAdvertiser, routeInf // ShouldStoreRoutes returns true if the appconnector was created with the controlknob on // and is storing its discovered routes persistently. -func (e *AppConnector) ShouldStoreRoutes() bool { - return e.storeRoutesFunc != nil -} +func (e *AppConnector) ShouldStoreRoutes() bool { return e.hasStoredRoutes } // storeRoutesLocked takes the current state of the AppConnector and persists it -func (e *AppConnector) storeRoutesLocked() error { - if !e.ShouldStoreRoutes() { - return nil - } - - // log write rate and write size - numRoutes := int64(len(e.controlRoutes)) - for _, rs := range e.domains { - numRoutes += int64(len(rs)) +func (e *AppConnector) storeRoutesLocked() { + if e.storePub.ShouldPublish() { + // log write rate and write size + numRoutes := int64(len(e.controlRoutes)) + for _, rs := range e.domains { + numRoutes += int64(len(rs)) + } + e.writeRateMinute.update(numRoutes) + e.writeRateDay.update(numRoutes) + + e.storePub.Publish(appctype.RouteInfo{ + // Clone here, as the subscriber will handle these outside our lock. + Control: slices.Clone(e.controlRoutes), + Domains: maps.Clone(e.domains), + Wildcards: slices.Clone(e.wildcards), + }) } - e.writeRateMinute.update(numRoutes) - e.writeRateDay.update(numRoutes) - - return e.storeRoutesFunc(&RouteInfo{ - Control: e.controlRoutes, - Domains: e.domains, - Wildcards: e.wildcards, - }) } // ClearRoutes removes all route state from the AppConnector. @@ -220,7 +244,8 @@ func (e *AppConnector) ClearRoutes() error { e.controlRoutes = nil e.domains = nil e.wildcards = nil - return e.storeRoutesLocked() + e.storeRoutesLocked() + return nil } // UpdateDomainsAndRoutes starts an asynchronous update of the configuration @@ -249,6 +274,18 @@ func (e *AppConnector) Wait(ctx context.Context) { e.queue.Wait(ctx) } +// Close closes the connector and cleans up resources associated with it. +// It is safe (and a noop) to call Close on nil. +func (e *AppConnector) Close() { + if e == nil { + return + } + e.mu.Lock() + defer e.mu.Unlock() + e.queue.Shutdown() // TODO(creachadair): Should we wait for it too? 
+ e.pubClient.Close() +} + func (e *AppConnector) updateDomains(domains []string) { e.mu.Lock() defer e.mu.Unlock() @@ -280,20 +317,26 @@ func (e *AppConnector) updateDomains(domains []string) { } } - // Everything left in oldDomains is a domain we're no longer tracking - // and if we are storing route info we can unadvertise the routes - if e.ShouldStoreRoutes() { + // Everything left in oldDomains is a domain we're no longer tracking and we + // can unadvertise the routes. + if e.hasStoredRoutes { toRemove := []netip.Prefix{} for _, addrs := range oldDomains { for _, a := range addrs { toRemove = append(toRemove, netip.PrefixFrom(a, a.BitLen())) } } - e.queue.Add(func() { - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + + if len(toRemove) != 0 { + if ra := e.routeAdvertiser; ra != nil { + e.queue.Add(func() { + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes on domain removal: %v: %v: %v", slicesx.MapKeys(oldDomains), toRemove, err) + } + }) } - }) + e.updatePub.Publish(appctype.RouteUpdate{Unadvertise: toRemove}) + } } e.logf("handling domains: %v and wildcards: %v", slicesx.MapKeys(e.domains), e.wildcards) @@ -314,11 +357,10 @@ func (e *AppConnector) updateRoutes(routes []netip.Prefix) { var toRemove []netip.Prefix - // If we're storing routes and know e.controlRoutes is a good - // representation of what should be in AdvertisedRoutes we can stop - // advertising routes that used to be in e.controlRoutes but are not - // in routes. - if e.ShouldStoreRoutes() { + // If we know e.controlRoutes is a good representation of what should be in + // AdvertisedRoutes we can stop advertising routes that used to be in + // e.controlRoutes but are not in routes. + if e.hasStoredRoutes { toRemove = routesWithout(e.controlRoutes, routes) } @@ -335,19 +377,23 @@ nextRoute: } } - e.queue.Add(func() { - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes: %v: %v", routes, err) - } - if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { - e.logf("failed to unadvertise routes: %v: %v", toRemove, err) - } + if e.routeAdvertiser != nil { + e.queue.Add(func() { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes: %v: %v", routes, err) + } + if err := e.routeAdvertiser.UnadvertiseRoute(toRemove...); err != nil { + e.logf("failed to unadvertise routes: %v: %v", toRemove, err) + } + }) + } + e.updatePub.Publish(appctype.RouteUpdate{ + Advertise: routes, + Unadvertise: toRemove, }) e.controlRoutes = routes - if err := e.storeRoutesLocked(); err != nil { - e.logf("failed to store route info: %v", err) - } + e.storeRoutesLocked() } // Domains returns the currently configured domain list. @@ -372,124 +418,6 @@ func (e *AppConnector) DomainRoutes() map[string][]netip.Addr { return drCopy } -// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS -// response is being returned over the PeerAPI. The response is parsed and -// matched against the configured domains, if matched the routeAdvertiser is -// advised to advertise the discovered route. 
-func (e *AppConnector) ObserveDNSResponse(res []byte) error { - var p dnsmessage.Parser - if _, err := p.Start(res); err != nil { - return err - } - if err := p.SkipAllQuestions(); err != nil { - return err - } - - // cnameChain tracks a chain of CNAMEs for a given query in order to reverse - // a CNAME chain back to the original query for flattening. The keys are - // CNAME record targets, and the value is the name the record answers, so - // for www.example.com CNAME example.com, the map would contain - // ["example.com"] = "www.example.com". - var cnameChain map[string]string - - // addressRecords is a list of address records found in the response. - var addressRecords map[string][]netip.Addr - - for { - h, err := p.AnswerHeader() - if err == dnsmessage.ErrSectionDone { - break - } - if err != nil { - return err - } - - if h.Class != dnsmessage.ClassINET { - if err := p.SkipAnswer(); err != nil { - return err - } - continue - } - - switch h.Type { - case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: - default: - if err := p.SkipAnswer(); err != nil { - return err - } - continue - - } - - domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".") - if len(domain) == 0 { - continue - } - - if h.Type == dnsmessage.TypeCNAME { - res, err := p.CNAMEResource() - if err != nil { - return err - } - cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") - if len(cname) == 0 { - continue - } - mak.Set(&cnameChain, cname, domain) - continue - } - - switch h.Type { - case dnsmessage.TypeA: - r, err := p.AResource() - if err != nil { - return err - } - addr := netip.AddrFrom4(r.A) - mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) - case dnsmessage.TypeAAAA: - r, err := p.AAAAResource() - if err != nil { - return err - } - addr := netip.AddrFrom16(r.AAAA) - mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) - default: - if err := p.SkipAnswer(); err != nil { - return err - } - continue - } - } - - e.mu.Lock() - defer e.mu.Unlock() - - for domain, addrs := range addressRecords { - domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain) - - // domain and none of the CNAMEs in the chain are routed - if !isRouted { - continue - } - - // advertise each address we have learned for the routed domain, that - // was not already known. - var toAdvertise []netip.Prefix - for _, addr := range addrs { - if !e.isAddrKnownLocked(domain, addr) { - toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen())) - } - } - - if len(toAdvertise) > 0 { - e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise) - e.scheduleAdvertisement(domain, toAdvertise...) - } - } - return nil -} - // starting from the given domain that resolved to an address, find it, or any // of the domains in the CNAME chain toward resolving it, that are routed // domains, returning the routed domain name and a bool indicating whether a @@ -544,10 +472,13 @@ func (e *AppConnector) isAddrKnownLocked(domain string, addr netip.Addr) bool { // associated with the given domain. 
func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Prefix) { e.queue.Add(func() { - if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { - e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) - return + if e.routeAdvertiser != nil { + if err := e.routeAdvertiser.AdvertiseRoute(routes...); err != nil { + e.logf("failed to advertise routes for %s: %v: %v", domain, routes, err) + return + } } + e.updatePub.Publish(appctype.RouteUpdate{Advertise: routes}) e.mu.Lock() defer e.mu.Unlock() @@ -561,9 +492,7 @@ func (e *AppConnector) scheduleAdvertisement(domain string, routes ...netip.Pref e.logf("[v2] advertised route for %v: %v", domain, addr) } } - if err := e.storeRoutesLocked(); err != nil { - e.logf("failed to store route info: %v", err) - } + e.storeRoutesLocked() }) } diff --git a/appc/appconnector_test.go b/appc/appconnector_test.go index c13835f39ed9a..5c362d6fd1217 100644 --- a/appc/appconnector_test.go +++ b/appc/appconnector_test.go @@ -4,7 +4,8 @@ package appc import ( - "context" + stdcmp "cmp" + "fmt" "net/netip" "reflect" "slices" @@ -12,28 +13,31 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc/appctest" "tailscale.com/tstest" + "tailscale.com/types/appctype" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/slicesx" ) -func fakeStoreRoutes(*RouteInfo) error { return nil } - func TestUpdateDomains(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) - } - a.UpdateDomains([]string{"example.com"}) + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + a.UpdateDomains([]string{"example.com"}) a.Wait(ctx) if got, want := a.Domains().AsSlice(), []string{"example.com"}; !slices.Equal(got, want) { t.Errorf("got %v; want %v", got, want) @@ -58,15 +62,19 @@ func TestUpdateDomains(t *testing.T) { } func TestUpdateRoutes(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + a.updateDomains([]string{"*.example.com"}) // This route should be collapsed into the range @@ -103,19 +111,37 @@ func TestUpdateRoutes(t *testing.T) { if !slices.EqualFunc(rc.RemovedRoutes(), wantRemoved, prefixEqual) { t.Fatalf("unexpected removed routes: %v", rc.RemovedRoutes()) } + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.1/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.1/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{ + Advertise: prefixes("192.0.0.1/32", 
"192.0.2.0/24"), + Unadvertise: prefixes("192.0.2.1/32"), + }), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + mak.Set(&a.domains, "example.com", []netip.Addr{netip.MustParseAddr("192.0.2.1")}) rc.SetRoutes([]netip.Prefix{netip.MustParsePrefix("192.0.2.1/32")}) routes := []netip.Prefix{netip.MustParsePrefix("192.0.2.0/24")} @@ -125,23 +151,36 @@ func TestUpdateRoutesUnadvertisesContainedRoutes(t *testing.T) { if !slices.EqualFunc(routes, rc.Routes(), prefixEqual) { t.Fatalf("got %v, want %v", rc.Routes(), routes) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{ + Advertise: prefixes("192.0.2.0/24"), + Unadvertise: prefixes("192.0.2.1/32"), + }), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestDomainRoutes(t *testing.T) { + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) a.updateDomains([]string{"example.com"}) if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) } - a.Wait(context.Background()) + a.Wait(t.Context()) want := map[string][]netip.Addr{ "example.com": {netip.MustParseAddr("192.0.0.8")}, @@ -150,19 +189,29 @@ func TestDomainRoutes(t *testing.T) { if got := a.DomainRoutes(); !reflect.DeepEqual(got, want) { t.Fatalf("DomainRoutes: got %v, want %v", got, want) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestObserveDNSResponse(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) // a has no domains configured, so it should not advertise any routes if err := a.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -239,19 +288,38 @@ func TestObserveDNSResponse(t *testing.T) { if !slices.Contains(a.domains["example.com"], netip.MustParseAddr("192.0.2.1")) { t.Errorf("missing %v from %v", "192.0.2.1", a.domains["exmaple.com"]) } + + if err := eventbustest.ExpectExactly(w, + 
eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), // from initial DNS response, via example.com + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.9/32")}), // from CNAME response + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.10/32")}), // from CNAME response, mid-chain + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("2001:db8::1/128")}), // v6 DNS response + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.2.0/24")}), // additional prefix + eventbustest.Type[appctype.RouteInfo](), + // N.B. no update for 192.0.2.1 as it is already covered + ); err != nil { + t.Error(err) + } } } func TestWildcardDomains(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) a.updateDomains([]string{"*.example.com"}) if err := a.ObserveDNSResponse(dnsResponse("foo.example.com.", "192.0.0.8")); err != nil { @@ -278,6 +346,13 @@ func TestWildcardDomains(t *testing.T) { if len(a.wildcards) != 1 { t.Errorf("expected only one wildcard domain, got %v", a.wildcards) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("192.0.0.8/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } @@ -393,8 +468,10 @@ func prefixes(in ...string) []netip.Prefix { } func TestUpdateRouteRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -406,12 +483,14 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + // nothing has yet been advertised assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) @@ -434,12 +513,21 @@ func TestUpdateRouteRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.2/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32", "1.2.3.2/32")}), // no duplicates here + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } } } func TestUpdateDomainRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -451,12 +539,14 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { } } - var 
a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "b.example.com"}, []netip.Prefix{}) @@ -489,12 +579,30 @@ func TestUpdateDomainRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + wantEvents := []any{ + // Each DNS record observed triggers an update. + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}), + } + if shouldStore { + wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{ + Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"), + })) + } + if err := eventbustest.Expect(w, wantEvents...); err != nil { + t.Error(err) + } } } func TestUpdateWildcardRouteRemoval(t *testing.T) { + ctx := t.Context() + bus := eventbustest.NewBus(t) for _, shouldStore := range []bool{false, true} { - ctx := context.Background() + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} assertRoutes := func(prefix string, routes, removedRoutes []netip.Prefix) { @@ -506,12 +614,14 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { } } - var a *AppConnector - if shouldStore { - a = NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) - } else { - a = NewAppConnector(t.Logf, rc, nil, nil) - } + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) + assertRoutes("appc init", []netip.Prefix{}, []netip.Prefix{}) a.UpdateDomainsAndRoutes([]string{"a.example.com", "*.b.example.com"}, []netip.Prefix{}) @@ -544,6 +654,22 @@ func TestUpdateWildcardRouteRemoval(t *testing.T) { wantRemovedRoutes = prefixes("1.2.3.3/32", "1.2.3.4/32") } assertRoutes("removal", wantRoutes, wantRemovedRoutes) + + wantEvents := []any{ + // Each DNS record observed triggers an update. + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.1/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.2/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.3/32")}), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("1.2.3.4/32")}), + } + if shouldStore { + wantEvents = append(wantEvents, eqUpdate(appctype.RouteUpdate{ + Unadvertise: prefixes("1.2.3.3/32", "1.2.3.4/32"), + })) + } + if err := eventbustest.Expect(w, wantEvents...); err != nil { + t.Error(err) + } } } @@ -646,10 +772,22 @@ func TestMetricBucketsAreSorted(t *testing.T) { // routeAdvertiser, calls to Advertise/UnadvertiseRoutes can end up calling // back into AppConnector via authReconfig. If everything is called // synchronously, this results in a deadlock on AppConnector.mu. +// +// TODO(creachadair, 2025-09-18): Remove this along with the advertiser +// interface once the LocalBackend is switched to use the event bus and the +// tests have been updated not to need it. 
func TestUpdateRoutesDeadlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + bus := eventbustest.NewBus(t) + w := eventbustest.NewWatcher(t, bus) rc := &appctest.RouteCollector{} - a := NewAppConnector(t.Logf, rc, &RouteInfo{}, fakeStoreRoutes) + a := NewAppConnector(Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: true, + }) + t.Cleanup(a.Close) advertiseCalled := new(atomic.Bool) unadvertiseCalled := new(atomic.Bool) @@ -693,4 +831,42 @@ func TestUpdateRoutesDeadlock(t *testing.T) { if want := []netip.Prefix{netip.MustParsePrefix("127.0.0.1/32")}; !slices.Equal(slices.Compact(rc.Routes()), want) { t.Fatalf("got %v, want %v", rc.Routes(), want) } + + if err := eventbustest.ExpectExactly(w, + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32", "127.0.0.2/32")}), + eventbustest.Type[appctype.RouteInfo](), + eqUpdate(appctype.RouteUpdate{Advertise: prefixes("127.0.0.1/32"), Unadvertise: prefixes("127.0.0.2/32")}), + eventbustest.Type[appctype.RouteInfo](), + ); err != nil { + t.Error(err) + } +} + +type textUpdate struct { + Advertise []string + Unadvertise []string +} + +func routeUpdateToText(u appctype.RouteUpdate) textUpdate { + var out textUpdate + for _, p := range u.Advertise { + out.Advertise = append(out.Advertise, p.String()) + } + for _, p := range u.Unadvertise { + out.Unadvertise = append(out.Unadvertise, p.String()) + } + return out +} + +// eqUpdate generates an eventbus test filter that matches a appctype.RouteUpdate +// message equal to want, or reports an error giving a human-readable diff. +func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { + return func(got appctype.RouteUpdate) error { + if diff := cmp.Diff(routeUpdateToText(got), routeUpdateToText(want), + cmpopts.SortSlices(stdcmp.Less[string]), + ); diff != "" { + return fmt.Errorf("wrong update (-got, +want):\n%s", diff) + } + return nil + } } diff --git a/appc/observe.go b/appc/observe.go new file mode 100644 index 0000000000000..06dc04f9dcfdf --- /dev/null +++ b/appc/observe.go @@ -0,0 +1,132 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_appconnectors + +package appc + +import ( + "net/netip" + "strings" + + "golang.org/x/net/dns/dnsmessage" + "tailscale.com/util/mak" +) + +// ObserveDNSResponse is a callback invoked by the DNS resolver when a DNS +// response is being returned over the PeerAPI. The response is parsed and +// matched against the configured domains, if matched the routeAdvertiser is +// advised to advertise the discovered route. +func (e *AppConnector) ObserveDNSResponse(res []byte) error { + var p dnsmessage.Parser + if _, err := p.Start(res); err != nil { + return err + } + if err := p.SkipAllQuestions(); err != nil { + return err + } + + // cnameChain tracks a chain of CNAMEs for a given query in order to reverse + // a CNAME chain back to the original query for flattening. The keys are + // CNAME record targets, and the value is the name the record answers, so + // for www.example.com CNAME example.com, the map would contain + // ["example.com"] = "www.example.com". + var cnameChain map[string]string + + // addressRecords is a list of address records found in the response. 
+ var addressRecords map[string][]netip.Addr + + for { + h, err := p.AnswerHeader() + if err == dnsmessage.ErrSectionDone { + break + } + if err != nil { + return err + } + + if h.Class != dnsmessage.ClassINET { + if err := p.SkipAnswer(); err != nil { + return err + } + continue + } + + switch h.Type { + case dnsmessage.TypeCNAME, dnsmessage.TypeA, dnsmessage.TypeAAAA: + default: + if err := p.SkipAnswer(); err != nil { + return err + } + continue + + } + + domain := strings.TrimSuffix(strings.ToLower(h.Name.String()), ".") + if len(domain) == 0 { + continue + } + + if h.Type == dnsmessage.TypeCNAME { + res, err := p.CNAMEResource() + if err != nil { + return err + } + cname := strings.TrimSuffix(strings.ToLower(res.CNAME.String()), ".") + if len(cname) == 0 { + continue + } + mak.Set(&cnameChain, cname, domain) + continue + } + + switch h.Type { + case dnsmessage.TypeA: + r, err := p.AResource() + if err != nil { + return err + } + addr := netip.AddrFrom4(r.A) + mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) + case dnsmessage.TypeAAAA: + r, err := p.AAAAResource() + if err != nil { + return err + } + addr := netip.AddrFrom16(r.AAAA) + mak.Set(&addressRecords, domain, append(addressRecords[domain], addr)) + default: + if err := p.SkipAnswer(); err != nil { + return err + } + continue + } + } + + e.mu.Lock() + defer e.mu.Unlock() + + for domain, addrs := range addressRecords { + domain, isRouted := e.findRoutedDomainLocked(domain, cnameChain) + + // domain and none of the CNAMEs in the chain are routed + if !isRouted { + continue + } + + // advertise each address we have learned for the routed domain, that + // was not already known. + var toAdvertise []netip.Prefix + for _, addr := range addrs { + if !e.isAddrKnownLocked(domain, addr) { + toAdvertise = append(toAdvertise, netip.PrefixFrom(addr, addr.BitLen())) + } + } + + if len(toAdvertise) > 0 { + e.logf("[v2] observed new routes for %s: %s", domain, toAdvertise) + e.scheduleAdvertisement(domain, toAdvertise...) + } + } + return nil +} diff --git a/appc/observe_disabled.go b/appc/observe_disabled.go new file mode 100644 index 0000000000000..45aa285eaa758 --- /dev/null +++ b/appc/observe_disabled.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_appconnectors + +package appc + +func (e *AppConnector) ObserveDNSResponse(res []byte) error { return nil } diff --git a/build_dist.sh b/build_dist.sh index 57231eb7079ea..c05644711cfa3 100755 --- a/build_dist.sh +++ b/build_dist.sh @@ -18,7 +18,7 @@ fi eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion` -if [ "$1" = "shellvars" ]; then +if [ "$#" -ge 1 ] && [ "$1" = "shellvars" ]; then cat < len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' { + return d, true + } + } + return "", false +} diff --git a/client/local/debugportmapper.go b/client/local/debugportmapper.go new file mode 100644 index 0000000000000..04ed1c109a54f --- /dev/null +++ b/client/local/debugportmapper.go @@ -0,0 +1,84 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debugportmapper + +package local + +import ( + "cmp" + "context" + "fmt" + "io" + "net/http" + "net/netip" + "net/url" + "strconv" + "time" + + "tailscale.com/client/tailscale/apitype" +) + +// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. 
+type DebugPortmapOpts struct { + // Duration is how long the mapping should be created for. It defaults + // to 5 seconds if not set. + Duration time.Duration + + // Type is the kind of portmap to debug. The empty string instructs the + // portmap client to perform all known types. Other valid options are + // "pmp", "pcp", and "upnp". + Type string + + // GatewayAddr specifies the gateway address used during portmapping. + // If set, SelfAddr must also be set. If unset, it will be + // autodetected. + GatewayAddr netip.Addr + + // SelfAddr specifies the gateway address used during portmapping. If + // set, GatewayAddr must also be set. If unset, it will be + // autodetected. + SelfAddr netip.Addr + + // LogHTTP instructs the debug-portmap endpoint to print all HTTP + // requests and responses made to the logs. + LogHTTP bool +} + +// DebugPortmap invokes the debug-portmap endpoint, and returns an +// io.ReadCloser that can be used to read the logs that are printed during this +// process. +// +// opts can be nil; if so, default values will be used. +func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { + vals := make(url.Values) + if opts == nil { + opts = &DebugPortmapOpts{} + } + + vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String()) + vals.Set("type", opts.Type) + vals.Set("log_http", strconv.FormatBool(opts.LogHTTP)) + + if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() { + return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is") + } else if opts.GatewayAddr.IsValid() { + vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr)) + } + + req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil) + if err != nil { + return nil, err + } + res, err := lc.doLocalRequestNiceError(req) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + body, _ := io.ReadAll(res.Body) + res.Body.Close() + return nil, fmt.Errorf("HTTP %s: %s", res.Status, body) + } + + return res.Body, nil +} diff --git a/client/local/local.go b/client/local/local.go index 0257c7a260b7a..582c7b8487957 100644 --- a/client/local/local.go +++ b/client/local/local.go @@ -9,7 +9,6 @@ import ( "bytes" "cmp" "context" - "crypto/tls" "encoding/base64" "encoding/json" "errors" @@ -28,20 +27,21 @@ import ( "sync" "time" - "go4.org/mem" "tailscale.com/client/tailscale/apitype" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netutil" + "tailscale.com/net/udprelay/status" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" - "tailscale.com/tka" + "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/key" - "tailscale.com/types/tkatype" "tailscale.com/util/eventbus" ) @@ -381,6 +381,9 @@ func (lc *Client) UserMetrics(ctx context.Context) ([]byte, error) { // // IncrementCounter does not support gauge metrics or negative delta values. func (lc *Client) IncrementCounter(ctx context.Context, name string, delta int) error { + if !buildfeatures.HasClientMetrics { + return nil + } type metricUpdate struct { Name string `json:"name"` Type string `json:"type"` @@ -593,70 +596,6 @@ func (lc *Client) DebugResultJSON(ctx context.Context, action string) (any, erro return x, nil } -// DebugPortmapOpts contains options for the [Client.DebugPortmap] command. 
-type DebugPortmapOpts struct { - // Duration is how long the mapping should be created for. It defaults - // to 5 seconds if not set. - Duration time.Duration - - // Type is the kind of portmap to debug. The empty string instructs the - // portmap client to perform all known types. Other valid options are - // "pmp", "pcp", and "upnp". - Type string - - // GatewayAddr specifies the gateway address used during portmapping. - // If set, SelfAddr must also be set. If unset, it will be - // autodetected. - GatewayAddr netip.Addr - - // SelfAddr specifies the gateway address used during portmapping. If - // set, GatewayAddr must also be set. If unset, it will be - // autodetected. - SelfAddr netip.Addr - - // LogHTTP instructs the debug-portmap endpoint to print all HTTP - // requests and responses made to the logs. - LogHTTP bool -} - -// DebugPortmap invokes the debug-portmap endpoint, and returns an -// io.ReadCloser that can be used to read the logs that are printed during this -// process. -// -// opts can be nil; if so, default values will be used. -func (lc *Client) DebugPortmap(ctx context.Context, opts *DebugPortmapOpts) (io.ReadCloser, error) { - vals := make(url.Values) - if opts == nil { - opts = &DebugPortmapOpts{} - } - - vals.Set("duration", cmp.Or(opts.Duration, 5*time.Second).String()) - vals.Set("type", opts.Type) - vals.Set("log_http", strconv.FormatBool(opts.LogHTTP)) - - if opts.GatewayAddr.IsValid() != opts.SelfAddr.IsValid() { - return nil, fmt.Errorf("both GatewayAddr and SelfAddr must be provided if one is") - } else if opts.GatewayAddr.IsValid() { - vals.Set("gateway_and_self", fmt.Sprintf("%s/%s", opts.GatewayAddr, opts.SelfAddr)) - } - - req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil) - if err != nil { - return nil, err - } - res, err := lc.doLocalRequestNiceError(req) - if err != nil { - return nil, err - } - if res.StatusCode != 200 { - body, _ := io.ReadAll(res.Body) - res.Body.Close() - return nil, fmt.Errorf("HTTP %s: %s", res.Status, body) - } - - return res.Body, nil -} - // SetDevStoreKeyValue set a statestore key/value. It's only meant for development. // The schema (including when keys are re-read) is not a stable interface. func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) error { @@ -674,6 +613,9 @@ func (lc *Client) SetDevStoreKeyValue(ctx context.Context, key, value string) er // the provided duration. If the duration is in the past, the debug logging // is disabled. func (lc *Client) SetComponentDebugLogging(ctx context.Context, component string, d time.Duration) error { + if !buildfeatures.HasDebug { + return feature.ErrUnavailable + } body, err := lc.send(ctx, "POST", fmt.Sprintf("/localapi/v0/component-debug-logging?component=%s&secs=%d", url.QueryEscape(component), int64(d.Seconds())), 200, nil) @@ -813,6 +755,9 @@ func (lc *Client) PushFile(ctx context.Context, target tailcfg.StableNodeID, siz // machine is properly configured to forward IP packets as a subnet router // or exit node. func (lc *Client) CheckIPForwarding(ctx context.Context) error { + if !buildfeatures.HasAdvertiseRoutes { + return nil + } body, err := lc.get200(ctx, "/localapi/v0/check-ip-forwarding") if err != nil { return err @@ -928,6 +873,9 @@ func (lc *Client) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Pref // GetDNSOSConfig returns the system DNS configuration for the current device. 
// That is, it returns the DNS configuration that the system would use if Tailscale weren't being used. func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } body, err := lc.get200(ctx, "/localapi/v0/dns-osconfig") if err != nil { return nil, err @@ -943,6 +891,9 @@ func (lc *Client) GetDNSOSConfig(ctx context.Context) (*apitype.DNSOSConfig, err // It returns the raw DNS response bytes and the resolvers that were used to answer the query // (often just one, but can be more if we raced multiple resolvers). func (lc *Client) QueryDNS(ctx context.Context, name string, queryType string) (bytes []byte, resolvers []*dnstype.Resolver, err error) { + if !buildfeatures.HasDNS { + return nil, nil, feature.ErrUnavailable + } body, err := lc.get200(ctx, fmt.Sprintf("/localapi/v0/dns-query?name=%s&type=%s", url.QueryEscape(name), queryType)) if err != nil { return nil, nil, err @@ -973,28 +924,6 @@ func (lc *Client) Logout(ctx context.Context) error { return err } -// SetDNS adds a DNS TXT record for the given domain name, containing -// the provided TXT value. The intended use case is answering -// LetsEncrypt/ACME dns-01 challenges. -// -// The control plane will only permit SetDNS requests with very -// specific names and values. The name should be -// "_acme-challenge." + your node's MagicDNS name. It's expected that -// clients cache the certs from LetsEncrypt (or whichever CA is -// providing them) and only request new ones as needed; the control plane -// rate limits SetDNS requests. -// -// This is a low-level interface; it's expected that most Tailscale -// users use a higher level interface to getting/using TLS -// certificates. -func (lc *Client) SetDNS(ctx context.Context, name, value string) error { - v := url.Values{} - v.Set("name", name) - v.Set("value", value) - _, err := lc.send(ctx, "POST", "/localapi/v0/set-dns?"+v.Encode(), 200, nil) - return err -} - // DialTCP connects to the host's port via Tailscale. // // The host may be a base DNS name (resolved from the netmap inside @@ -1075,117 +1004,6 @@ func (lc *Client) CurrentDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) return &derpMap, nil } -// CertPair returns a cert and private key for the provided DNS domain. -// -// It returns a cached certificate from disk if it's still valid. -// -// Deprecated: use [Client.CertPair]. -func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return defaultClient.CertPair(ctx, domain) -} - -// CertPair returns a cert and private key for the provided DNS domain. -// -// It returns a cached certificate from disk if it's still valid. -// -// API maturity: this is considered a stable API. -func (lc *Client) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return lc.CertPairWithValidity(ctx, domain, 0) -} - -// CertPairWithValidity returns a cert and private key for the provided DNS -// domain. -// -// It returns a cached certificate from disk if it's still valid. -// When minValidity is non-zero, the returned certificate will be valid for at -// least the given duration, if permitted by the CA. If the certificate is -// valid, but for less than minValidity, it will be synchronously renewed. -// -// API maturity: this is considered a stable API. 
-func (lc *Client) CertPairWithValidity(ctx context.Context, domain string, minValidity time.Duration) (certPEM, keyPEM []byte, err error) { - res, err := lc.send(ctx, "GET", fmt.Sprintf("/localapi/v0/cert/%s?type=pair&min_validity=%s", domain, minValidity), 200, nil) - if err != nil { - return nil, nil, err - } - // with ?type=pair, the response PEM is first the one private - // key PEM block, then the cert PEM blocks. - i := mem.Index(mem.B(res), mem.S("--\n--")) - if i == -1 { - return nil, nil, fmt.Errorf("unexpected output: no delimiter") - } - i += len("--\n") - keyPEM, certPEM = res[:i], res[i:] - if mem.Contains(mem.B(certPEM), mem.S(" PRIVATE KEY-----")) { - return nil, nil, fmt.Errorf("unexpected output: key in cert") - } - return certPEM, keyPEM, nil -} - -// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. -// -// It returns a cached certificate from disk if it's still valid. -// -// It's the right signature to use as the value of -// [tls.Config.GetCertificate]. -// -// Deprecated: use [Client.GetCertificate]. -func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return defaultClient.GetCertificate(hi) -} - -// GetCertificate fetches a TLS certificate for the TLS ClientHello in hi. -// -// It returns a cached certificate from disk if it's still valid. -// -// It's the right signature to use as the value of -// [tls.Config.GetCertificate]. -// -// API maturity: this is considered a stable API. -func (lc *Client) GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - if hi == nil || hi.ServerName == "" { - return nil, errors.New("no SNI ServerName") - } - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - name := hi.ServerName - if !strings.Contains(name, ".") { - if v, ok := lc.ExpandSNIName(ctx, name); ok { - name = v - } - } - certPEM, keyPEM, err := lc.CertPair(ctx, name) - if err != nil { - return nil, err - } - cert, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - return nil, err - } - return &cert, nil -} - -// ExpandSNIName expands bare label name into the most likely actual TLS cert name. -// -// Deprecated: use [Client.ExpandSNIName]. -func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return defaultClient.ExpandSNIName(ctx, name) -} - -// ExpandSNIName expands bare label name into the most likely actual TLS cert name. -func (lc *Client) ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - st, err := lc.StatusWithoutPeers(ctx) - if err != nil { - return "", false - } - for _, d := range st.CertDomains { - if len(d) > len(name)+1 && strings.HasPrefix(d, name) && d[len(name)] == '.' { - return d, true - } - } - return "", false -} - // PingOpts contains options for the ping request. // // The zero value is valid, which means to use defaults. @@ -1219,197 +1037,6 @@ func (lc *Client) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg.Ping return lc.PingWithOpts(ctx, ip, pingtype, PingOpts{}) } -// NetworkLockStatus fetches information about the tailnet key authority, if one is configured. -func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { - body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[*ipnstate.NetworkLockStatus](body) -} - -// NetworkLockInit initializes the tailnet key authority. -// -// TODO(tom): Plumb through disablement secrets. 
-func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { - var b bytes.Buffer - type initRequest struct { - Keys []tka.Key - DisablementValues [][]byte - SupportDisablement []byte - } - - if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil { - return nil, err - } - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[*ipnstate.NetworkLockStatus](body) -} - -// NetworkLockWrapPreauthKey wraps a pre-auth key with information to -// enable unattended bringup in the locked tailnet. -func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { - encodedPrivate, err := tkaKey.MarshalText() - if err != nil { - return "", err - } - - var b bytes.Buffer - type wrapRequest struct { - TSKey string - TKAKey string // key.NLPrivate.MarshalText - } - if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil { - return "", err - } - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b) - if err != nil { - return "", fmt.Errorf("error: %w", err) - } - return string(body), nil -} - -// NetworkLockModify adds and/or removes key(s) to the tailnet key authority. -func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { - var b bytes.Buffer - type modifyRequest struct { - AddKeys []tka.Key - RemoveKeys []tka.Key - } - - if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockSign signs the specified node-key and transmits that signature to the control plane. -// rotationPublic, if specified, must be an ed25519 public key. -func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { - var b bytes.Buffer - type signRequest struct { - NodeKey key.NodePublic - RotationPublic []byte - } - - if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockAffectedSigs returns all signatures signed by the specified keyID. -func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) - if err != nil { - return nil, fmt.Errorf("error: %w", err) - } - return decodeJSON[[]tkatype.MarshaledSignature](body) -} - -// NetworkLockLog returns up to maxEntries number of changes to network-lock state. 
-func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { - v := url.Values{} - v.Set("limit", fmt.Sprint(maxEntries)) - body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) - if err != nil { - return nil, fmt.Errorf("error %w: %s", err, body) - } - return decodeJSON[[]ipnstate.NetworkLockUpdate](body) -} - -// NetworkLockForceLocalDisable forcibly shuts down network lock on this node. -func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { - // This endpoint expects an empty JSON stanza as the payload. - var b bytes.Buffer - if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { - return err - } - - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained -// in url and returns information extracted from it. -func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { - vr := struct { - URL string - }{url} - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr)) - if err != nil { - return nil, fmt.Errorf("sending verify-deeplink: %w", err) - } - - return decodeJSON[*tka.DeeplinkValidationResult](body) -} - -// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. -func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { - vr := struct { - Keys []tkatype.KeyID - ForkFrom string - }{removeKeys, forkFrom.String()} - - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr)) - if err != nil { - return nil, fmt.Errorf("sending generate-recovery-aum: %w", err) - } - - return body, nil -} - -// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. -func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { - r := bytes.NewReader(aum.Serialize()) - body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) - if err != nil { - return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err) - } - - return body, nil -} - -// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. -func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { - r := bytes.NewReader(aum.Serialize()) - _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) - if err != nil { - return fmt.Errorf("sending cosign-recovery-aum: %w", err) - } - return nil -} - -// SetServeConfig sets or replaces the serving settings. -// If config is nil, settings are cleared and serving is disabled. -func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { - h := make(http.Header) - if config != nil { - h.Set("If-Match", config.ETag) - } - _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h) - if err != nil { - return fmt.Errorf("sending serve config: %w", err) - } - return nil -} - // DisconnectControl shuts down all connections to control, thus making control consider this node inactive. 
This can be // run on HA subnet router or app connector replicas before shutting them down to ensure peers get told to switch over // to another replica whilst there is still some grace period for the existing connections to terminate. @@ -1421,40 +1048,6 @@ func (lc *Client) DisconnectControl(ctx context.Context) error { return nil } -// NetworkLockDisable shuts down network-lock across the tailnet. -func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { - if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { - return fmt.Errorf("error: %w", err) - } - return nil -} - -// GetServeConfig return the current serve config. -// -// If the serve config is empty, it returns (nil, nil). -func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { - body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) - if err != nil { - return nil, fmt.Errorf("getting serve config: %w", err) - } - sc, err := getServeConfigFromJSON(body) - if err != nil { - return nil, err - } - if sc == nil { - sc = new(ipn.ServeConfig) - } - sc.ETag = h.Get("Etag") - return sc, nil -} - -func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) { - if err := json.Unmarshal(body, &sc); err != nil { - return nil, err - } - return sc, nil -} - // tailscaledConnectHint gives a little thing about why tailscaled (or // platform equivalent) is not answering localapi connections. // @@ -1610,6 +1203,16 @@ func (lc *Client) DebugSetExpireIn(ctx context.Context, d time.Duration) error { return err } +// DebugPeerRelaySessions returns debug information about the current peer +// relay sessions running through this node. +func (lc *Client) DebugPeerRelaySessions(ctx context.Context) (*status.ServerStatus, error) { + body, err := lc.send(ctx, "GET", "/localapi/v0/debug-peer-relay-sessions", 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + return decodeJSON[*status.ServerStatus](body) +} + // StreamDebugCapture streams a pcap-formatted packet capture. // // The provided context does not determine the lifetime of the @@ -1783,3 +1386,17 @@ func (lc *Client) SuggestExitNode(ctx context.Context) (apitype.ExitNodeSuggesti } return decodeJSON[apitype.ExitNodeSuggestionResponse](body) } + +// ShutdownTailscaled requests a graceful shutdown of tailscaled. +func (lc *Client) ShutdownTailscaled(ctx context.Context) error { + _, err := lc.send(ctx, "POST", "/localapi/v0/shutdown", 200, nil) + return err +} + +func (lc *Client) GetAppConnectorRouteInfo(ctx context.Context) (appctype.RouteInfo, error) { + body, err := lc.get200(ctx, "/localapi/v0/appc-route-info") + if err != nil { + return appctype.RouteInfo{}, err + } + return decodeJSON[appctype.RouteInfo](body) +} diff --git a/client/local/serve.go b/client/local/serve.go new file mode 100644 index 0000000000000..51d15e7e5439b --- /dev/null +++ b/client/local/serve.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package local + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "tailscale.com/ipn" +) + +// GetServeConfig return the current serve config. +// +// If the serve config is empty, it returns (nil, nil). 
+func (lc *Client) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) { + body, h, err := lc.sendWithHeaders(ctx, "GET", "/localapi/v0/serve-config", 200, nil, nil) + if err != nil { + return nil, fmt.Errorf("getting serve config: %w", err) + } + sc, err := getServeConfigFromJSON(body) + if err != nil { + return nil, err + } + if sc == nil { + sc = new(ipn.ServeConfig) + } + sc.ETag = h.Get("Etag") + return sc, nil +} + +func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) { + if err := json.Unmarshal(body, &sc); err != nil { + return nil, err + } + return sc, nil +} + +// SetServeConfig sets or replaces the serving settings. +// If config is nil, settings are cleared and serving is disabled. +func (lc *Client) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error { + h := make(http.Header) + if config != nil { + h.Set("If-Match", config.ETag) + } + _, _, err := lc.sendWithHeaders(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config), h) + if err != nil { + return fmt.Errorf("sending serve config: %w", err) + } + return nil +} diff --git a/client/local/tailnetlock.go b/client/local/tailnetlock.go new file mode 100644 index 0000000000000..9d37d2f3553d5 --- /dev/null +++ b/client/local/tailnetlock.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package local + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/url" + + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" +) + +// NetworkLockStatus fetches information about the tailnet key authority, if one is configured. +func (lc *Client) NetworkLockStatus(ctx context.Context) (*ipnstate.NetworkLockStatus, error) { + body, err := lc.send(ctx, "GET", "/localapi/v0/tka/status", 200, nil) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[*ipnstate.NetworkLockStatus](body) +} + +// NetworkLockInit initializes the tailnet key authority. +// +// TODO(tom): Plumb through disablement secrets. +func (lc *Client) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) { + var b bytes.Buffer + type initRequest struct { + Keys []tka.Key + DisablementValues [][]byte + SupportDisablement []byte + } + + if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil { + return nil, err + } + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/init", 200, &b) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[*ipnstate.NetworkLockStatus](body) +} + +// NetworkLockWrapPreauthKey wraps a pre-auth key with information to +// enable unattended bringup in the locked tailnet. 
+func (lc *Client) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) { + encodedPrivate, err := tkaKey.MarshalText() + if err != nil { + return "", err + } + + var b bytes.Buffer + type wrapRequest struct { + TSKey string + TKAKey string // key.NLPrivate.MarshalText + } + if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil { + return "", err + } + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b) + if err != nil { + return "", fmt.Errorf("error: %w", err) + } + return string(body), nil +} + +// NetworkLockModify adds and/or removes key(s) to the tailnet key authority. +func (lc *Client) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error { + var b bytes.Buffer + type modifyRequest struct { + AddKeys []tka.Key + RemoveKeys []tka.Key + } + + if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockSign signs the specified node-key and transmits that signature to the control plane. +// rotationPublic, if specified, must be an ed25519 public key. +func (lc *Client) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error { + var b bytes.Buffer + type signRequest struct { + NodeKey key.NodePublic + RotationPublic []byte + } + + if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockAffectedSigs returns all signatures signed by the specified keyID. +func (lc *Client) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) { + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID)) + if err != nil { + return nil, fmt.Errorf("error: %w", err) + } + return decodeJSON[[]tkatype.MarshaledSignature](body) +} + +// NetworkLockLog returns up to maxEntries number of changes to network-lock state. +func (lc *Client) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) { + v := url.Values{} + v.Set("limit", fmt.Sprint(maxEntries)) + body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil) + if err != nil { + return nil, fmt.Errorf("error %w: %s", err, body) + } + return decodeJSON[[]ipnstate.NetworkLockUpdate](body) +} + +// NetworkLockForceLocalDisable forcibly shuts down network lock on this node. +func (lc *Client) NetworkLockForceLocalDisable(ctx context.Context) error { + // This endpoint expects an empty JSON stanza as the payload. + var b bytes.Buffer + if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil { + return err + } + + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} + +// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained +// in url and returns information extracted from it. 
+func (lc *Client) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) { + vr := struct { + URL string + }{url} + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr)) + if err != nil { + return nil, fmt.Errorf("sending verify-deeplink: %w", err) + } + + return decodeJSON[*tka.DeeplinkValidationResult](body) +} + +// NetworkLockGenRecoveryAUM generates an AUM for recovering from a tailnet-lock key compromise. +func (lc *Client) NetworkLockGenRecoveryAUM(ctx context.Context, removeKeys []tkatype.KeyID, forkFrom tka.AUMHash) ([]byte, error) { + vr := struct { + Keys []tkatype.KeyID + ForkFrom string + }{removeKeys, forkFrom.String()} + + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/generate-recovery-aum", 200, jsonBody(vr)) + if err != nil { + return nil, fmt.Errorf("sending generate-recovery-aum: %w", err) + } + + return body, nil +} + +// NetworkLockCosignRecoveryAUM co-signs a recovery AUM using the node's tailnet lock key. +func (lc *Client) NetworkLockCosignRecoveryAUM(ctx context.Context, aum tka.AUM) ([]byte, error) { + r := bytes.NewReader(aum.Serialize()) + body, err := lc.send(ctx, "POST", "/localapi/v0/tka/cosign-recovery-aum", 200, r) + if err != nil { + return nil, fmt.Errorf("sending cosign-recovery-aum: %w", err) + } + + return body, nil +} + +// NetworkLockSubmitRecoveryAUM submits a recovery AUM to the control plane. +func (lc *Client) NetworkLockSubmitRecoveryAUM(ctx context.Context, aum tka.AUM) error { + r := bytes.NewReader(aum.Serialize()) + _, err := lc.send(ctx, "POST", "/localapi/v0/tka/submit-recovery-aum", 200, r) + if err != nil { + return fmt.Errorf("sending cosign-recovery-aum: %w", err) + } + return nil +} + +// NetworkLockDisable shuts down network-lock across the tailnet. +func (lc *Client) NetworkLockDisable(ctx context.Context, secret []byte) error { + if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil { + return fmt.Errorf("error: %w", err) + } + return nil +} diff --git a/client/systray/systray.go b/client/systray/systray.go index bd7c1597204ed..4ac08058854e4 100644 --- a/client/systray/systray.go +++ b/client/systray/systray.go @@ -339,9 +339,9 @@ func profileTitle(profile ipn.LoginProfile) string { if profile.NetworkProfile.DomainName != "" { if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { // windows and mac don't support multi-line menu - title += " (" + profile.NetworkProfile.DomainName + ")" + title += " (" + profile.NetworkProfile.DisplayNameOrDefault() + ")" } else { - title += "\n" + profile.NetworkProfile.DomainName + title += "\n" + profile.NetworkProfile.DisplayNameOrDefault() } } return title @@ -540,9 +540,9 @@ func (menu *Menu) copyTailscaleIP(device *ipnstate.PeerStatus) { err := clipboard.WriteAll(ip) if err != nil { log.Printf("clipboard error: %v", err) + } else { + menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } - - menu.sendNotification(fmt.Sprintf("Copied Address for %v", name), ip) } // sendNotification sends a desktop notification with the given title and content. 
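Editor's note on the two new files above: they split the serve-config and tailnet-lock helpers out of the core local client so they can be compiled out with the ts_omit_serve / ts_omit_tailnetlock build tags. One detail worth keeping in mind is the optimistic-concurrency handshake they preserve: GetServeConfig copies the response's Etag header onto the returned config, and SetServeConfig replays it as If-Match, so a concurrent writer makes the POST fail instead of being silently overwritten. The following is a minimal caller sketch, not part of this diff; the AllowFunnel mutation and the "example.ts.net:443" host:port are arbitrary illustrations, and it assumes a default build where ts_omit_serve is not set.

package main

import (
	"context"
	"log"

	"tailscale.com/client/local"
	"tailscale.com/ipn"
)

func main() {
	// The zero value of local.Client talks to the local tailscaled.
	var lc local.Client
	ctx := context.Background()

	// Read the current serve config; the returned value is never nil and
	// carries the Etag response header in sc.ETag.
	sc, err := lc.GetServeConfig(ctx)
	if err != nil {
		log.Fatalf("get serve config: %v", err)
	}

	// Mutate it locally (arbitrary example change).
	if sc.AllowFunnel == nil {
		sc.AllowFunnel = make(map[ipn.HostPort]bool)
	}
	sc.AllowFunnel["example.ts.net:443"] = true

	// Write it back; sc.ETag is sent as If-Match, so if someone else changed
	// the config since our GET, this call fails rather than clobbering it.
	if err := lc.SetServeConfig(ctx, sc); err != nil {
		log.Fatalf("set serve config: %v", err)
	}
}

A build with ts_omit_serve simply does not compile these methods in, which is the point of moving them into their own build-tagged file.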
diff --git a/client/tailscale/cert.go b/client/tailscale/cert.go new file mode 100644 index 0000000000000..4f351ab990984 --- /dev/null +++ b/client/tailscale/cert.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !js && !ts_omit_acme + +package tailscale + +import ( + "context" + "crypto/tls" + + "tailscale.com/client/local" +) + +// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. +func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { + return local.GetCertificate(hi) +} + +// CertPair is an alias for [tailscale.com/client/local.CertPair]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. +func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return local.CertPair(ctx, domain) +} + +// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. +// +// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. +func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { + return local.ExpandSNIName(ctx, name) +} diff --git a/client/tailscale/localclient_aliases.go b/client/tailscale/localclient_aliases.go index 2b53906b71ae4..e3492e841b1c9 100644 --- a/client/tailscale/localclient_aliases.go +++ b/client/tailscale/localclient_aliases.go @@ -5,7 +5,6 @@ package tailscale import ( "context" - "crypto/tls" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" @@ -32,23 +31,11 @@ type IPNBusWatcher = local.IPNBusWatcher // Deprecated: import [tailscale.com/client/local] instead. type BugReportOpts = local.BugReportOpts -// DebugPortmapOpts is an alias for [tailscale.com/client/local.DebugPortmapOpts]. -// -// Deprecated: import [tailscale.com/client/local] instead. -type DebugPortmapOpts = local.DebugPortmapOpts - // PingOpts is an alias for [tailscale.com/client/local.PingOpts]. // // Deprecated: import [tailscale.com/client/local] instead. type PingOpts = local.PingOpts -// GetCertificate is an alias for [tailscale.com/client/local.GetCertificate]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.GetCertificate]. -func GetCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) { - return local.GetCertificate(hi) -} - // SetVersionMismatchHandler is an alias for [tailscale.com/client/local.SetVersionMismatchHandler]. // // Deprecated: import [tailscale.com/client/local] instead. @@ -90,17 +77,3 @@ func Status(ctx context.Context) (*ipnstate.Status, error) { func StatusWithoutPeers(ctx context.Context) (*ipnstate.Status, error) { return local.StatusWithoutPeers(ctx) } - -// CertPair is an alias for [tailscale.com/client/local.CertPair]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.CertPair]. -func CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { - return local.CertPair(ctx, domain) -} - -// ExpandSNIName is an alias for [tailscale.com/client/local.ExpandSNIName]. -// -// Deprecated: import [tailscale.com/client/local] instead and use [local.Client.ExpandSNIName]. 
-func ExpandSNIName(ctx context.Context, name string) (fqdn string, ok bool) { - return local.ExpandSNIName(ctx, name) -} diff --git a/client/web/web.go b/client/web/web.go index 71a015daba465..dbd3d5df0be86 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -24,9 +24,10 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" @@ -496,6 +497,10 @@ func (s *Server) authorizeRequest(w http.ResponseWriter, r *http.Request) (ok bo // Client using system-specific auth. switch distro.Get() { case distro.Synology: + if !buildfeatures.HasSynology { + // Synology support not built in. + return false + } authorized, _ := authorizeSynology(r) return authorized case distro.QNAP: @@ -978,9 +983,18 @@ func (s *Server) serveGetNodeData(w http.ResponseWriter, r *http.Request) { data.ClientVersion = cv } - if st.CurrentTailnet != nil { - data.TailnetName = st.CurrentTailnet.MagicDNSSuffix - data.DomainName = st.CurrentTailnet.Name + profile, _, err := s.lc.ProfileStatus(r.Context()) + if err != nil { + s.logf("error fetching profiles: %v", err) + // If for some reason we can't fetch profiles, + // continue to use st.CurrentTailnet if set. + if st.CurrentTailnet != nil { + data.TailnetName = st.CurrentTailnet.MagicDNSSuffix + data.DomainName = st.CurrentTailnet.Name + } + } else { + data.TailnetName = profile.NetworkProfile.MagicDNSName + data.DomainName = profile.NetworkProfile.DisplayNameOrDefault() } if st.Self.Tags != nil { data.Tags = st.Self.Tags.AsSlice() @@ -1040,7 +1054,7 @@ func availableFeatures() map[string]bool { "advertise-routes": true, // available on all platforms "use-exit-node": featureknob.CanUseExitNode() == nil, "ssh": featureknob.CanRunTailscaleSSH() == nil, - "auto-update": version.IsUnstableBuild() && clientupdate.CanAutoUpdate(), + "auto-update": version.IsUnstableBuild() && feature.CanAutoUpdate(), } return features } diff --git a/clientupdate/clientupdate.go b/clientupdate/clientupdate.go index ffd3fb03bb80d..84b289615f911 100644 --- a/clientupdate/clientupdate.go +++ b/clientupdate/clientupdate.go @@ -27,6 +27,7 @@ import ( "strconv" "strings" + "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/types/lazy" "tailscale.com/types/logger" @@ -252,9 +253,13 @@ func (up *Updater) getUpdateFunction() (fn updateFunction, canAutoUpdate bool) { var canAutoUpdateCache lazy.SyncValue[bool] -// CanAutoUpdate reports whether auto-updating via the clientupdate package +func init() { + feature.HookCanAutoUpdate.Set(canAutoUpdate) +} + +// canAutoUpdate reports whether auto-updating via the clientupdate package // is supported for the current os/distro. -func CanAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } +func canAutoUpdate() bool { return canAutoUpdateCache.Get(canAutoUpdateUncached) } func canAutoUpdateUncached() bool { if version.IsMacSysExt() { diff --git a/clientupdate/clientupdate_windows.go b/clientupdate/clientupdate_windows.go index b79d447ad4d30..5faeda6dd70e3 100644 --- a/clientupdate/clientupdate_windows.go +++ b/clientupdate/clientupdate_windows.go @@ -30,11 +30,6 @@ const ( // tailscale.exe process from running before the msiexec process runs and // tries to overwrite ourselves. 
winMSIEnv = "TS_UPDATE_WIN_MSI" - // winExePathEnv is the environment variable that is set along with - // winMSIEnv and carries the full path of the calling tailscale.exe binary. - // It is used to re-launch the GUI process (tailscale-ipn.exe) after - // install is complete. - winExePathEnv = "TS_UPDATE_WIN_EXE_PATH" // winVersionEnv is the environment variable that is set along with // winMSIEnv and carries the version of tailscale that is being installed. // It is used for logging purposes. @@ -78,6 +73,17 @@ func verifyAuthenticode(path string) error { return authenticode.Verify(path, certSubjectTailscale) } +func isTSGUIPresent() bool { + us, err := os.Executable() + if err != nil { + return false + } + + tsgui := filepath.Join(filepath.Dir(us), "tsgui.dll") + _, err = os.Stat(tsgui) + return err == nil +} + func (up *Updater) updateWindows() error { if msi := os.Getenv(winMSIEnv); msi != "" { // stdout/stderr from this part of the install could be lost since the @@ -131,7 +137,15 @@ you can run the command prompt as Administrator one of these ways: return err } up.cleanupOldDownloads(filepath.Join(msiDir, "*.msi")) - pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s-%s.msi", up.Track, ver, arch) + + qualifiers := []string{ver, arch} + // TODO(aaron): Temporary hack so autoupdate still works on winui builds; + // remove when we enable winui by default on the unstable track. + if isTSGUIPresent() { + qualifiers = append(qualifiers, "winui") + } + + pkgsPath := fmt.Sprintf("%s/tailscale-setup-%s.msi", up.Track, strings.Join(qualifiers, "-")) msiTarget := filepath.Join(msiDir, path.Base(pkgsPath)) if err := up.downloadURLToFile(pkgsPath, msiTarget); err != nil { return err @@ -145,7 +159,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("making tailscale.exe copy to switch to...") up.cleanupOldDownloads(filepath.Join(os.TempDir(), updaterPrefix+"-*.exe")) - selfOrig, selfCopy, err := makeSelfCopy() + _, selfCopy, err := makeSelfCopy() if err != nil { return err } @@ -153,7 +167,7 @@ you can run the command prompt as Administrator one of these ways: up.Logf("running tailscale.exe copy for final install...") cmd := exec.Command(selfCopy, "update") - cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winExePathEnv+"="+selfOrig, winVersionEnv+"="+ver) + cmd.Env = append(os.Environ(), winMSIEnv+"="+msiTarget, winVersionEnv+"="+ver) cmd.Stdout = up.Stderr cmd.Stderr = up.Stderr cmd.Stdin = os.Stdin @@ -189,7 +203,7 @@ func (up *Updater) installMSI(msi string) error { case windows.ERROR_SUCCESS_REBOOT_REQUIRED: // In most cases, updating Tailscale should not require a reboot. // If it does, it might be because we failed to close the GUI - // and the installer couldn't replace tailscale-ipn.exe. + // and the installer couldn't replace its executable. // The old GUI will continue to run until the next reboot. // Not ideal, but also not a retryable error. up.Logf("[unexpected] reboot required") diff --git a/clientupdate/distsign/distsign.go b/clientupdate/distsign/distsign.go index eba4b9267b119..270ee4c1f9ace 100644 --- a/clientupdate/distsign/distsign.go +++ b/clientupdate/distsign/distsign.go @@ -55,7 +55,7 @@ import ( "github.com/hdevalence/ed25519consensus" "golang.org/x/crypto/blake2s" - "tailscale.com/net/tshttpproxy" + "tailscale.com/feature" "tailscale.com/types/logger" "tailscale.com/util/httpm" "tailscale.com/util/must" @@ -330,7 +330,7 @@ func fetch(url string, limit int64) ([]byte, error) { // limit bytes. 
On success, the returned value is a BLAKE2s hash of the file. func (c *Client) download(ctx context.Context, url, dst string, limit int64) ([]byte, int64, error) { tr := http.DefaultTransport.(*http.Transport).Clone() - tr.Proxy = tshttpproxy.ProxyFromEnvironment + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() defer tr.CloseIdleConnections() hc := &http.Client{Transport: tr} diff --git a/cmd/cloner/cloner.go b/cmd/cloner/cloner.go index 15a808141e626..544d00518e113 100644 --- a/cmd/cloner/cloner.go +++ b/cmd/cloner/cloner.go @@ -121,7 +121,12 @@ func gen(buf *bytes.Buffer, it *codegen.ImportTracker, typ *types.Named) { continue } if !hasBasicUnderlying(ft) { - writef("dst.%s = *src.%s.Clone()", fname, fname) + // don't dereference if the underlying type is an interface + if _, isInterface := ft.Underlying().(*types.Interface); isInterface { + writef("if src.%s != nil { dst.%s = src.%s.Clone() }", fname, fname, fname) + } else { + writef("dst.%s = *src.%s.Clone()", fname, fname) + } continue } } diff --git a/cmd/cloner/cloner_test.go b/cmd/cloner/cloner_test.go index cf1063714afda..3556c14bc109e 100644 --- a/cmd/cloner/cloner_test.go +++ b/cmd/cloner/cloner_test.go @@ -59,3 +59,52 @@ func TestSliceContainer(t *testing.T) { }) } } + +func TestInterfaceContainer(t *testing.T) { + examples := []struct { + name string + in *clonerex.InterfaceContainer + }{ + { + name: "nil", + in: nil, + }, + { + name: "zero", + in: &clonerex.InterfaceContainer{}, + }, + { + name: "with_interface", + in: &clonerex.InterfaceContainer{ + Interface: &clonerex.CloneableImpl{Value: 42}, + }, + }, + { + name: "with_nil_interface", + in: &clonerex.InterfaceContainer{ + Interface: nil, + }, + }, + } + + for _, ex := range examples { + t.Run(ex.name, func(t *testing.T) { + out := ex.in.Clone() + if !reflect.DeepEqual(ex.in, out) { + t.Errorf("Clone() = %v, want %v", out, ex.in) + } + + // Verify no aliasing: modifying the clone should not affect the original + if ex.in != nil && ex.in.Interface != nil { + if impl, ok := out.Interface.(*clonerex.CloneableImpl); ok { + impl.Value = 999 + if origImpl, ok := ex.in.Interface.(*clonerex.CloneableImpl); ok { + if origImpl.Value == 999 { + t.Errorf("Clone() aliased memory with original") + } + } + } + } + }) + } +} diff --git a/cmd/cloner/clonerex/clonerex.go b/cmd/cloner/clonerex/clonerex.go index 96bf8a0bd6e9d..6463f91442a32 100644 --- a/cmd/cloner/clonerex/clonerex.go +++ b/cmd/cloner/clonerex/clonerex.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type SliceContainer,InterfaceContainer // Package clonerex is an example package for the cloner tool. package clonerex @@ -9,3 +9,26 @@ package clonerex type SliceContainer struct { Slice []*int } + +// Cloneable is an interface with a Clone method. +type Cloneable interface { + Clone() Cloneable +} + +// CloneableImpl is a concrete type that implements Cloneable. +type CloneableImpl struct { + Value int +} + +func (c *CloneableImpl) Clone() Cloneable { + if c == nil { + return nil + } + return &CloneableImpl{Value: c.Value} +} + +// InterfaceContainer has a pointer to an interface field, which tests +// the special handling for interface types in the cloner. 
+type InterfaceContainer struct { + Interface Cloneable +} diff --git a/cmd/cloner/clonerex/clonerex_clone.go b/cmd/cloner/clonerex/clonerex_clone.go index e334a4e3a1bf4..533d7e723d3ea 100644 --- a/cmd/cloner/clonerex/clonerex_clone.go +++ b/cmd/cloner/clonerex/clonerex_clone.go @@ -35,9 +35,28 @@ var _SliceContainerCloneNeedsRegeneration = SliceContainer(struct { Slice []*int }{}) +// Clone makes a deep copy of InterfaceContainer. +// The result aliases no memory with the original. +func (src *InterfaceContainer) Clone() *InterfaceContainer { + if src == nil { + return nil + } + dst := new(InterfaceContainer) + *dst = *src + if src.Interface != nil { + dst.Interface = src.Interface.Clone() + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _InterfaceContainerCloneNeedsRegeneration = InterfaceContainer(struct { + Interface Cloneable +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of SliceContainer. +// where T is one of SliceContainer,InterfaceContainer. func Clone(dst, src any) bool { switch src := src.(type) { case *SliceContainer: @@ -49,6 +68,15 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *InterfaceContainer: + switch dst := dst.(type) { + case *InterfaceContainer: + *dst = *src.Clone() + return true + case **InterfaceContainer: + *dst = src.Clone() + return true + } } return false } diff --git a/cmd/containerboot/egressservices.go b/cmd/containerboot/egressservices.go index 64ca0a13a4ed7..fe835a69e0b82 100644 --- a/cmd/containerboot/egressservices.go +++ b/cmd/containerboot/egressservices.go @@ -570,7 +570,7 @@ func ensureRulesAdded(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner } // ensureRulesDeleted ensures that the given rules are deleted from the firewall -// configuration. For any rules that do not exist, calling this funcion is a +// configuration. For any rules that do not exist, calling this function is a // no-op. func ensureRulesDeleted(rulesPerSvc map[string][]rule, nfr linuxfw.NetfilterRunner) error { for svc, rules := range rulesPerSvc { diff --git a/cmd/containerboot/kube.go b/cmd/containerboot/kube.go index 4873ae13f753a..e566fa483447c 100644 --- a/cmd/containerboot/kube.go +++ b/cmd/containerboot/kube.go @@ -23,9 +23,9 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/kube/kubetypes" - "tailscale.com/logtail/backoff" "tailscale.com/tailcfg" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/set" ) diff --git a/cmd/derper/ace.go b/cmd/derper/ace.go new file mode 100644 index 0000000000000..56fb68c336cd3 --- /dev/null +++ b/cmd/derper/ace.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// TODO: docs about all this + +package main + +import ( + "errors" + "fmt" + "net" + "net/http" + "strings" + + "tailscale.com/derp/derpserver" + "tailscale.com/net/connectproxy" +) + +// serveConnect handles a CONNECT request for ACE support. +func serveConnect(s *derpserver.Server, w http.ResponseWriter, r *http.Request) { + if !*flagACEEnabled { + http.Error(w, "CONNECT not enabled", http.StatusForbidden) + return + } + if r.TLS == nil { + // This should already be enforced by the caller of serveConnect, but + // double check. 
+ http.Error(w, "CONNECT requires TLS", http.StatusForbidden) + return + } + + ch := &connectproxy.Handler{ + Check: func(hostPort string) error { + host, port, err := net.SplitHostPort(hostPort) + if err != nil { + return err + } + if port != "443" && port != "80" { + // There are only two types of CONNECT requests the client makes + // via ACE: requests for /key (port 443) and requests to upgrade + // to the bidirectional ts2021 Noise protocol. + // + // The ts2021 layer can bootstrap over port 80 (http) or port + // 443 (https). + // + // Without ACE, we prefer port 80 to avoid unnecessary double + // encryption. But enough places require TLS+port 443 that we do + // support that double encryption path as a fallback. + // + // But ACE adds its own TLS layer (ACE is always CONNECT over + // https). If we don't permit port 80 here as a target, we'd + // have three layers of encryption (TLS + TLS + Noise) which is + // even more silly than two. + // + // So we permit port 80 such that we can only have two layers of + // encryption, varying by the request type: + // + // 1. TLS from client to ACE proxy (CONNECT) + // 2a. TLS from ACE proxy to https://controlplane.tailscale.com/key (port 443) + // 2b. ts2021 Noise from ACE proxy to http://controlplane.tailscale.com/ts2021 (port 80) + // + // But nothing's stopping the client from doing its ts2021 + // upgrade over https anyway and having three layers of + // encryption. But we can at least permit the client to do a + // "CONNECT controlplane.tailscale.com:80 HTTP/1.1" if it wants. + return fmt.Errorf("only ports 443 and 80 are allowed") + } + // TODO(bradfitz): make policy configurable from flags and/or come + // from local tailscaled nodeAttrs + if !strings.HasSuffix(host, ".tailscale.com") || strings.Contains(host, "derp") { + return errors.New("bad host") + } + return nil + }, + } + ch.ServeHTTP(w, r) +} diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index 31fd4ea446949..c8a3229e9f41c 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -22,8 +22,8 @@ import ( "testing" "time" - "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -131,9 +131,9 @@ func TestPinnedCertRawIP(t *testing.T) { } defer ln.Close() - ds := derp.NewServer(key.NewNode(), t.Logf) + ds := derpserver.New(key.NewNode(), t.Logf) - derpHandler := derphttp.Handler(ds) + derpHandler := derpserver.Handler(ds) mux := http.NewServeMux() mux.Handle("/derp", derpHandler) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 8adb2d3382b13..01c278fbd1691 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -2,17 +2,13 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus filippo.io/edwards25519/field from filippo.io/edwards25519 - W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ - W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate - W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/cmd/derper+ github.com/coder/websocket/internal/errd from github.com/coder/websocket 
github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw - W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/util/winutil github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ @@ -21,18 +17,11 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ github.com/golang/groupcache/lru from tailscale.com/net/dnscache - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/munnerz/goautoneg from github.com/prometheus/common/expfmt @@ -49,11 +38,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa W 💣 github.com/tailscale/go-winio/internal/socket from github.com/tailscale/go-winio W github.com/tailscale/go-winio/internal/stringbuffer from github.com/tailscale/go-winio/internal/fs W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ - L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink github.com/tailscale/setec/client/setec from tailscale.com/cmd/derper github.com/tailscale/setec/types/api from github.com/tailscale/setec/client/setec - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/net/tsaddr @@ -89,15 +75,17 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version 💣 tailscale.com/atomicfile from tailscale.com/cmd/derper+ - tailscale.com/client/local from tailscale.com/derp + tailscale.com/client/local from tailscale.com/derp/derpserver tailscale.com/client/tailscale/apitype from tailscale.com/client/local tailscale.com/derp from tailscale.com/cmd/derper+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from 
tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/derper - tailscale.com/disco from tailscale.com/derp + tailscale.com/derp/derpserver from tailscale.com/cmd/derper + tailscale.com/disco from tailscale.com/derp/derpserver tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ + tailscale.com/feature/buildfeatures from tailscale.com/feature+ tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local @@ -105,6 +93,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/cmd/derper+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + tailscale.com/net/connectproxy from tailscale.com/cmd/derper tailscale.com/net/dnscache from tailscale.com/derp/derphttp tailscale.com/net/ktimeout from tailscale.com/cmd/derper tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -116,24 +105,25 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/net/sockstats from tailscale.com/derp/derphttp tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/derper - L tailscale.com/net/tcpinfo from tailscale.com/derp + L tailscale.com/net/tcpinfo from tailscale.com/derp/derpserver tailscale.com/net/tlsdial from tailscale.com/derp/derphttp tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/ipn+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+ + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/net/wsconn from tailscale.com/cmd/derper tailscale.com/paths from tailscale.com/client/local 💣 tailscale.com/safesocket from tailscale.com/client/local tailscale.com/syncs from tailscale.com/cmd/derper+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tka from tailscale.com/client/local+ - W tailscale.com/tsconst from tailscale.com/net/netmon+ + tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate - tailscale.com/tstime/rate from tailscale.com/derp + tailscale.com/tstime/rate from tailscale.com/derp/derpserver tailscale.com/tsweb from tailscale.com/cmd/derper+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper tailscale.com/tsweb/varz from tailscale.com/tsweb+ + tailscale.com/types/appctype from tailscale.com/client/local tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/tailcfg+ @@ -142,7 +132,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/types/logger from tailscale.com/cmd/derper+ tailscale.com/types/netmap from tailscale.com/ipn tailscale.com/types/opt from tailscale.com/envknob+ - tailscale.com/types/persist from tailscale.com/ipn + tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/ipn tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter @@ -150,9 +140,8 @@ tailscale.com/cmd/derper dependencies: (generated by 
github.com/tailscale/depawa tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/ipn+ tailscale.com/util/cibuild from tailscale.com/health - tailscale.com/util/clientmetric from tailscale.com/net/netmon+ + tailscale.com/util/clientmetric from tailscale.com/net/netmon tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy tailscale.com/util/ctxkey from tailscale.com/tsweb+ 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics @@ -160,12 +149,10 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/eventbus from tailscale.com/net/netmon+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/health+ - tailscale.com/util/multierr from tailscale.com/health+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/rands from tailscale.com/tsweb - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/set from tailscale.com/derp/derpserver+ tailscale.com/util/singleflight from tailscale.com/net/dnscache tailscale.com/util/slicesx from tailscale.com/cmd/derper+ tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting @@ -178,7 +165,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/util/vizerror from tailscale.com/tailcfg+ W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ - tailscale.com/version from tailscale.com/derp+ + tailscale.com/version from tailscale.com/cmd/derper+ tailscale.com/version/distro from tailscale.com/envknob+ tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap golang.org/x/crypto/acme from golang.org/x/crypto/acme/autocert @@ -186,31 +173,24 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/tka - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ - golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/nacl/secretbox + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/util/winutil+ golang.org/x/exp/maps from tailscale.com/util/syspolicy/setting L golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2/hpack from net/http+ - golang.org/x/net/idna from 
golang.org/x/crypto/acme/autocert+ + golang.org/x/net/dns/dnsmessage from tailscale.com/net/dnscache + golang.org/x/net/idna from golang.org/x/crypto/acme/autocert golang.org/x/net/internal/socks from golang.org/x/net/proxy golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sync/singleflight from github.com/tailscale/setec/client/setec golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -220,6 +200,22 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/derper+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from slices+ @@ -345,6 +341,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ @@ -359,7 +356,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/unsafeheader from internal/reflectlite+ io from bufio+ io/fs from crypto/x509+ - L io/ioutil from github.com/mitchellh/go-ps+ + L io/ioutil from github.com/mitchellh/go-ps iter from maps+ log from expvar+ log/internal from log @@ -380,16 +377,16 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from golang.zx2c4.com/wireguard/windows/tunnel/winipcfg+ os/signal from tailscale.com/cmd/derper W 
os/user from tailscale.com/util/winutil path from github.com/prometheus/client_golang/prometheus/internal+ path/filepath from crypto/x509+ reflect from crypto/x509+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from github.com/prometheus/client_golang/prometheus/internal+ regexp/syntax from regexp runtime from crypto/internal/fips140+ runtime/debug from github.com/prometheus/client_golang/prometheus+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 7ea404beb50af..857d7def3b6ff 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -40,8 +40,7 @@ import ( "github.com/tailscale/setec/client/setec" "golang.org/x/time/rate" "tailscale.com/atomicfile" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/metrics" "tailscale.com/net/ktimeout" "tailscale.com/net/stunserver" @@ -90,7 +89,10 @@ var ( // tcpUserTimeout is intentionally short, so that hung connections are cleaned up promptly. DERPs should be nearby users. tcpUserTimeout = flag.Duration("tcp-user-timeout", 15*time.Second, "TCP user timeout") // tcpWriteTimeout is the timeout for writing to client TCP connections. It does not apply to mesh connections. - tcpWriteTimeout = flag.Duration("tcp-write-timeout", derp.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") + tcpWriteTimeout = flag.Duration("tcp-write-timeout", derpserver.DefaultTCPWiteTimeout, "TCP write timeout; 0 results in no timeout being set on writes") + + // ACE + flagACEEnabled = flag.Bool("ace", false, "whether to enable embedded ACE server [experimental + in-development as of 2025-09-12; not yet documented]") ) var ( @@ -186,7 +188,7 @@ func main() { serveTLS := tsweb.IsProd443(*addr) || *certMode == "manual" - s := derp.NewServer(cfg.PrivateKey, log.Printf) + s := derpserver.New(cfg.PrivateKey, log.Printf) s.SetVerifyClient(*verifyClients) s.SetTailscaledSocketPath(*socket) s.SetVerifyClientURL(*verifyClientURL) @@ -253,7 +255,7 @@ func main() { mux := http.NewServeMux() if *runDERP { - derpHandler := derphttp.Handler(s) + derpHandler := derpserver.Handler(s) derpHandler = addWebSocketSupport(s, derpHandler) mux.Handle("/derp", derpHandler) } else { @@ -264,8 +266,8 @@ func main() { // These two endpoints are the same. Different versions of the clients // have assumes different paths over time so we support both. 
- mux.HandleFunc("/derp/probe", derphttp.ProbeHandler) - mux.HandleFunc("/derp/latency-check", derphttp.ProbeHandler) + mux.HandleFunc("/derp/probe", derpserver.ProbeHandler) + mux.HandleFunc("/derp/latency-check", derpserver.ProbeHandler) go refreshBootstrapDNSLoop() mux.HandleFunc("/bootstrap-dns", tsweb.BrowserHeaderHandlerFunc(handleBootstrapDNS)) @@ -277,7 +279,7 @@ func main() { tsweb.AddBrowserHeaders(w) io.WriteString(w, "User-agent: *\nDisallow: /\n") })) - mux.Handle("/generate_204", http.HandlerFunc(derphttp.ServeNoContent)) + mux.Handle("/generate_204", http.HandlerFunc(derpserver.ServeNoContent)) debug := tsweb.Debugger(mux) debug.KV("TLS hostname", *hostname) debug.KV("Mesh key", s.HasMeshKey()) @@ -373,6 +375,11 @@ func main() { tlsRequestVersion.Add(label, 1) tlsActiveVersion.Add(label, 1) defer tlsActiveVersion.Add(label, -1) + + if r.Method == "CONNECT" { + serveConnect(s, w, r) + return + } } mux.ServeHTTP(w, r) @@ -380,7 +387,7 @@ func main() { if *httpPort > -1 { go func() { port80mux := http.NewServeMux() - port80mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + port80mux.HandleFunc("/generate_204", derpserver.ServeNoContent) port80mux.Handle("/", certManager.HTTPHandler(tsweb.Port80Handler{Main: mux})) port80srv := &http.Server{ Addr: net.JoinHostPort(listenHost, fmt.Sprintf("%d", *httpPort)), diff --git a/cmd/derper/derper_test.go b/cmd/derper/derper_test.go index 6dce1fcdfebdd..d27f8cb20144d 100644 --- a/cmd/derper/derper_test.go +++ b/cmd/derper/derper_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/tstest/deptest" ) @@ -78,20 +78,20 @@ func TestNoContent(t *testing.T) { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "https://localhost/generate_204", nil) if tt.input != "" { - req.Header.Set(derphttp.NoContentChallengeHeader, tt.input) + req.Header.Set(derpserver.NoContentChallengeHeader, tt.input) } w := httptest.NewRecorder() - derphttp.ServeNoContent(w, req) + derpserver.ServeNoContent(w, req) resp := w.Result() if tt.want == "" { - if h, found := resp.Header[derphttp.NoContentResponseHeader]; found { + if h, found := resp.Header[derpserver.NoContentResponseHeader]; found { t.Errorf("got %+v; expected no response header", h) } return } - if got := resp.Header.Get(derphttp.NoContentResponseHeader); got != tt.want { + if got := resp.Header.Get(derpserver.NoContentResponseHeader); got != tt.want { t.Errorf("got %q; want %q", got, tt.want) } }) diff --git a/cmd/derper/mesh.go b/cmd/derper/mesh.go index cbb2fa59ac030..909b5f2ca18c4 100644 --- a/cmd/derper/mesh.go +++ b/cmd/derper/mesh.go @@ -13,11 +13,12 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/types/logger" ) -func startMesh(s *derp.Server) error { +func startMesh(s *derpserver.Server) error { if *meshWith == "" { return nil } @@ -32,7 +33,7 @@ func startMesh(s *derp.Server) error { return nil } -func startMeshWithHost(s *derp.Server, hostTuple string) error { +func startMeshWithHost(s *derpserver.Server, hostTuple string) error { var host string var dialHost string hostParts := strings.Split(hostTuple, "/") diff --git a/cmd/derper/websocket.go b/cmd/derper/websocket.go index 05f40deb816d5..82fd30bed165a 100644 --- a/cmd/derper/websocket.go +++ b/cmd/derper/websocket.go @@ -11,14 +11,14 @@ import ( "strings" "github.com/coder/websocket" - "tailscale.com/derp" + "tailscale.com/derp/derpserver" 
"tailscale.com/net/wsconn" ) var counterWebSocketAccepts = expvar.NewInt("derp_websocket_accepts") // addWebSocketSupport returns a Handle wrapping base that adds WebSocket server support. -func addWebSocketSupport(s *derp.Server, base http.Handler) http.Handler { +func addWebSocketSupport(s *derpserver.Server, base http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { up := strings.ToLower(r.Header.Get("Upgrade")) diff --git a/cmd/featuretags/featuretags.go b/cmd/featuretags/featuretags.go new file mode 100644 index 0000000000000..8c8a2ceaf54ff --- /dev/null +++ b/cmd/featuretags/featuretags.go @@ -0,0 +1,86 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The featuretags command helps other build tools select Tailscale's Go build +// tags to use. +package main + +import ( + "flag" + "fmt" + "log" + "maps" + "slices" + "strings" + + "tailscale.com/feature/featuretags" + "tailscale.com/util/set" +) + +var ( + min = flag.Bool("min", false, "remove all features not mentioned in --add") + remove = flag.String("remove", "", "a comma-separated list of features to remove from the build. (without the 'ts_omit_' prefix)") + add = flag.String("add", "", "a comma-separated list of features or tags to add, if --min is used.") + list = flag.Bool("list", false, "if true, list all known features and what they do") +) + +func main() { + flag.Parse() + + features := featuretags.Features + + if *list { + for _, f := range slices.Sorted(maps.Keys(features)) { + fmt.Printf("%20s: %s\n", f, features[f].Desc) + } + return + } + + var keep = map[featuretags.FeatureTag]bool{} + for t := range strings.SplitSeq(*add, ",") { + if t != "" { + for ft := range featuretags.Requires(featuretags.FeatureTag(t)) { + keep[ft] = true + } + } + } + var tags []string + if keep[featuretags.CLI] { + tags = append(tags, "ts_include_cli") + } + if *min { + for _, f := range slices.Sorted(maps.Keys(features)) { + if f == "" { + continue + } + if !keep[f] && f.IsOmittable() { + tags = append(tags, f.OmitTag()) + } + } + } + removeSet := set.Set[featuretags.FeatureTag]{} + for v := range strings.SplitSeq(*remove, ",") { + if v == "" { + continue + } + f := featuretags.FeatureTag(v) + if _, ok := features[f]; !ok { + log.Fatalf("unknown feature %q in --remove", f) + } + removeSet.Add(f) + } + for ft := range removeSet { + set := featuretags.RequiredBy(ft) + for dependent := range set { + if !removeSet.Contains(dependent) { + log.Fatalf("cannot remove %q without also removing %q, which depends on it", ft, dependent) + } + } + tags = append(tags, ft.OmitTag()) + } + slices.Sort(tags) + tags = slices.Compact(tags) + if len(tags) != 0 { + fmt.Println(strings.Join(tags, ",")) + } +} diff --git a/cmd/k8s-nameserver/main.go b/cmd/k8s-nameserver/main.go index ca4b449358083..84e65452d2334 100644 --- a/cmd/k8s-nameserver/main.go +++ b/cmd/k8s-nameserver/main.go @@ -31,6 +31,9 @@ const ( tsNetDomain = "ts.net" // addr is the the address that the UDP and TCP listeners will listen on. addr = ":1053" + // defaultTTL is the default TTL for DNS records in seconds. + // Set to 0 to disable caching. Can be increased when usage patterns are better understood. + defaultTTL = 0 // The following constants are specific to the nameserver configuration // provided by a mounted Kubernetes Configmap. 
The Configmap mounted at @@ -39,9 +42,9 @@ const ( kubeletMountedConfigLn = "..data" ) -// nameserver is a simple nameserver that responds to DNS queries for A records +// nameserver is a simple nameserver that responds to DNS queries for A and AAAA records // for ts.net domain names over UDP or TCP. It serves DNS responses from -// in-memory IPv4 host records. It is intended to be deployed on Kubernetes with +// in-memory IPv4 and IPv6 host records. It is intended to be deployed on Kubernetes with // a ConfigMap mounted at /config that should contain the host records. It // dynamically reconfigures its in-memory mappings as the contents of the // mounted ConfigMap changes. @@ -56,10 +59,13 @@ type nameserver struct { // in-memory records. configWatcher <-chan string - mu sync.Mutex // protects following + mu sync.RWMutex // protects following // ip4 are the in-memory hostname -> IP4 mappings that the nameserver // uses to respond to A record queries. ip4 map[dnsname.FQDN][]net.IP + // ip6 are the in-memory hostname -> IP6 mappings that the nameserver + // uses to respond to AAAA record queries. + ip6 map[dnsname.FQDN][]net.IP } func main() { @@ -98,16 +104,13 @@ func main() { tcpSig <- s // stop the TCP listener } -// handleFunc is a DNS query handler that can respond to A record queries from +// handleFunc is a DNS query handler that can respond to A and AAAA record queries from // the nameserver's in-memory records. -// - If an A record query is received and the -// nameserver's in-memory records contain records for the queried domain name, -// return a success response. -// - If an A record query is received, but the -// nameserver's in-memory records do not contain records for the queried domain name, -// return NXDOMAIN. -// - If an A record query is received, but the queried domain name is not valid, return Format Error. -// - If a query is received for any other record type than A, return Not Implemented. +// - For A queries: returns IPv4 addresses if available, NXDOMAIN if the name doesn't exist +// - For AAAA queries: returns IPv6 addresses if available, NOERROR with no data if only +// IPv4 exists (per RFC 4074), or NXDOMAIN if the name doesn't exist at all +// - For invalid domain names: returns Format Error +// - For other record types: returns Not Implemented func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { h := func(w dns.ResponseWriter, r *dns.Msg) { m := new(dns.Msg) @@ -135,35 +138,19 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { m.RecursionAvailable = false ips := n.lookupIP4(fqdn) - if ips == nil || len(ips) == 0 { + if len(ips) == 0 { // As we are the authoritative nameserver for MagicDNS // names, if we do not have a record for this MagicDNS // name, it does not exist. m = m.SetRcode(r, dns.RcodeNameError) return } - // TODO (irbekrm): TTL is currently set to 0, meaning - // that cluster workloads will not cache the DNS - // records. Revisit this in future when we understand - // the usage patterns better- is it putting too much - // load on kube DNS server or is this fine? for _, ip := range ips { - rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 0}, A: ip} + rr := &dns.A{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: defaultTTL}, A: ip} m.SetRcode(r, dns.RcodeSuccess) m.Answer = append(m.Answer, rr) } case dns.TypeAAAA: - // TODO (irbekrm): add IPv6 support. 
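// Illustrative sketch (not part of this patch): a minimal client, using the
// github.com/miekg/dns package this file already imports, that exercises the
// AAAA behavior documented above — NOERROR with an empty answer when a name
// has only A records (per RFC 4074), NXDOMAIN when the name is unknown. The
// address matches the addr constant above; "foo.tailnet-xyz.ts.net" is a
// hypothetical MagicDNS name assumed to have only an A record.
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/miekg/dns"
//	)
//
//	func main() {
//		c := new(dns.Client)
//		m := new(dns.Msg)
//		m.SetQuestion("foo.tailnet-xyz.ts.net.", dns.TypeAAAA)
//
//		r, _, err := c.Exchange(m, "127.0.0.1:1053")
//		if err != nil {
//			log.Fatal(err)
//		}
//		switch r.Rcode {
//		case dns.RcodeSuccess:
//			// An empty NOERROR answer means the name exists but has no AAAA
//			// records; the caller should follow up with an A query.
//			fmt.Printf("NOERROR with %d AAAA answers\n", len(r.Answer))
//		case dns.RcodeNameError:
//			fmt.Println("NXDOMAIN: the name has neither A nor AAAA records")
//		}
//	}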
- // The nameserver currently does not support IPv6 - // (records are not being created for IPv6 Pod addresses). - // However, we can expect that some callers will - // nevertheless send AAAA queries. - // We have to return NOERROR if a query is received for - // an AAAA record for a DNS name that we have an A - // record for- else the caller might not follow with an - // A record query. - // https://github.com/tailscale/tailscale/issues/12321 - // https://datatracker.ietf.org/doc/html/rfc4074 q := r.Question[0].Name fqdn, err := dnsname.ToFQDN(q) if err != nil { @@ -174,14 +161,27 @@ func (n *nameserver) handleFunc() func(w dns.ResponseWriter, r *dns.Msg) { // single source of truth for MagicDNS names by // non-tailnet Kubernetes workloads. m.Authoritative = true - ips := n.lookupIP4(fqdn) - if len(ips) == 0 { + m.RecursionAvailable = false + + ips := n.lookupIP6(fqdn) + // Also check if we have IPv4 records to determine correct response code. + // If the name exists (has A records) but no AAAA records, we return NOERROR + // per RFC 4074. If the name doesn't exist at all, we return NXDOMAIN. + ip4s := n.lookupIP4(fqdn) + + if len(ips) == 0 && len(ip4s) == 0 { // As we are the authoritative nameserver for MagicDNS - // names, if we do not have a record for this MagicDNS + // names, if we do not have any record for this MagicDNS // name, it does not exist. m = m.SetRcode(r, dns.RcodeNameError) return } + + // Return IPv6 addresses if available + for _, ip := range ips { + rr := &dns.AAAA{Hdr: dns.RR_Header{Name: q, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: defaultTTL}, AAAA: ip} + m.Answer = append(m.Answer, rr) + } m.SetRcode(r, dns.RcodeSuccess) default: log.Printf("[unexpected] nameserver received a query for an unsupported record type: %s", r.Question[0].String()) @@ -231,10 +231,11 @@ func (n *nameserver) resetRecords() error { log.Printf("error reading nameserver's configuration: %v", err) return err } - if dnsCfgBytes == nil || len(dnsCfgBytes) < 1 { + if len(dnsCfgBytes) == 0 { log.Print("nameserver's configuration is empty, any in-memory records will be unset") n.mu.Lock() n.ip4 = make(map[dnsname.FQDN][]net.IP) + n.ip6 = make(map[dnsname.FQDN][]net.IP) n.mu.Unlock() return nil } @@ -249,30 +250,63 @@ func (n *nameserver) resetRecords() error { } ip4 := make(map[dnsname.FQDN][]net.IP) + ip6 := make(map[dnsname.FQDN][]net.IP) defer func() { n.mu.Lock() defer n.mu.Unlock() n.ip4 = ip4 + n.ip6 = ip6 }() - if len(dnsCfg.IP4) == 0 { + if len(dnsCfg.IP4) == 0 && len(dnsCfg.IP6) == 0 { log.Print("nameserver's configuration contains no records, any in-memory records will be unset") return nil } + // Process IPv4 records for fqdn, ips := range dnsCfg.IP4 { fqdn, err := dnsname.ToFQDN(fqdn) if err != nil { log.Printf("invalid nameserver's configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err) continue // one invalid hostname should not break the whole nameserver } + var validIPs []net.IP for _, ipS := range ips { ip := net.ParseIP(ipS).To4() if ip == nil { // To4 returns nil if IP is not a IPv4 address log.Printf("invalid nameserver's configuration: %v does not appear to be an IPv4 address; skipping this record", ipS) continue // one invalid IP address should not break the whole nameserver } - ip4[fqdn] = []net.IP{ip} + validIPs = append(validIPs, ip) + } + if len(validIPs) > 0 { + ip4[fqdn] = validIPs + } + } + + // Process IPv6 records + for fqdn, ips := range dnsCfg.IP6 { + fqdn, err := dnsname.ToFQDN(fqdn) + if err != nil { + log.Printf("invalid nameserver's 
configuration: %s is not a valid FQDN: %v; skipping this record", fqdn, err) + continue // one invalid hostname should not break the whole nameserver + } + var validIPs []net.IP + for _, ipS := range ips { + ip := net.ParseIP(ipS) + if ip == nil { + log.Printf("invalid nameserver's configuration: %v does not appear to be a valid IP address; skipping this record", ipS) + continue + } + // Check if it's a valid IPv6 address + if ip.To4() != nil { + log.Printf("invalid nameserver's configuration: %v appears to be IPv4 but was in IPv6 records; skipping this record", ipS) + continue + } + validIPs = append(validIPs, ip.To16()) + } + if len(validIPs) > 0 { + ip6[fqdn] = validIPs } } return nil @@ -372,8 +406,20 @@ func (n *nameserver) lookupIP4(fqdn dnsname.FQDN) []net.IP { if n.ip4 == nil { return nil } - n.mu.Lock() - defer n.mu.Unlock() + n.mu.RLock() + defer n.mu.RUnlock() f := n.ip4[fqdn] return f } + +// lookupIP6 returns any IPv6 addresses for the given FQDN from nameserver's +// in-memory records. +func (n *nameserver) lookupIP6(fqdn dnsname.FQDN) []net.IP { + if n.ip6 == nil { + return nil + } + n.mu.RLock() + defer n.mu.RUnlock() + f := n.ip6[fqdn] + return f +} diff --git a/cmd/k8s-nameserver/main_test.go b/cmd/k8s-nameserver/main_test.go index d9a33c4faffe5..bca010048664a 100644 --- a/cmd/k8s-nameserver/main_test.go +++ b/cmd/k8s-nameserver/main_test.go @@ -19,6 +19,7 @@ func TestNameserver(t *testing.T) { tests := []struct { name string ip4 map[dnsname.FQDN][]net.IP + ip6 map[dnsname.FQDN][]net.IP query *dns.Msg wantResp *dns.Msg }{ @@ -112,6 +113,49 @@ func TestNameserver(t *testing.T) { Authoritative: true, }}, }, + { + name: "AAAA record query with IPv6 record", + ip6: map[dnsname.FQDN][]net.IP{dnsname.FQDN("foo.bar.com."): {net.ParseIP("2001:db8::1")}}, + query: &dns.Msg{ + Question: []dns.Question{{Name: "foo.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{Id: 1, RecursionDesired: true}, + }, + wantResp: &dns.Msg{ + Answer: []dns.RR{&dns.AAAA{Hdr: dns.RR_Header{ + Name: "foo.bar.com", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}, + AAAA: net.ParseIP("2001:db8::1")}}, + Question: []dns.Question{{Name: "foo.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{ + Id: 1, + Rcode: dns.RcodeSuccess, + RecursionAvailable: false, + RecursionDesired: true, + Response: true, + Opcode: dns.OpcodeQuery, + Authoritative: true, + }}, + }, + { + name: "Dual-stack: both A and AAAA records exist", + ip4: map[dnsname.FQDN][]net.IP{dnsname.FQDN("dual.bar.com."): {{10, 0, 0, 1}}}, + ip6: map[dnsname.FQDN][]net.IP{dnsname.FQDN("dual.bar.com."): {net.ParseIP("2001:db8::1")}}, + query: &dns.Msg{ + Question: []dns.Question{{Name: "dual.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{Id: 1}, + }, + wantResp: &dns.Msg{ + Answer: []dns.RR{&dns.AAAA{Hdr: dns.RR_Header{ + Name: "dual.bar.com", Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: 0}, + AAAA: net.ParseIP("2001:db8::1")}}, + Question: []dns.Question{{Name: "dual.bar.com", Qtype: dns.TypeAAAA}}, + MsgHdr: dns.MsgHdr{ + Id: 1, + Rcode: dns.RcodeSuccess, + Response: true, + Opcode: dns.OpcodeQuery, + Authoritative: true, + }}, + }, { name: "CNAME record query", ip4: map[dnsname.FQDN][]net.IP{dnsname.FQDN("foo.bar.com."): {{1, 2, 3, 4}}}, @@ -133,6 +177,7 @@ func TestNameserver(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ns := &nameserver{ ip4: tt.ip4, + ip6: tt.ip6, } handler := ns.handleFunc() fakeRespW := &fakeResponseWriter{} @@ -149,43 +194,63 @@ func TestResetRecords(t *testing.T) { name string config []byte hasIp4 
map[dnsname.FQDN][]net.IP + hasIp6 map[dnsname.FQDN][]net.IP wantsIp4 map[dnsname.FQDN][]net.IP + wantsIp6 map[dnsname.FQDN][]net.IP wantsErr bool }{ { name: "previously empty nameserver.ip4 gets set", config: []byte(`{"version": "v1alpha1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {{1, 2, 3, 4}}}, + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "nameserver.ip4 gets reset", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1alpha1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {{1, 2, 3, 4}}}, + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "configuration with incompatible version", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1beta1", "ip4": {"foo.bar.com": ["1.2.3.4"]}}`), wantsIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, + wantsIp6: nil, wantsErr: true, }, { name: "nameserver.ip4 gets reset to empty config when no configuration is provided", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: make(map[dnsname.FQDN][]net.IP), }, { name: "nameserver.ip4 gets reset to empty config when the provided configuration is empty", hasIp4: map[dnsname.FQDN][]net.IP{"baz.bar.com.": {{1, 1, 3, 3}}}, config: []byte(`{"version": "v1alpha1", "ip4": {}}`), wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: make(map[dnsname.FQDN][]net.IP), + }, + { + name: "nameserver.ip6 gets set", + config: []byte(`{"version": "v1alpha1", "ip6": {"foo.bar.com": ["2001:db8::1"]}}`), + wantsIp4: make(map[dnsname.FQDN][]net.IP), + wantsIp6: map[dnsname.FQDN][]net.IP{"foo.bar.com.": {net.ParseIP("2001:db8::1")}}, + }, + { + name: "dual-stack configuration", + config: []byte(`{"version": "v1alpha1", "ip4": {"dual.bar.com": ["10.0.0.1"]}, "ip6": {"dual.bar.com": ["2001:db8::1"]}}`), + wantsIp4: map[dnsname.FQDN][]net.IP{"dual.bar.com.": {{10, 0, 0, 1}}}, + wantsIp6: map[dnsname.FQDN][]net.IP{"dual.bar.com.": {net.ParseIP("2001:db8::1")}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ns := &nameserver{ ip4: tt.hasIp4, + ip6: tt.hasIp6, configReader: func() ([]byte, error) { return tt.config, nil }, } if err := ns.resetRecords(); err == nil == tt.wantsErr { @@ -194,6 +259,9 @@ func TestResetRecords(t *testing.T) { if diff := cmp.Diff(ns.ip4, tt.wantsIp4); diff != "" { t.Fatalf("unexpected nameserver.ip4 contents (-got +want): \n%s", diff) } + if diff := cmp.Diff(ns.ip6, tt.wantsIp6); diff != "" { + t.Fatalf("unexpected nameserver.ip6 contents (-got +want): \n%s", diff) + } }) } } diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index d94b5b6cf52f7..6cffda2ddb2c8 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -5,95 +5,19 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L 
github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from 
github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus + github.com/blang/semver/v4 from k8s.io/component-base/metrics 💣 github.com/cespare/xxhash/v2 from 
github.com/prometheus/client_golang/prometheus github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/distribution/reference from tailscale.com/cmd/k8s-operator github.com/emicklei/go-restful/v3 from k8s.io/kube-openapi/pkg/common github.com/emicklei/go-restful/v3/log from github.com/emicklei/go-restful/v3 @@ -113,8 +37,6 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/go-logr/logr from github.com/go-logr/logr/slogr+ github.com/go-logr/logr/slogr from github.com/go-logr/zapr github.com/go-logr/zapr from sigs.k8s.io/controller-runtime/pkg/log/zap+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet github.com/go-openapi/jsonpointer from github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference from k8s.io/kube-openapi/pkg/internal+ github.com/go-openapi/jsonreference/internal from github.com/go-openapi/jsonreference @@ -137,17 +59,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 github.com/google/go-cmp/cmp/internal/value from github.com/google/go-cmp/cmp github.com/google/gofuzz from k8s.io/apimachinery/pkg/apis/meta/v1+ github.com/google/gofuzz/bytesource from github.com/google/gofuzz - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm + github.com/hdevalence/ed25519consensus from tailscale.com/tka + W 💣 github.com/inconshreveable/mousetrap from github.com/spf13/cobra github.com/josharian/intern from github.com/mailru/easyjson/jlexer L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink @@ -162,13 +76,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 
github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag - L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket github.com/modern-go/concurrent from github.com/json-iterator/go 💣 github.com/modern-go/reflect2 from github.com/json-iterator/go @@ -179,17 +89,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil from github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header from github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil 💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ - github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics + github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics+ github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/client_golang/prometheus/promhttp from sigs.k8s.io/controller-runtime/pkg/metrics/server+ github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+ github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+ - LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus + LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus+ LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ - github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf + github.com/spf13/cobra from k8s.io/component-base/cli/flag + github.com/spf13/pflag from k8s.io/client-go/tools/clientcmd+ W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -203,9 +114,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile+ - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from 
tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn @@ -217,8 +126,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 + go.opentelemetry.io/otel/attribute from go.opentelemetry.io/otel/trace + go.opentelemetry.io/otel/codes from go.opentelemetry.io/otel/trace + 💣 go.opentelemetry.io/otel/internal from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/internal/attribute from go.opentelemetry.io/otel/attribute + go.opentelemetry.io/otel/trace from k8s.io/component-base/metrics + go.opentelemetry.io/otel/trace/embedded from go.opentelemetry.io/otel/trace go.uber.org/multierr from go.uber.org/zap+ go.uber.org/zap from github.com/go-logr/zapr+ go.uber.org/zap/buffer from go.uber.org/zap/internal/bufferpool+ @@ -378,8 +292,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/api/meta/testrestmapper from k8s.io/client-go/testing k8s.io/apimachinery/pkg/api/resource from k8s.io/api/autoscaling/v1+ k8s.io/apimachinery/pkg/api/validation from k8s.io/apimachinery/pkg/util/managedfields/internal+ + k8s.io/apimachinery/pkg/api/validation/path from k8s.io/apiserver/pkg/endpoints/request 💣 k8s.io/apimachinery/pkg/apis/meta/internalversion from k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme+ - k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata + k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme from k8s.io/client-go/metadata+ k8s.io/apimachinery/pkg/apis/meta/internalversion/validation from k8s.io/client-go/util/watchlist 💣 k8s.io/apimachinery/pkg/apis/meta/v1 from k8s.io/api/admission/v1+ k8s.io/apimachinery/pkg/apis/meta/v1/unstructured from k8s.io/apimachinery/pkg/runtime/serializer/versioning+ @@ -422,13 +337,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/apimachinery/pkg/util/uuid from sigs.k8s.io/controller-runtime/pkg/internal/controller+ k8s.io/apimachinery/pkg/util/validation from k8s.io/apimachinery/pkg/api/validation+ k8s.io/apimachinery/pkg/util/validation/field from k8s.io/apimachinery/pkg/api/errors+ + k8s.io/apimachinery/pkg/util/version from k8s.io/apiserver/pkg/features+ k8s.io/apimachinery/pkg/util/wait from k8s.io/client-go/tools/cache+ k8s.io/apimachinery/pkg/util/yaml from k8s.io/apimachinery/pkg/runtime/serializer/json k8s.io/apimachinery/pkg/version from k8s.io/client-go/discovery+ k8s.io/apimachinery/pkg/watch from k8s.io/apimachinery/pkg/apis/meta/v1+ k8s.io/apimachinery/third_party/forked/golang/json from k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/third_party/forked/golang/reflect from k8s.io/apimachinery/pkg/conversion + k8s.io/apiserver/pkg/authentication/user from k8s.io/apiserver/pkg/endpoints/request + k8s.io/apiserver/pkg/endpoints/request from tailscale.com/k8s-operator/api-proxy + k8s.io/apiserver/pkg/features from 
k8s.io/apiserver/pkg/endpoints/request k8s.io/apiserver/pkg/storage/names from tailscale.com/cmd/k8s-operator + k8s.io/apiserver/pkg/util/feature from k8s.io/apiserver/pkg/endpoints/request+ k8s.io/client-go/applyconfigurations/admissionregistration/v1 from k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1+ k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 from k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 from k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 @@ -698,6 +618,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ k8s.io/client-go/util/keyutil from k8s.io/client-go/util/cert k8s.io/client-go/util/watchlist from k8s.io/client-go/dynamic+ k8s.io/client-go/util/workqueue from k8s.io/client-go/transport+ + k8s.io/component-base/cli/flag from k8s.io/component-base/featuregate + k8s.io/component-base/featuregate from k8s.io/apiserver/pkg/features+ + k8s.io/component-base/metrics from k8s.io/component-base/metrics/legacyregistry+ + k8s.io/component-base/metrics/legacyregistry from k8s.io/component-base/metrics/prometheus/feature + k8s.io/component-base/metrics/prometheus/feature from k8s.io/component-base/featuregate + k8s.io/component-base/metrics/prometheusextension from k8s.io/component-base/metrics + k8s.io/component-base/version from k8s.io/component-base/featuregate+ k8s.io/klog/v2 from k8s.io/apimachinery/pkg/api/meta+ k8s.io/klog/v2/internal/buffer from k8s.io/klog/v2 k8s.io/klog/v2/internal/clock from k8s.io/klog/v2 @@ -779,31 +706,34 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet + 
tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator+ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ @@ -811,10 +741,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator+ + tailscale.com/ipn/store/kubestore from tailscale.com/cmd/k8s-operator tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ tailscale.com/k8s-operator from tailscale.com/cmd/k8s-operator tailscale.com/k8s-operator/api-proxy from tailscale.com/cmd/k8s-operator @@ -835,21 +763,18 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/cmd/k8s-operator+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -859,31 +784,28 @@ tailscale.com/cmd/k8s-operator 
dependencies: (generated by github.com/tailscale/ tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/net/connstats+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/sessionrecording from tailscale.com/k8s-operator/sessionrecording+ @@ -891,17 +813,17 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/ipn/ipnlocal+ tailscale.com/tsnet from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime from tailscale.com/cmd/k8s-operator+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from 
tailscale.com/ipn+ @@ -911,7 +833,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/client/tailscale+ @@ -922,13 +845,15 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/cmd/k8s-operator+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/cmd/k8s-operator+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting + L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ tailscale.com/util/execqueue from tailscale.com/appc+ @@ -937,10 +862,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -962,13 +885,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/util/syspolicy/rsop from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from 
tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -987,15 +909,12 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/ssh+ - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -1006,15 +925,14 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy + golang.org/x/net/http2 from k8s.io/apimachinery/pkg/util/net+ golang.org/x/net/http2/hpack from golang.org/x/net/http2+ golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ @@ -1022,13 +940,13 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/net/websocket from tailscale.com/k8s-operator/sessionrecording/ws golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ - golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator + 
golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/k8s-operator+ golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ @@ -1043,13 +961,28 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from github.com/gaissmai/bart+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ - compress/zlib from debug/pe+ + compress/gzip from github.com/emicklei/go-restful/v3+ + compress/zlib from github.com/emicklei/go-restful/v3+ container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp+ container/list from crypto/tls+ context from crypto/tls+ @@ -1113,7 +1046,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -1132,11 +1065,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ - errors from archive/tar+ + encoding/xml from github.com/emicklei/go-restful/v3+ + errors from bufio+ expvar from github.com/prometheus/client_golang/prometheus+ flag from github.com/spf13/pflag+ - fmt from archive/tar+ + fmt from compress/flate+ go/ast from go/doc+ go/build/constraint from go/parser go/doc from k8s.io/apimachinery/pkg/runtime @@ -1162,7 +1095,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from 
crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -1186,7 +1119,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ @@ -1199,9 +1132,9 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io from bufio+ + io/fs from crypto/x509+ + io/ioutil from github.com/godbus/dbus/v5+ iter from go/ast+ log from expvar+ log/internal from log+ @@ -1209,51 +1142,51 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ log/slog/internal from log/slog log/slog/internal/buffer from log/slog maps from sigs.k8s.io/controller-runtime/pkg/predicate+ - math from archive/tar+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/google/go-cmp/cmp+ - math/rand/v2 from tailscale.com/derp+ + math/rand/v2 from crypto/ecdsa+ mime from github.com/prometheus/common/expfmt+ mime/multipart from github.com/go-openapi/swag+ mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ net/http/httptrace from github.com/prometheus-community/pro-bing+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from sigs.k8s.io/controller-runtime/pkg/manager+ net/netip from github.com/gaissmai/bart+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/signal from sigs.k8s.io/controller-runtime/pkg/manager/signals - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ + regexp from github.com/davecgh/go-spew/spew+ regexp/syntax from regexp - runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/metrics from github.com/prometheus/client_golang/prometheus+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof slices from encoding/base32+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from k8s.io/apimachinery/pkg/util/diff+ - 
text/template from html/template + text/template from html/template+ text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/cmd/k8s-operator/deploy/chart/templates/.gitignore b/cmd/k8s-operator/deploy/chart/templates/.gitignore new file mode 100644 index 0000000000000..ae7c682d9fd15 --- /dev/null +++ b/cmd/k8s-operator/deploy/chart/templates/.gitignore @@ -0,0 +1,10 @@ +# Don't add helm chart CRDs to git. Canonical CRD files live in +# cmd/k8s-operator/deploy/crds. +# +# Generate for local usage with: +# go run tailscale.com/cmd/k8s-operator/generate helmcrd +/connector.yaml +/dnsconfig.yaml +/proxyclass.yaml +/proxygroup.yaml +/recorder.yaml diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml index bffad47f97191..a819aa6518684 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_dnsconfigs.yaml @@ -52,7 +52,6 @@ spec: using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. - NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. type: object required: - spec @@ -101,6 +100,54 @@ spec: tag: description: Tag defaults to unstable. type: string + pod: + description: Pod configuration. + type: object + properties: + tolerations: + description: If specified, applies tolerations to the pods deployed by the DNSConfig resource. + type: array + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + type: object + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + replicas: + description: Replicas specifies how many Pods to create. Defaults to 1. + type: integer + format: int32 + minimum: 0 service: description: Service configuration. 
type: object diff --git a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml index cb9e0b991a4eb..516e75f489129 100644 --- a/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml +++ b/cmd/k8s-operator/deploy/crds/tailscale.com_proxyclasses.yaml @@ -1046,6 +1046,62 @@ spec: type: object additionalProperties: type: string + dnsConfig: + description: |- + DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + When DNSPolicy is set to "None", DNSConfig must be specified. + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + type: object + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + type: array + items: + type: string + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + type: array + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + type: object + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's value. + type: string + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + type: array + items: + type: string + x-kubernetes-list-type: atomic + dnsPolicy: + description: |- + DNSPolicy defines how DNS will be configured for the proxy Pod. + By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + type: string + enum: + - ClusterFirstWithHostNet + - ClusterFirst + - Default + - None imagePullSecrets: description: |- Proxy Pod's image pull Secrets. diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index 766d7f0d647a9..c7c5ef0a7d3b2 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -390,7 +390,6 @@ spec: using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. - NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. properties: apiVersion: description: |- @@ -432,6 +431,54 @@ spec: description: Tag defaults to unstable. type: string type: object + pod: + description: Pod configuration. + properties: + tolerations: + description: If specified, applies tolerations to the pods deployed by the DNSConfig resource. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
+ type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + replicas: + description: Replicas specifies how many Pods to create. Defaults to 1. + format: int32 + minimum: 0 + type: integer service: description: Service configuration. properties: @@ -1570,6 +1617,62 @@ spec: Annotations must be valid Kubernetes annotations. https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set type: object + dnsConfig: + description: |- + DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + When DNSPolicy is set to "None", DNSConfig must be specified. + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options of a pod. + properties: + name: + description: |- + Name is this DNS resolver option's name. + Required. + type: string + value: + description: Value is this DNS resolver option's value. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + dnsPolicy: + description: |- + DNSPolicy defines how DNS will be configured for the proxy Pod. + By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + enum: + - ClusterFirstWithHostNet + - ClusterFirst + - Default + - None + type: string imagePullSecrets: description: |- Proxy Pod's image pull Secrets. 
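Editor's note (not part of the patch): the DNSConfig CRD and static-manifest changes above add two nameserver knobs, spec.nameserver.replicas and spec.nameserver.pod.tolerations; the ProxyClass CRD similarly gains per-Pod dnsPolicy/dnsConfig settings. A hedged sketch of a manifest exercising the new DNSConfig fields follows — the resource name and the toleration key/value are illustrative only (they mirror the values used in nameserver_test.go later in this diff), not taken from any shipped example:

    apiVersion: tailscale.com/v1alpha1
    kind: DNSConfig
    metadata:
      name: ts-dns
    spec:
      nameserver:
        # Replicas defaults to 1; run more than one nameserver Pod.
        replicas: 2
        pod:
          # Tolerations are applied to the Pods deployed for this DNSConfig.
          tolerations:
            - key: "some-key"
              operator: "Equal"
              value: "some-value"
              effect: "NoSchedule"

As reflected in the nameserver.go changes below, the reconciler copies these values onto the generated Deployment (Spec.Replicas and Spec.Template.Spec.Tolerations).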
diff --git a/cmd/k8s-operator/dnsrecords.go b/cmd/k8s-operator/dnsrecords.go index 54c1584c6731e..1a9395aa00aa9 100644 --- a/cmd/k8s-operator/dnsrecords.go +++ b/cmd/k8s-operator/dnsrecords.go @@ -40,10 +40,10 @@ const ( // dnsRecordsReconciler knows how to update dnsrecords ConfigMap with DNS // records. // The records that it creates are: -// - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP address of -// the ingress proxy Pod. +// - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP addresses +// (both IPv4 and IPv6) of the ingress proxy Pod. // - For egress proxies configured via tailscale.com/tailnet-fqdn annotation, a -// mapping of the tailnet FQDN to the IP address of the egress proxy Pod. +// mapping of the tailnet FQDN to the IP addresses (both IPv4 and IPv6) of the egress proxy Pod. // // Records will only be created if there is exactly one ready // tailscale.com/v1alpha1.DNSConfig instance in the cluster (so that we know @@ -122,16 +122,16 @@ func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile. // For Ingress, the record is a mapping between the MagicDNSName of the Ingress, retrieved from // ingress.status.loadBalancer.ingress.hostname field and the proxy Pod IP addresses // retrieved from the EndpointSlice associated with this Service, i.e -// Records{IP4: : <[IPs of the ingress proxy Pods]>} +// Records{IP4: {: <[IPv4 addresses]>}, IP6: {: <[IPv6 addresses]>}} // // For egress, the record is a mapping between tailscale.com/tailnet-fqdn // annotation and the proxy Pod IP addresses, retrieved from the EndpointSlice // associated with this Service, i.e -// Records{IP4: {: <[IPs of the egress proxy Pods]>} +// Records{IP4: {: <[IPv4 addresses]>}, IP6: {: <[IPv6 addresses]>}} // // For ProxyGroup egress, the record is a mapping between tailscale.com/magic-dnsname -// annotation and the ClusterIP Service IP (which provides portmapping), i.e -// Records{IP4: {: <[ClusterIP Service IP]>} +// annotation and the ClusterIP Service IPs (which provides portmapping), i.e +// Records{IP4: {: <[IPv4 ClusterIPs]>}, IP6: {: <[IPv6 ClusterIPs]>}} // // If records need to be created for this proxy, maybeProvision will also: // - update the Service with a tailscale.com/magic-dnsname annotation @@ -178,17 +178,22 @@ func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, proxySvc } // Get the IP addresses for the DNS record - ips, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) + ip4s, ip6s, err := dnsRR.getTargetIPs(ctx, proxySvc, logger) if err != nil { return fmt.Errorf("error getting target IPs: %w", err) } - if len(ips) == 0 { + if len(ip4s) == 0 && len(ip6s) == 0 { logger.Debugf("No target IP addresses available yet. We will reconcile again once they are available.") return nil } updateFunc := func(rec *operatorutils.Records) { - mak.Set(&rec.IP4, fqdn, ips) + if len(ip4s) > 0 { + mak.Set(&rec.IP4, fqdn, ip4s) + } + if len(ip6s) > 0 { + mak.Set(&rec.IP6, fqdn, ip6s) + } } if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS records: %w", err) @@ -212,42 +217,45 @@ func epIsReady(ep *discoveryv1.Endpoint) bool { // has been removed from the Service. If the record is not found in the // ConfigMap, the ConfigMap does not exist, or the Service does not have // tailscale.com/magic-dnsname annotation, just remove the finalizer. 
-func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { +func (dnsRR *dnsRecordsReconciler) maybeCleanup(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) error { ix := slices.Index(proxySvc.Finalizers, dnsRecordsRecocilerFinalizer) if ix == -1 { logger.Debugf("no finalizer, nothing to do") return nil } cm := &corev1.ConfigMap{} - err := h.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: h.tsNamespace}, cm) + err := dnsRR.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: dnsRR.tsNamespace}, cm) if apierrors.IsNotFound(err) { logger.Debug("'dnsrecords' ConfigMap not found") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } if err != nil { return fmt.Errorf("error retrieving 'dnsrecords' ConfigMap: %w", err) } if cm.Data == nil { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } _, ok := cm.Data[operatorutils.DNSRecordsCMKey] if !ok { logger.Debug("'dnsrecords' ConfigMap contains no records") - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } - fqdn, _ := proxySvc.GetAnnotations()[annotationTSMagicDNSName] + fqdn := proxySvc.GetAnnotations()[annotationTSMagicDNSName] if fqdn == "" { - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } logger.Infof("removing DNS record for MagicDNS name %s", fqdn) updateFunc := func(rec *operatorutils.Records) { delete(rec.IP4, fqdn) + if rec.IP6 != nil { + delete(rec.IP6, fqdn) + } } - if err = h.updateDNSConfig(ctx, updateFunc); err != nil { + if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil { return fmt.Errorf("error updating DNS config: %w", err) } - return h.removeProxySvcFinalizer(ctx, proxySvc) + return dnsRR.removeProxySvcFinalizer(ctx, proxySvc) } func (dnsRR *dnsRecordsReconciler) removeProxySvcFinalizer(ctx context.Context, proxySvc *corev1.Service) error { @@ -383,72 +391,106 @@ func (dnsRR *dnsRecordsReconciler) parentSvcTargetsFQDN(ctx context.Context, svc return parentSvc.Annotations[AnnotationTailnetTargetFQDN] != "" } -// getTargetIPs returns the IP addresses that should be used for DNS records +// getTargetIPs returns the IPv4 and IPv6 addresses that should be used for DNS records // for the given proxy Service. -func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +func (dnsRR *dnsRecordsReconciler) getTargetIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { if dnsRR.isProxyGroupEgressService(proxySvc) { return dnsRR.getClusterIPServiceIPs(proxySvc, logger) } return dnsRR.getPodIPs(ctx, proxySvc, logger) } -// getClusterIPServiceIPs returns the ClusterIP of a ProxyGroup egress Service. -func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +// getClusterIPServiceIPs returns the ClusterIPs of a ProxyGroup egress Service. +// It separates IPv4 and IPv6 addresses for dual-stack services. 
+func (dnsRR *dnsRecordsReconciler) getClusterIPServiceIPs(proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { + // Handle services with no ClusterIP if proxySvc.Spec.ClusterIP == "" || proxySvc.Spec.ClusterIP == "None" { logger.Debugf("ProxyGroup egress ClusterIP Service does not have a ClusterIP yet.") - return nil, nil + return nil, nil, nil + } + + var ip4s, ip6s []string + + // Check all ClusterIPs for dual-stack support + clusterIPs := proxySvc.Spec.ClusterIPs + if len(clusterIPs) == 0 && proxySvc.Spec.ClusterIP != "" { + // Fallback to single ClusterIP for backward compatibility + clusterIPs = []string{proxySvc.Spec.ClusterIP} } - // Validate that ClusterIP is a valid IPv4 address - if !net.IsIPv4String(proxySvc.Spec.ClusterIP) { - logger.Debugf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) - return nil, fmt.Errorf("ClusterIP %s is not a valid IPv4 address", proxySvc.Spec.ClusterIP) + + for _, ip := range clusterIPs { + if net.IsIPv4String(ip) { + ip4s = append(ip4s, ip) + logger.Debugf("Using IPv4 ClusterIP %s for ProxyGroup egress DNS record", ip) + } else if net.IsIPv6String(ip) { + ip6s = append(ip6s, ip) + logger.Debugf("Using IPv6 ClusterIP %s for ProxyGroup egress DNS record", ip) + } else { + logger.Debugf("ClusterIP %s is not a valid IP address", ip) + } } - logger.Debugf("Using ClusterIP Service IP %s for ProxyGroup egress DNS record", proxySvc.Spec.ClusterIP) - return []string{proxySvc.Spec.ClusterIP}, nil + + if len(ip4s) == 0 && len(ip6s) == 0 { + return nil, nil, fmt.Errorf("no valid ClusterIPs found") + } + + return ip4s, ip6s, nil } -// getPodIPs returns Pod IP addresses from EndpointSlices for non-ProxyGroup Services. -func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, error) { +// getPodIPs returns Pod IPv4 and IPv6 addresses from EndpointSlices for non-ProxyGroup Services. +func (dnsRR *dnsRecordsReconciler) getPodIPs(ctx context.Context, proxySvc *corev1.Service, logger *zap.SugaredLogger) ([]string, []string, error) { // Get the Pod IP addresses for the proxy from the EndpointSlices for // the headless Service. The Service can have multiple EndpointSlices // associated with it, for example in dual-stack clusters. labels := map[string]string{discoveryv1.LabelServiceName: proxySvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership var eps = new(discoveryv1.EndpointSliceList) if err := dnsRR.List(ctx, eps, client.InNamespace(dnsRR.tsNamespace), client.MatchingLabels(labels)); err != nil { - return nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) + return nil, nil, fmt.Errorf("error listing EndpointSlices for the proxy's Service: %w", err) } if len(eps.Items) == 0 { logger.Debugf("proxy's Service EndpointSlice does not yet exist.") - return nil, nil + return nil, nil, nil } // Each EndpointSlice for a Service can have a list of endpoints that each // can have multiple addresses - these are the IP addresses of any Pods - // selected by that Service. Pick all the IPv4 addresses. + // selected by that Service. Separate IPv4 and IPv6 addresses. // It is also possible that multiple EndpointSlices have overlapping addresses. 
// https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#duplicate-endpoints - ips := make(set.Set[string], 0) + ip4s := make(set.Set[string], 0) + ip6s := make(set.Set[string], 0) for _, slice := range eps.Items { - if slice.AddressType != discoveryv1.AddressTypeIPv4 { - logger.Infof("EndpointSlice is for AddressType %s, currently only IPv4 address type is supported", slice.AddressType) - continue - } for _, ep := range slice.Endpoints { if !epIsReady(&ep) { logger.Debugf("Endpoint with addresses %v appears not ready to receive traffic %v", ep.Addresses, ep.Conditions.String()) continue } for _, ip := range ep.Addresses { - if !net.IsIPv4String(ip) { - logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip) - } else { - ips.Add(ip) + switch slice.AddressType { + case discoveryv1.AddressTypeIPv4: + if net.IsIPv4String(ip) { + ip4s.Add(ip) + } else { + logger.Debugf("EndpointSlice with AddressType IPv4 contains non-IPv4 address %q, ignoring", ip) + } + case discoveryv1.AddressTypeIPv6: + if net.IsIPv6String(ip) { + // Strip zone ID if present (e.g., fe80::1%eth0 -> fe80::1) + if idx := strings.IndexByte(ip, '%'); idx != -1 { + ip = ip[:idx] + } + ip6s.Add(ip) + } else { + logger.Debugf("EndpointSlice with AddressType IPv6 contains non-IPv6 address %q, ignoring", ip) + } + default: + logger.Debugf("EndpointSlice is for unsupported AddressType %s, skipping", slice.AddressType) } } } } - if ips.Len() == 0 { - logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses.") - return nil, nil + if ip4s.Len() == 0 && ip6s.Len() == 0 { + logger.Debugf("EndpointSlice for the Service contains no IP addresses.") + return nil, nil, nil } - return ips.Slice(), nil + return ip4s.Slice(), ip6s.Slice(), nil } diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 51dfb90497ff7..13898078fd4ba 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -99,8 +99,9 @@ func TestDNSRecordsReconciler(t *testing.T) { mustCreate(t, fc, epv6) expectReconciled(t, dnsRR, "tailscale", "egress-fqdn") // dns-records-reconciler reconcile the headless Service // ConfigMap should now have a record for foo.bar.ts.net -> 10.8.8.7 - wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} // IPv6 endpoint is currently ignored - expectHostsRecords(t, fc, wantHosts) + wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}} + wantHostsIPv6 := map[string][]string{"foo.bar.ts.net": {"2600:1900:4011:161:0:d:0:d"}} + expectHostsRecordsWithIPv6(t, fc, wantHosts, wantHostsIPv6) // 2. 
DNS record is updated if tailscale.com/tailnet-fqdn annotation's // value changes @@ -271,17 +272,148 @@ func TestDNSRecordsReconcilerErrorCases(t *testing.T) { // Test invalid IP format testSvc.Spec.ClusterIP = "invalid-ip" - _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + _, _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) if err == nil { t.Error("expected error for invalid IP format") } // Test valid IP testSvc.Spec.ClusterIP = "10.0.100.50" - _, err = dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) + ip4s, ip6s, err := dnsRR.getClusterIPServiceIPs(testSvc, zl.Sugar()) if err != nil { t.Errorf("unexpected error for valid IP: %v", err) } + if len(ip4s) != 1 || ip4s[0] != "10.0.100.50" { + t.Errorf("expected IPv4 address 10.0.100.50, got %v", ip4s) + } + if len(ip6s) != 0 { + t.Errorf("expected no IPv6 addresses, got %v", ip6s) + } +} + +func TestDNSRecordsReconcilerDualStack(t *testing.T) { + // Test dual-stack (IPv4 and IPv6) scenarios + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + // Preconfigure cluster with DNSConfig + dnsCfg := &tsapi.DNSConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + TypeMeta: metav1.TypeMeta{Kind: "DNSConfig"}, + Spec: tsapi.DNSConfigSpec{Nameserver: &tsapi.Nameserver{}}, + } + dnsCfg.Status.Conditions = append(dnsCfg.Status.Conditions, metav1.Condition{ + Type: string(tsapi.NameserverReady), + Status: metav1.ConditionTrue, + }) + + // Create dual-stack ingress + ing := &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dual-stack-ingress", + Namespace: "test", + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + }, + Status: networkingv1.IngressStatus{ + LoadBalancer: networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ + {Hostname: "dual-stack.example.ts.net"}, + }, + }, + }, + } + + headlessSvc := headlessSvcForParent(ing, "ingress") + headlessSvc.Name = "ts-dual-stack-ingress" + headlessSvc.SetLabels(map[string]string{ + kubetypes.LabelManaged: "true", + LabelParentName: "dual-stack-ingress", + LabelParentNamespace: "test", + LabelParentType: "ingress", + }) + + // Create both IPv4 and IPv6 endpoints + epv4 := endpointSliceForService(headlessSvc, "10.1.2.3", discoveryv1.AddressTypeIPv4) + epv6 := endpointSliceForService(headlessSvc, "2001:db8::1", discoveryv1.AddressTypeIPv6) + + dnsRRDualStack := &dnsRecordsReconciler{ + tsNamespace: "tailscale", + logger: zl.Sugar(), + } + + // Create the dnsrecords ConfigMap + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: operatorutils.DNSRecordsCMName, + Namespace: "tailscale", + }, + } + + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(dnsCfg, ing, headlessSvc, epv4, epv6, cm). + WithStatusSubresource(dnsCfg). 
+ Build() + + dnsRRDualStack.Client = fc + + // Test dual-stack service records + expectReconciled(t, dnsRRDualStack, "tailscale", "ts-dual-stack-ingress") + + wantIPv4 := map[string][]string{"dual-stack.example.ts.net": {"10.1.2.3"}} + wantIPv6 := map[string][]string{"dual-stack.example.ts.net": {"2001:db8::1"}} + expectHostsRecordsWithIPv6(t, fc, wantIPv4, wantIPv6) + + // Test ProxyGroup with dual-stack ClusterIPs + // First create parent service + parentEgressSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pg-service", + Namespace: "tailscale", + Annotations: map[string]string{ + AnnotationTailnetTargetFQDN: "pg-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "unused", + }, + } + + proxyGroupSvc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ts-proxygroup-dualstack", + Namespace: "tailscale", + Labels: map[string]string{ + kubetypes.LabelManaged: "true", + labelProxyGroup: "test-pg", + labelSvcType: typeEgress, + LabelParentName: "pg-service", + LabelParentNamespace: "tailscale", + LabelParentType: "svc", + }, + Annotations: map[string]string{ + annotationTSMagicDNSName: "pg-service.example.ts.net", + }, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + ClusterIP: "10.96.0.100", + ClusterIPs: []string{"10.96.0.100", "2001:db8::100"}, + }, + } + + mustCreate(t, fc, parentEgressSvc) + mustCreate(t, fc, proxyGroupSvc) + expectReconciled(t, dnsRRDualStack, "tailscale", "ts-proxygroup-dualstack") + + wantIPv4["pg-service.example.ts.net"] = []string{"10.96.0.100"} + wantIPv6["pg-service.example.ts.net"] = []string{"2001:db8::100"} + expectHostsRecordsWithIPv6(t, fc, wantIPv4, wantIPv6) } func headlessSvcForParent(o client.Object, typ string) *corev1.Service { @@ -344,3 +476,28 @@ func expectHostsRecords(t *testing.T, cl client.Client, wantsHosts map[string][] t.Fatalf("unexpected dns config (-got +want):\n%s", diff) } } + +func expectHostsRecordsWithIPv6(t *testing.T, cl client.Client, wantsHostsIPv4, wantsHostsIPv6 map[string][]string) { + t.Helper() + cm := new(corev1.ConfigMap) + if err := cl.Get(context.Background(), types.NamespacedName{Name: "dnsrecords", Namespace: "tailscale"}, cm); err != nil { + t.Fatalf("getting dnsconfig ConfigMap: %v", err) + } + if cm.Data == nil { + t.Fatal("dnsconfig ConfigMap has no data") + } + dnsConfigString, ok := cm.Data[operatorutils.DNSRecordsCMKey] + if !ok { + t.Fatal("dnsconfig ConfigMap does not contain dnsconfig") + } + dnsConfig := &operatorutils.Records{} + if err := json.Unmarshal([]byte(dnsConfigString), dnsConfig); err != nil { + t.Fatalf("unmarshaling dnsconfig: %v", err) + } + if diff := cmp.Diff(dnsConfig.IP4, wantsHostsIPv4); diff != "" { + t.Fatalf("unexpected IPv4 dns config (-got +want):\n%s", diff) + } + if diff := cmp.Diff(dnsConfig.IP6, wantsHostsIPv6); diff != "" { + t.Fatalf("unexpected IPv6 dns config (-got +want):\n%s", diff) + } +} diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index 05cf1aa1abfed..f3a812ecb9030 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -25,8 +25,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" - "tailscale.com/logtail/backoff" "tailscale.com/tstime" + "tailscale.com/util/backoff" "tailscale.com/util/httpm" ) diff --git a/cmd/k8s-operator/generate/main.go 
b/cmd/k8s-operator/generate/main.go index 25435a47cf14a..6904f1df02ec0 100644 --- a/cmd/k8s-operator/generate/main.go +++ b/cmd/k8s-operator/generate/main.go @@ -41,11 +41,16 @@ func main() { if len(os.Args) < 2 { log.Fatalf("usage ./generate [staticmanifests|helmcrd]") } - repoRoot := "../../" + gitOut, err := exec.Command("git", "rev-parse", "--show-toplevel").CombinedOutput() + if err != nil { + log.Fatalf("error determining git root: %v: %s", err, gitOut) + } + + repoRoot := strings.TrimSpace(string(gitOut)) switch os.Args[1] { case "helmcrd": // insert CRDs to Helm templates behind a installCRDs=true conditional check log.Print("Adding CRDs to Helm templates") - if err := generate("./"); err != nil { + if err := generate(repoRoot); err != nil { log.Fatalf("error adding CRDs to Helm templates: %v", err) } return diff --git a/cmd/k8s-operator/logger.go b/cmd/k8s-operator/logger.go new file mode 100644 index 0000000000000..46b1fc0c82d48 --- /dev/null +++ b/cmd/k8s-operator/logger.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "io" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + kzap "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +// wrapZapCore returns a zapcore.Core implementation that splits the core chain using zapcore.NewTee. This causes +// logs to be simultaneously written to both the original core and the provided io.Writer implementation. +func wrapZapCore(core zapcore.Core, writer io.Writer) zapcore.Core { + encoder := &kzap.KubeAwareEncoder{ + Encoder: zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + } + + // We use a tee logger here so that logs are written to stdout/stderr normally while at the same time being + // sent upstream. 
+ return zapcore.NewTee(core, zapcore.NewCore(encoder, zapcore.AddSync(writer), zap.DebugLevel)) +} diff --git a/cmd/k8s-operator/nameserver.go b/cmd/k8s-operator/nameserver.go index 983a28c918276..5de1c47ba2b7e 100644 --- a/cmd/k8s-operator/nameserver.go +++ b/cmd/k8s-operator/nameserver.go @@ -30,6 +30,7 @@ import ( tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" "tailscale.com/tstime" + "tailscale.com/types/ptr" "tailscale.com/util/clientmetric" "tailscale.com/util/set" ) @@ -130,7 +131,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ return setStatus(&dnsCfg, metav1.ConditionFalse, reasonNameserverCreationFailed, msg) } } - if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil { + if err = a.maybeProvision(ctx, &dnsCfg); err != nil { if strings.Contains(err.Error(), optimisticLockErrorMsg) { logger.Infof("optimistic lock error, retrying: %s", err) return reconcile.Result{}, nil @@ -167,7 +168,7 @@ func nameserverResourceLabels(name, namespace string) map[string]string { return labels } -func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error { +func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsapi.DNSConfig) error { labels := nameserverResourceLabels(tsDNSCfg.Name, a.tsNamespace) dCfg := &deployConfig{ ownerRefs: []metav1.OwnerReference{*metav1.NewControllerRef(tsDNSCfg, tsapi.SchemeGroupVersion.WithKind("DNSConfig"))}, @@ -175,6 +176,11 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa labels: labels, imageRepo: defaultNameserverImageRepo, imageTag: defaultNameserverImageTag, + replicas: 1, + } + + if tsDNSCfg.Spec.Nameserver.Replicas != nil { + dCfg.replicas = *tsDNSCfg.Spec.Nameserver.Replicas } if tsDNSCfg.Spec.Nameserver.Image != nil && tsDNSCfg.Spec.Nameserver.Image.Repo != "" { dCfg.imageRepo = tsDNSCfg.Spec.Nameserver.Image.Repo @@ -185,6 +191,9 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa if tsDNSCfg.Spec.Nameserver.Service != nil { dCfg.clusterIP = tsDNSCfg.Spec.Nameserver.Service.ClusterIP } + if tsDNSCfg.Spec.Nameserver.Pod != nil { + dCfg.tolerations = tsDNSCfg.Spec.Nameserver.Pod.Tolerations + } for _, deployable := range []deployable{saDeployable, deployDeployable, svcDeployable, cmDeployable} { if err := deployable.updateObj(ctx, dCfg, a.Client); err != nil { @@ -211,12 +220,14 @@ type deployable struct { } type deployConfig struct { - imageRepo string - imageTag string - labels map[string]string - ownerRefs []metav1.OwnerReference - namespace string - clusterIP string + replicas int32 + imageRepo string + imageTag string + labels map[string]string + ownerRefs []metav1.OwnerReference + namespace string + clusterIP string + tolerations []corev1.Toleration } var ( @@ -236,10 +247,12 @@ var ( if err := yaml.Unmarshal(deployYaml, &d); err != nil { return fmt.Errorf("error unmarshalling Deployment yaml: %w", err) } + d.Spec.Replicas = ptr.To(cfg.replicas) d.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s", cfg.imageRepo, cfg.imageTag) d.ObjectMeta.Namespace = cfg.namespace d.ObjectMeta.Labels = cfg.labels d.ObjectMeta.OwnerReferences = cfg.ownerRefs + d.Spec.Template.Spec.Tolerations = cfg.tolerations updateF := func(oldD *appsv1.Deployment) { oldD.Spec = d.Spec } diff --git a/cmd/k8s-operator/nameserver_test.go b/cmd/k8s-operator/nameserver_test.go index 55a998ac31979..6da52d8a21490 100644 --- 
a/cmd/k8s-operator/nameserver_test.go +++ b/cmd/k8s-operator/nameserver_test.go @@ -22,6 +22,7 @@ import ( operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tstest" + "tailscale.com/types/ptr" "tailscale.com/util/mak" ) @@ -33,6 +34,7 @@ func TestNameserverReconciler(t *testing.T) { }, Spec: tsapi.DNSConfigSpec{ Nameserver: &tsapi.Nameserver{ + Replicas: ptr.To[int32](3), Image: &tsapi.NameserverImage{ Repo: "test", Tag: "v0.0.1", @@ -40,6 +42,16 @@ func TestNameserverReconciler(t *testing.T) { Service: &tsapi.NameserverService{ ClusterIP: "5.4.3.2", }, + Pod: &tsapi.NameserverPod{ + Tolerations: []corev1.Toleration{ + { + Key: "some-key", + Operator: corev1.TolerationOpEqual, + Value: "some-value", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, }, }, } @@ -74,8 +86,18 @@ func TestNameserverReconciler(t *testing.T) { } wantsDeploy.OwnerReferences = []metav1.OwnerReference{*ownerReference} wantsDeploy.Spec.Template.Spec.Containers[0].Image = "test:v0.0.1" + wantsDeploy.Spec.Replicas = ptr.To[int32](3) wantsDeploy.Namespace = tsNamespace wantsDeploy.ObjectMeta.Labels = nameserverLabels + wantsDeploy.Spec.Template.Spec.Tolerations = []corev1.Toleration{ + { + Key: "some-key", + Operator: corev1.TolerationOpEqual, + Value: "some-value", + Effect: corev1.TaintEffectNoSchedule, + }, + } + expectEqual(t, fc, wantsDeploy) }) diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 76d2df51d47d2..89c8ff3e205bf 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -44,6 +44,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/envknob" "tailscale.com/client/local" "tailscale.com/client/tailscale" @@ -66,6 +67,9 @@ import ( // Generate static manifests for deploying Tailscale operator on Kubernetes from the operator's Helm chart. //go:generate go run tailscale.com/cmd/k8s-operator/generate staticmanifests +// Generate the helm chart's CRDs (which are ignored from git). +//go:generate go run tailscale.com/cmd/k8s-operator/generate helmcrd + // Generate CRD API docs. //go:generate go run github.com/elastic/crd-ref-docs --renderer=markdown --source-path=../../k8s-operator/apis/ --config=../../k8s-operator/api-docs-config.yaml --output-path=../../k8s-operator/api.md @@ -133,6 +137,14 @@ func main() { } }() } + + // Operator log uploads can be opted-out using the "TS_NO_LOGS_NO_SUPPORT" environment variable. + if !envknob.NoLogsNoSupport() { + zlog = zlog.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return wrapZapCore(core, s.LogtailWriter()) + })) + } + rOpts := reconcilerOpts{ log: zlog, tsServer: s, diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 9a87d26438b8a..c52ffce85495b 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -63,7 +63,7 @@ const ( AnnotationHostname = "tailscale.com/hostname" annotationTailnetTargetIPOld = "tailscale.com/ts-tailnet-target-ip" AnnotationTailnetTargetIP = "tailscale.com/tailnet-ip" - //MagicDNS name of tailnet node. + // MagicDNS name of tailnet node. 
AnnotationTailnetTargetFQDN = "tailscale.com/tailnet-fqdn" AnnotationProxyGroup = "tailscale.com/proxy-group" @@ -439,12 +439,12 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *z } if orig != nil && !apiequality.Semantic.DeepEqual(latest, orig) { - logger.Debugf("patching the existing proxy Secret with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + logger.With("config", sanitizeConfig(latestConfig)).Debugf("patching the existing proxy Secret") if err = a.Patch(ctx, secret, client.MergeFrom(orig)); err != nil { return nil, err } } else { - logger.Debugf("creating a new Secret for the proxy with tailscaled config %s", sanitizeConfigBytes(latestConfig)) + logger.With("config", sanitizeConfig(latestConfig)).Debugf("creating a new Secret for the proxy") if err = a.Create(ctx, secret); err != nil { return nil, err } @@ -494,17 +494,16 @@ func (a *tailscaleSTSReconciler) provisionSecrets(ctx context.Context, logger *z return secretNames, nil } -// sanitizeConfigBytes returns ipn.ConfigVAlpha in string form with redacted -// auth key. -func sanitizeConfigBytes(c ipn.ConfigVAlpha) string { +// sanitizeConfig returns an ipn.ConfigVAlpha with sensitive fields redacted. Since we pump everything +// into JSON-encoded logs it's easier to read this with a .With method than converting it to a string. +func sanitizeConfig(c ipn.ConfigVAlpha) ipn.ConfigVAlpha { + // Explicitly redact AuthKey because we never want it appearing in logs. Never populate this with the + // actual auth key. if c.AuthKey != nil { c.AuthKey = ptr.To("**redacted**") } - sanitizedBytes, err := json.Marshal(c) - if err != nil { - return "invalid config" - } - return string(sanitizedBytes) + + return c } // DeviceInfo returns the device ID, hostname, IPs and capver for the Tailscale device that acts as an operator proxy. @@ -907,6 +906,12 @@ func applyProxyClassToStatefulSet(pc *tsapi.ProxyClass, ss *appsv1.StatefulSet, ss.Spec.Template.Spec.Tolerations = wantsPod.Tolerations ss.Spec.Template.Spec.PriorityClassName = wantsPod.PriorityClassName ss.Spec.Template.Spec.TopologySpreadConstraints = wantsPod.TopologySpreadConstraints + if wantsPod.DNSPolicy != nil { + ss.Spec.Template.Spec.DNSPolicy = *wantsPod.DNSPolicy + } + if wantsPod.DNSConfig != nil { + ss.Spec.Template.Spec.DNSConfig = wantsPod.DNSConfig + } // Update containers. 
updateContainer := func(overlay *tsapi.Container, base corev1.Container) corev1.Container { diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index e2cb2962fde48..ea28e77a14c36 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -87,6 +87,15 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { }, }, }, + DNSPolicy: ptr.To(corev1.DNSClusterFirstWithHostNet), + DNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"1.1.1.1", "8.8.8.8"}, + Searches: []string{"example.com", "test.local"}, + Options: []corev1.PodDNSConfigOption{ + {Name: "ndots", Value: ptr.To("2")}, + {Name: "edns0"}, + }, + }, TailscaleContainer: &tsapi.Container{ SecurityContext: &corev1.SecurityContext{ Privileged: ptr.To(true), @@ -200,6 +209,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.InitContainers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.InitContainers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName + wantSS.Spec.Template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + wantSS.Spec.Template.Spec.DNSConfig = proxyClassAllOpts.Spec.StatefulSet.Pod.DNSConfig gotSS := applyProxyClassToStatefulSet(proxyClassAllOpts, nonUserspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { @@ -239,6 +250,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { wantSS.Spec.Template.Spec.Containers[0].ImagePullPolicy = "IfNotPresent" wantSS.Spec.Template.Spec.Containers[0].Image = "ghcr.io/my-repo/tailscale:v0.01testsomething" wantSS.Spec.Template.Spec.PriorityClassName = proxyClassAllOpts.Spec.StatefulSet.Pod.PriorityClassName + wantSS.Spec.Template.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet + wantSS.Spec.Template.Spec.DNSConfig = proxyClassAllOpts.Spec.StatefulSet.Pod.DNSConfig gotSS = applyProxyClassToStatefulSet(proxyClassAllOpts, userspaceProxySS.DeepCopy(), new(tailscaleSTSConfig), zl.Sugar()) if diff := cmp.Diff(gotSS, wantSS); diff != "" { t.Errorf("Unexpected result applying ProxyClass with all options to a StatefulSet for a userspace proxy (-got +want):\n%s", diff) diff --git a/cmd/k8s-proxy/k8s-proxy.go b/cmd/k8s-proxy/k8s-proxy.go index 7a77072140568..9b2bb67494659 100644 --- a/cmd/k8s-proxy/k8s-proxy.go +++ b/cmd/k8s-proxy/k8s-proxy.go @@ -34,6 +34,9 @@ import ( "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" + + // we need to import this package so that the `kube:` ipn store gets registered + _ "tailscale.com/ipn/store/kubestore" apiproxy "tailscale.com/k8s-operator/api-proxy" "tailscale.com/kube/certs" healthz "tailscale.com/kube/health" diff --git a/cmd/omitsize/omitsize.go b/cmd/omitsize/omitsize.go new file mode 100644 index 0000000000000..35e03d268e186 --- /dev/null +++ b/cmd/omitsize/omitsize.go @@ -0,0 +1,229 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The omitsize tool prints out how large the Tailscale binaries are with +// different build tags. +package main + +import ( + "crypto/sha256" + "flag" + "fmt" + "log" + "maps" + "os" + "os/exec" + "path/filepath" + "slices" + "strconv" + "strings" + "sync" + + "tailscale.com/feature/featuretags" + "tailscale.com/util/set" +) + +var ( + cacheDir = flag.String("cachedir", "", "if non-empty, use this directory to store cached size results to speed up subsequent runs. 
The tool does not consider the git status when deciding whether to use the cache. It's on you to nuke it between runs if the tree changed.") + features = flag.String("features", "", "comma-separated list of features to list in the table, without the ts_omit_ prefix. It may also contain a '+' sign(s) for ANDing features together. If empty, all omittable features are considered one at a time.") + + showRemovals = flag.Bool("show-removals", false, "if true, show a table of sizes removing one feature at a time from the full set.") +) + +// allOmittable returns the list of all build tags that remove features. +var allOmittable = sync.OnceValue(func() []string { + var ret []string // all build tags that can be omitted + for k := range featuretags.Features { + if k.IsOmittable() { + ret = append(ret, k.OmitTag()) + } + } + slices.Sort(ret) + return ret +}) + +func main() { + flag.Parse() + + // rows is a set (usually of size 1) of feature(s) to add/remove, without deps + // included at this point (as dep direction depends on whether we're adding or removing, + // so it's expanded later) + var rows []set.Set[featuretags.FeatureTag] + + if *features == "" { + for _, k := range slices.Sorted(maps.Keys(featuretags.Features)) { + if k.IsOmittable() { + rows = append(rows, set.Of(k)) + } + } + } else { + for v := range strings.SplitSeq(*features, ",") { + s := set.Set[featuretags.FeatureTag]{} + for fts := range strings.SplitSeq(v, "+") { + ft := featuretags.FeatureTag(fts) + if _, ok := featuretags.Features[ft]; !ok { + log.Fatalf("unknown feature %q", v) + } + s.Add(ft) + } + rows = append(rows, s) + } + } + + minD := measure("tailscaled", allOmittable()...) + minC := measure("tailscale", allOmittable()...) + minBoth := measure("tailscaled", append(slices.Clone(allOmittable()), "ts_include_cli")...) + + if *showRemovals { + baseD := measure("tailscaled") + baseC := measure("tailscale") + baseBoth := measure("tailscaled", "ts_include_cli") + + fmt.Printf("Starting with everything and removing a feature...\n\n") + + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d\n", baseD, baseC, baseBoth) + + fmt.Printf("-%8d -%8d -%8d .. remove *\n", baseD-minD, baseC-minC, baseBoth-minBoth) + + for _, s := range rows { + title, tags := computeRemove(s) + sizeD := measure("tailscaled", tags...) + sizeC := measure("tailscale", tags...) + sizeBoth := measure("tailscaled", append(slices.Clone(tags), "ts_include_cli")...) + saveD := max(baseD-sizeD, 0) + saveC := max(baseC-sizeC, 0) + saveBoth := max(baseBoth-sizeBoth, 0) + fmt.Printf("-%8d -%8d -%8d .. remove %s\n", saveD, saveC, saveBoth, title) + + } + } + + fmt.Printf("\nStarting at a minimal binary and adding one feature back...\n\n") + fmt.Printf("%9s %9s %9s\n", "tailscaled", "tailscale", "combined (linux/amd64)") + fmt.Printf("%9d %9d %9d omitting everything\n", minD, minC, minBoth) + for _, s := range rows { + title, tags := computeAdd(s) + sizeD := measure("tailscaled", tags...) + sizeC := measure("tailscale", tags...) + sizeBoth := measure("tailscaled", append(tags, "ts_include_cli")...) + + fmt.Printf("+%8d +%8d +%8d .. add %s\n", max(sizeD-minD, 0), max(sizeC-minC, 0), max(sizeBoth-minBoth, 0), title) + } + +} + +// computeAdd returns a human-readable title of a set of features and the build +// tags to use to add that set of features to a minimal binary, including their +// feature dependencies. 
+func computeAdd(s set.Set[featuretags.FeatureTag]) (title string, tags []string) { + allSet := set.Set[featuretags.FeatureTag]{} // s + all their outbound dependencies + var explicitSorted []string // string versions of s, sorted + for ft := range s { + allSet.AddSet(featuretags.Requires(ft)) + if ft.IsOmittable() { + explicitSorted = append(explicitSorted, string(ft)) + } + } + slices.Sort(explicitSorted) + + var removeTags []string + for ft := range allSet { + if ft.IsOmittable() { + removeTags = append(removeTags, ft.OmitTag()) + } + } + + var titleBuf strings.Builder + titleBuf.WriteString(strings.Join(explicitSorted, "+")) + var and []string + for ft := range allSet { + if !s.Contains(ft) { + and = append(and, string(ft)) + } + } + if len(and) > 0 { + slices.Sort(and) + fmt.Fprintf(&titleBuf, " (and %s)", strings.Join(and, "+")) + } + tags = allExcept(allOmittable(), removeTags) + return titleBuf.String(), tags +} + +// computeRemove returns a human-readable title of a set of features and the build +// tags to use to remove that set of features from a full binary, including removing +// any features that depend on features in the provided set. +func computeRemove(s set.Set[featuretags.FeatureTag]) (title string, tags []string) { + allSet := set.Set[featuretags.FeatureTag]{} // s + all their inbound dependencies + var explicitSorted []string // string versions of s, sorted + for ft := range s { + allSet.AddSet(featuretags.RequiredBy(ft)) + if ft.IsOmittable() { + explicitSorted = append(explicitSorted, string(ft)) + } + } + slices.Sort(explicitSorted) + + var removeTags []string + for ft := range allSet { + if ft.IsOmittable() { + removeTags = append(removeTags, ft.OmitTag()) + } + } + + var titleBuf strings.Builder + titleBuf.WriteString(strings.Join(explicitSorted, "+")) + + var and []string + for ft := range allSet { + if !s.Contains(ft) { + and = append(and, string(ft)) + } + } + if len(and) > 0 { + slices.Sort(and) + fmt.Fprintf(&titleBuf, " (and %s)", strings.Join(and, "+")) + } + + return titleBuf.String(), removeTags +} + +func allExcept(all, omit []string) []string { + return slices.DeleteFunc(slices.Clone(all), func(s string) bool { return slices.Contains(omit, s) }) +} + +func measure(bin string, tags ...string) int64 { + tags = slices.Clone(tags) + slices.Sort(tags) + tags = slices.Compact(tags) + comma := strings.Join(tags, ",") + + var cacheFile string + if *cacheDir != "" { + cacheFile = filepath.Join(*cacheDir, fmt.Sprintf("%02x", sha256.Sum256(fmt.Appendf(nil, "%s-%s.size", bin, comma)))) + if v, err := os.ReadFile(cacheFile); err == nil { + if size, err := strconv.ParseInt(strings.TrimSpace(string(v)), 10, 64); err == nil { + return size + } + } + } + + cmd := exec.Command("go", "build", "-trimpath", "-ldflags=-w -s", "-tags", strings.Join(tags, ","), "-o", "tmpbin", "./cmd/"+bin) + log.Printf("# Measuring %v", cmd.Args) + cmd.Env = append(os.Environ(), "CGO_ENABLED=0", "GOOS=linux", "GOARCH=amd64") + out, err := cmd.CombinedOutput() + if err != nil { + log.Fatalf("error measuring %q: %v, %s\n", bin, err, out) + } + fi, err := os.Stat("tmpbin") + if err != nil { + log.Fatal(err) + } + n := fi.Size() + if cacheFile != "" { + if err := os.WriteFile(cacheFile, fmt.Appendf(nil, "%d", n), 0644); err != nil { + log.Fatalf("error writing size to cache: %v\n", err) + } + } + return n +} diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index c8a18eb0752bc..bd8eebb7b1d27 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -51,23 +51,25 @@ 
tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature/buildfeatures from tailscale.com/feature+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr tailscale.com/net/stun from tailscale.com/net/stunserver tailscale.com/net/stunserver from tailscale.com/cmd/stund tailscale.com/net/tsaddr from tailscale.com/tsweb - tailscale.com/syncs from tailscale.com/metrics - tailscale.com/tailcfg from tailscale.com/version + tailscale.com/syncs from tailscale.com/metrics+ + tailscale.com/tailcfg from tailscale.com/version+ tailscale.com/tsweb from tailscale.com/cmd/stund+ tailscale.com/tsweb/promvarz from tailscale.com/cmd/stund tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/dnstype from tailscale.com/tailcfg tailscale.com/types/ipproto from tailscale.com/tailcfg - tailscale.com/types/key from tailscale.com/tailcfg + tailscale.com/types/key from tailscale.com/tailcfg+ tailscale.com/types/lazy from tailscale.com/version+ tailscale.com/types/logger from tailscale.com/tsweb+ tailscale.com/types/opt from tailscale.com/envknob+ + tailscale.com/types/persist from tailscale.com/feature tailscale.com/types/ptr from tailscale.com/tailcfg+ tailscale.com/types/result from tailscale.com/util/lineiter tailscale.com/types/structs from tailscale.com/tailcfg+ @@ -86,29 +88,32 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/version from tailscale.com/envknob+ tailscale.com/version/distro from tailscale.com/envknob golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box - golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ - golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ - golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/internal/alias from golang.org/x/crypto/nacl/secretbox + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/nacl/box from tailscale.com/types/key golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ golang.org/x/exp/constraints from tailscale.com/tsweb/varz - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http - golang.org/x/net/http2/hpack from net/http+ - golang.org/x/net/idna from golang.org/x/net/http/httpguts+ golang.org/x/sys/cpu from golang.org/x/crypto/blake2b+ LD golang.org/x/sys/unix from github.com/prometheus/procfs+ W golang.org/x/sys/windows from github.com/prometheus/client_golang/prometheus - golang.org/x/text/secure/bidirule from golang.org/x/net/idna - golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ - golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ - golang.org/x/text/unicode/norm from golang.org/x/net/idna + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from 
crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ bytes from bufio+ cmp from slices+ @@ -233,6 +238,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ + internal/saferio from encoding/asn1 internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ @@ -267,7 +273,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from mime/multipart+ net/url from crypto/x509+ os from crypto/internal/sysrand+ os/signal from tailscale.com/cmd/stund diff --git a/cmd/stunstamp/stunstamp.go b/cmd/stunstamp/stunstamp.go index c3842e2e8b3be..71ed505690243 100644 --- a/cmd/stunstamp/stunstamp.go +++ b/cmd/stunstamp/stunstamp.go @@ -34,10 +34,10 @@ import ( "github.com/golang/snappy" "github.com/prometheus/prometheus/prompb" "github.com/tcnksm/go-httpstat" - "tailscale.com/logtail/backoff" "tailscale.com/net/stun" "tailscale.com/net/tcpinfo" "tailscale.com/tailcfg" + "tailscale.com/util/backoff" ) var ( diff --git a/cmd/tailscale/cli/appcroutes.go b/cmd/tailscale/cli/appcroutes.go new file mode 100644 index 0000000000000..4a1ba87e35bcc --- /dev/null +++ b/cmd/tailscale/cli/appcroutes.go @@ -0,0 +1,153 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package cli + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "slices" + "strings" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/types/appctype" +) + +var appcRoutesArgs struct { + all bool + domainMap bool + n bool +} + +var appcRoutesCmd = &ffcli.Command{ + Name: "appc-routes", + ShortUsage: "tailscale appc-routes", + Exec: runAppcRoutesInfo, + ShortHelp: "Print the current app connector routes", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("appc-routes") + fs.BoolVar(&appcRoutesArgs.all, "all", false, "Print learned domains and routes and extra policy configured routes.") + fs.BoolVar(&appcRoutesArgs.domainMap, "map", false, "Print the map of learned domains: [routes].") + fs.BoolVar(&appcRoutesArgs.n, "n", false, "Print the total number of routes this node advertises.") + return fs + })(), + LongHelp: strings.TrimSpace(` +The 'tailscale appc-routes' command prints the current App Connector route status. 
+ +By default this command prints the domains configured in the app connector configuration and how many routes have been +learned for each domain. + +--all prints the routes learned from the domains configured in the app connector configuration; and any extra routes provided +in the the policy app connector 'routes' field. + +--map prints the routes learned from the domains configured in the app connector configuration. + +-n prints the total number of routes advertised by this device, whether learned, set in the policy, or set locally. + +For more information about App Connectors, refer to +https://tailscale.com/kb/1281/app-connectors +`), +} + +func getAllOutput(ri *appctype.RouteInfo) (string, error) { + domains, err := json.MarshalIndent(ri.Domains, " ", " ") + if err != nil { + return "", err + } + control, err := json.MarshalIndent(ri.Control, " ", " ") + if err != nil { + return "", err + } + s := fmt.Sprintf(`Learned Routes +============== +%s + +Routes from Policy +================== +%s +`, domains, control) + return s, nil +} + +type domainCount struct { + domain string + count int +} + +func getSummarizeLearnedOutput(ri *appctype.RouteInfo) string { + x := make([]domainCount, len(ri.Domains)) + i := 0 + maxDomainWidth := 0 + for k, v := range ri.Domains { + if len(k) > maxDomainWidth { + maxDomainWidth = len(k) + } + x[i] = domainCount{domain: k, count: len(v)} + i++ + } + slices.SortFunc(x, func(i, j domainCount) int { + if i.count > j.count { + return -1 + } + if i.count < j.count { + return 1 + } + if i.domain > j.domain { + return 1 + } + if i.domain < j.domain { + return -1 + } + return 0 + }) + s := "" + fmtString := fmt.Sprintf("%%-%ds %%d\n", maxDomainWidth) // eg "%-10s %d\n" + for _, dc := range x { + s += fmt.Sprintf(fmtString, dc.domain, dc.count) + } + return s +} + +func runAppcRoutesInfo(ctx context.Context, args []string) error { + prefs, err := localClient.GetPrefs(ctx) + if err != nil { + return err + } + if !prefs.AppConnector.Advertise { + fmt.Println("not a connector") + return nil + } + + if appcRoutesArgs.n { + fmt.Println(len(prefs.AdvertiseRoutes)) + return nil + } + + routeInfo, err := localClient.GetAppConnectorRouteInfo(ctx) + if err != nil { + return err + } + + if appcRoutesArgs.domainMap { + domains, err := json.Marshal(routeInfo.Domains) + if err != nil { + return err + } + fmt.Println(string(domains)) + return nil + } + + if appcRoutesArgs.all { + s, err := getAllOutput(&routeInfo) + if err != nil { + return err + } + fmt.Println(s) + return nil + } + + fmt.Print(getSummarizeLearnedOutput(&routeInfo)) + return nil +} diff --git a/cmd/tailscale/cli/cert.go b/cmd/tailscale/cli/cert.go index 9c8eca5b7d7d0..171eebe1eafc9 100644 --- a/cmd/tailscale/cli/cert.go +++ b/cmd/tailscale/cli/cert.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !js && !ts_omit_acme + package cli import ( @@ -25,19 +27,23 @@ import ( "tailscale.com/version" ) -var certCmd = &ffcli.Command{ - Name: "cert", - Exec: runCert, - ShortHelp: "Get TLS certs", - ShortUsage: "tailscale cert [flags] ", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("cert") - fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") - fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if --cert-file and --key-file are both unset") - fs.BoolVar(&certArgs.serve, "serve-demo", 
false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") - fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") - return fs - })(), +func init() { + maybeCertCmd = func() *ffcli.Command { + return &ffcli.Command{ + Name: "cert", + Exec: runCert, + ShortHelp: "Get TLS certs", + ShortUsage: "tailscale cert [flags] ", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("cert") + fs.StringVar(&certArgs.certFile, "cert-file", "", "output cert file or \"-\" for stdout; defaults to DOMAIN.crt if --cert-file and --key-file are both unset") + fs.StringVar(&certArgs.keyFile, "key-file", "", "output key file or \"-\" for stdout; defaults to DOMAIN.key if --cert-file and --key-file are both unset") + fs.BoolVar(&certArgs.serve, "serve-demo", false, "if true, serve on port :443 using the cert as a demo, instead of writing out the files to disk") + fs.DurationVar(&certArgs.minValidity, "min-validity", 0, "ensure the certificate is valid for at least this duration; the output certificate is never expired if this flag is unset or 0, but the lifetime may vary; the maximum allowed min-validity depends on the CA") + return fs + })(), + } + } } var certArgs struct { diff --git a/cmd/tailscale/cli/cli.go b/cmd/tailscale/cli/cli.go index 46aa29c710333..5ebc23a5befea 100644 --- a/cmd/tailscale/cli/cli.go +++ b/cmd/tailscale/cli/cli.go @@ -18,6 +18,7 @@ import ( "strings" "sync" "text/tabwriter" + "time" "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" @@ -25,6 +26,7 @@ import ( "tailscale.com/client/local" "tailscale.com/cmd/tailscale/cli/ffcomplete" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/paths" "tailscale.com/util/slicesx" "tailscale.com/version/distro" @@ -207,9 +209,17 @@ func noDupFlagify(c *ffcli.Command) { } } -var fileCmd func() *ffcli.Command -var sysPolicyCmd func() *ffcli.Command -var maybeWebCmd func() *ffcli.Command +var ( + fileCmd, + sysPolicyCmd, + maybeWebCmd, + maybeDriveCmd, + maybeNetlockCmd, + maybeFunnelCmd, + maybeServeCmd, + maybeCertCmd, + _ func() *ffcli.Command +) func newRootCmd() *ffcli.Command { rootfs := newFlagSet("tailscale") @@ -249,23 +259,24 @@ change in the future. 
pingCmd, ncCmd, sshCmd, - funnelCmd(), - serveCmd(), + nilOrCall(maybeFunnelCmd), + nilOrCall(maybeServeCmd), versionCmd, nilOrCall(maybeWebCmd), nilOrCall(fileCmd), bugReportCmd, - certCmd, - netlockCmd, + nilOrCall(maybeCertCmd), + nilOrCall(maybeNetlockCmd), licensesCmd, exitNodeCmd(), updateCmd, whoisCmd, debugCmd(), - driveCmd, + nilOrCall(maybeDriveCmd), idTokenCmd, configureHostCmd(), systrayCmd, + appcRoutesCmd, ), FlagSet: rootfs, Exec: func(ctx context.Context, args []string) error { @@ -530,3 +541,28 @@ func jsonDocsWalk(cmd *ffcli.Command) *commandDoc { } return res } + +func lastSeenFmt(t time.Time) string { + if t.IsZero() { + return "" + } + d := max(time.Since(t), time.Minute) // at least 1 minute + + switch { + case d < time.Hour: + return fmt.Sprintf(", last seen %dm ago", int(d.Minutes())) + case d < 24*time.Hour: + return fmt.Sprintf(", last seen %dh ago", int(d.Hours())) + default: + return fmt.Sprintf(", last seen %dd ago", int(d.Hours()/24)) + } +} + +var hookFixTailscaledConnectError feature.Hook[func(error) error] // for cliconndiag + +func fixTailscaledConnectError(origErr error) error { + if f, ok := hookFixTailscaledConnectError.GetOk(); ok { + return f(origErr) + } + return origErr +} diff --git a/cmd/tailscale/cli/configure-synology-cert.go b/cmd/tailscale/cli/configure-synology-cert.go index 663d0c8790456..b5168ef92d11f 100644 --- a/cmd/tailscale/cli/configure-synology-cert.go +++ b/cmd/tailscale/cli/configure-synology-cert.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_acme && !ts_omit_synology + package cli import ( @@ -22,6 +24,10 @@ import ( "tailscale.com/version/distro" ) +func init() { + maybeConfigSynologyCertCmd = synologyConfigureCertCmd +} + func synologyConfigureCertCmd() *ffcli.Command { if runtime.GOOS != "linux" || distro.Get() != distro.Synology { return nil diff --git a/cmd/tailscale/cli/configure-synology-cert_test.go b/cmd/tailscale/cli/configure-synology-cert_test.go index 801285e550d9b..c7da5622fb629 100644 --- a/cmd/tailscale/cli/configure-synology-cert_test.go +++ b/cmd/tailscale/cli/configure-synology-cert_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_acme + package cli import ( diff --git a/cmd/tailscale/cli/configure.go b/cmd/tailscale/cli/configure.go index 0354a19446a8f..20236eb28b5f5 100644 --- a/cmd/tailscale/cli/configure.go +++ b/cmd/tailscale/cli/configure.go @@ -10,7 +10,11 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" ) -var maybeJetKVMConfigureCmd func() *ffcli.Command // non-nil only on Linux/arm for JetKVM +var ( + maybeJetKVMConfigureCmd, + maybeConfigSynologyCertCmd, + _ func() *ffcli.Command // non-nil only on Linux/arm for JetKVM +) func configureCmd() *ffcli.Command { return &ffcli.Command{ @@ -28,7 +32,7 @@ services on the host to use Tailscale in more ways. 
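The CLI changes above replace several direct calls with feature.Hook indirection (hookFixTailscaledConnectError, hookPrintFunnelStatus, tailscale.HookResolveAuthKey) so that optional features can be compiled out with ts_omit_* build tags. Below is a minimal sketch of that pattern, assuming only the Set/GetOk methods exercised in this diff; the package, hook, and function names (example, hookGreet, greet) are hypothetical and not part of the patch.

package example // hypothetical illustration, not part of this patch

import "tailscale.com/feature"

// A typed hook; it stays unset unless a feature file registers an implementation.
var hookGreet feature.Hook[func(string) string]

// In real code this init lives in a separate file guarded by a build tag
// (e.g. //go:build !ts_omit_greet), so omitted builds never register it.
func init() { hookGreet.Set(func(name string) string { return "hello, " + name }) }

func greet(name string) string {
	if f, ok := hookGreet.GetOk(); ok {
		return f(name) // feature linked into this build
	}
	return name // feature omitted; fall back to the plain behavior
}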
Subcommands: nonNilCmds( configureKubeconfigCmd(), synologyConfigureCmd(), - synologyConfigureCertCmd(), + ccall(maybeConfigSynologyCertCmd), ccall(maybeSysExtCmd), ccall(maybeVPNConfigCmd), ccall(maybeJetKVMConfigureCmd), diff --git a/cmd/tailscale/cli/debug-peer-relay.go b/cmd/tailscale/cli/debug-peer-relay.go new file mode 100644 index 0000000000000..bef8b83693aca --- /dev/null +++ b/cmd/tailscale/cli/debug-peer-relay.go @@ -0,0 +1,77 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_relayserver + +package cli + +import ( + "bytes" + "cmp" + "context" + "fmt" + "net/netip" + "slices" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/net/udprelay/status" +) + +func init() { + debugPeerRelayCmd = mkDebugPeerRelaySessionsCmd +} + +func mkDebugPeerRelaySessionsCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "peer-relay-sessions", + ShortUsage: "tailscale debug peer-relay-sessions", + Exec: runPeerRelaySessions, + ShortHelp: "Print the current set of active peer relay sessions relayed through this node", + } +} + +func runPeerRelaySessions(ctx context.Context, args []string) error { + srv, err := localClient.DebugPeerRelaySessions(ctx) + if err != nil { + return err + } + + var buf bytes.Buffer + f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) } + + f("Server port: ") + if srv.UDPPort == nil { + f("not configured (you can configure the port with 'tailscale set --relay-server-port=')") + } else { + f("%d", *srv.UDPPort) + } + f("\n") + f("Sessions count: %d\n", len(srv.Sessions)) + if len(srv.Sessions) == 0 { + Stdout.Write(buf.Bytes()) + return nil + } + + fmtSessionDirection := func(a, z status.ClientInfo) string { + fmtEndpoint := func(ap netip.AddrPort) string { + if ap.IsValid() { + return ap.String() + } + return "" + } + return fmt.Sprintf("%s(%s) --> %s(%s), Packets: %d Bytes: %d", + fmtEndpoint(a.Endpoint), a.ShortDisco, + fmtEndpoint(z.Endpoint), z.ShortDisco, + a.PacketsTx, a.BytesTx) + } + + f("\n") + slices.SortFunc(srv.Sessions, func(s1, s2 status.ServerSession) int { return cmp.Compare(s1.VNI, s2.VNI) }) + for _, s := range srv.Sessions { + f("VNI: %d\n", s.VNI) + f(" %s\n", fmtSessionDirection(s.Client1, s.Client2)) + f(" %s\n", fmtSessionDirection(s.Client2, s.Client1)) + } + Stdout.Write(buf.Bytes()) + return nil +} diff --git a/cmd/tailscale/cli/debug-portmap.go b/cmd/tailscale/cli/debug-portmap.go new file mode 100644 index 0000000000000..d8db1442c7073 --- /dev/null +++ b/cmd/tailscale/cli/debug-portmap.go @@ -0,0 +1,79 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ios && !ts_omit_debugportmapper + +package cli + +import ( + "context" + "flag" + "fmt" + "io" + "net/netip" + "os" + "time" + + "github.com/peterbourgon/ff/v3/ffcli" + "tailscale.com/client/local" +) + +func init() { + debugPortmapCmd = mkDebugPortmapCmd +} + +func mkDebugPortmapCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "portmap", + ShortUsage: "tailscale debug portmap", + Exec: debugPortmap, + ShortHelp: "Run portmap debugging", + FlagSet: (func() *flag.FlagSet { + fs := newFlagSet("portmap") + fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") + fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) + fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) + 
fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) + fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) + return fs + })(), + } +} + +var debugPortmapArgs struct { + duration time.Duration + gatewayAddr string + selfAddr string + ty string + logHTTP bool +} + +func debugPortmap(ctx context.Context, args []string) error { + opts := &local.DebugPortmapOpts{ + Duration: debugPortmapArgs.duration, + Type: debugPortmapArgs.ty, + LogHTTP: debugPortmapArgs.logHTTP, + } + if (debugPortmapArgs.gatewayAddr != "") != (debugPortmapArgs.selfAddr != "") { + return fmt.Errorf("if one of --gateway-addr and --self-addr is provided, the other must be as well") + } + if debugPortmapArgs.gatewayAddr != "" { + var err error + opts.GatewayAddr, err = netip.ParseAddr(debugPortmapArgs.gatewayAddr) + if err != nil { + return fmt.Errorf("invalid --gateway-addr: %w", err) + } + opts.SelfAddr, err = netip.ParseAddr(debugPortmapArgs.selfAddr) + if err != nil { + return fmt.Errorf("invalid --self-addr: %w", err) + } + } + rc, err := localClient.DebugPortmap(ctx, opts) + if err != nil { + return err + } + defer rc.Close() + + _, err = io.Copy(os.Stdout, rc) + return err +} diff --git a/cmd/tailscale/cli/debug.go b/cmd/tailscale/cli/debug.go index 4960aeec2d50a..2836ae29814e7 100644 --- a/cmd/tailscale/cli/debug.go +++ b/cmd/tailscale/cli/debug.go @@ -17,6 +17,7 @@ import ( "log" "net" "net/http" + "net/http/httptrace" "net/http/httputil" "net/netip" "net/url" @@ -28,17 +29,18 @@ import ( "time" "github.com/peterbourgon/ff/v3/ffcli" - "golang.org/x/net/http/httpproxy" - "golang.org/x/net/http2" - "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - "tailscale.com/control/controlhttp" + "tailscale.com/control/ts2021" + "tailscale.com/feature" + _ "tailscale.com/feature/condregister/useproxy" + "tailscale.com/health" "tailscale.com/hostinfo" - "tailscale.com/internal/noiseconn" "tailscale.com/ipn" + "tailscale.com/net/ace" + "tailscale.com/net/dnscache" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" - "tailscale.com/net/tshttpproxy" + "tailscale.com/net/tsdial" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/tailcfg" @@ -49,7 +51,9 @@ import ( ) var ( - debugCaptureCmd func() *ffcli.Command // or nil + debugCaptureCmd func() *ffcli.Command // or nil + debugPortmapCmd func() *ffcli.Command // or nil + debugPeerRelayCmd func() *ffcli.Command // or nil ) func debugCmd() *ffcli.Command { @@ -287,6 +291,8 @@ func debugCmd() *ffcli.Command { fs.StringVar(&ts2021Args.host, "host", "controlplane.tailscale.com", "hostname of control plane") fs.IntVar(&ts2021Args.version, "version", int(tailcfg.CurrentCapabilityVersion), "protocol version") fs.BoolVar(&ts2021Args.verbose, "verbose", false, "be extra verbose") + fs.StringVar(&ts2021Args.aceHost, "ace", "", "if non-empty, use this ACE server IP/hostname as a candidate path") + fs.StringVar(&ts2021Args.dialPlanJSONFile, "dial-plan", "", "if non-empty, use this JSON file to configure the dial plan") return fs })(), }, @@ -319,21 +325,7 @@ func debugCmd() *ffcli.Command { ShortHelp: "Test a DERP configuration", }, ccall(debugCaptureCmd), - { - Name: "portmap", - ShortUsage: "tailscale debug portmap", - Exec: debugPortmap, - ShortHelp: "Run portmap debugging", - FlagSet: (func() *flag.FlagSet { - fs := newFlagSet("portmap") - fs.DurationVar(&debugPortmapArgs.duration, "duration", 5*time.Second, "timeout for port mapping") - 
fs.StringVar(&debugPortmapArgs.ty, "type", "", `portmap debug type (one of "", "pmp", "pcp", or "upnp")`) - fs.StringVar(&debugPortmapArgs.gatewayAddr, "gateway-addr", "", `override gateway IP (must also pass --self-addr)`) - fs.StringVar(&debugPortmapArgs.selfAddr, "self-addr", "", `override self IP (must also pass --gateway-addr)`) - fs.BoolVar(&debugPortmapArgs.logHTTP, "log-http", false, `print all HTTP requests and responses to the log`) - return fs - })(), - }, + ccall(debugPortmapCmd), { Name: "peer-endpoint-changes", ShortUsage: "tailscale debug peer-endpoint-changes ", @@ -385,6 +377,7 @@ func debugCmd() *ffcli.Command { return fs })(), }, + ccall(debugPeerRelayCmd), }...), } } @@ -978,6 +971,9 @@ var ts2021Args struct { host string // "controlplane.tailscale.com" version int // 27 or whatever verbose bool + aceHost string // if non-empty, FQDN of https ACE server to use ("ace.example.com") + + dialPlanJSONFile string // if non-empty, path to JSON file [tailcfg.ControlDialPlan] JSON } func runTS2021(ctx context.Context, args []string) error { @@ -986,19 +982,22 @@ func runTS2021(ctx context.Context, args []string) error { keysURL := "https://" + ts2021Args.host + "/key?v=" + strconv.Itoa(ts2021Args.version) + keyTransport := http.DefaultTransport.(*http.Transport).Clone() + if ts2021Args.aceHost != "" { + log.Printf("using ACE server %q", ts2021Args.aceHost) + keyTransport.Proxy = nil + keyTransport.DialContext = (&ace.Dialer{ACEHost: ts2021Args.aceHost}).Dial + } + if ts2021Args.verbose { u, err := url.Parse(keysURL) if err != nil { return err } - envConf := httpproxy.FromEnvironment() - if *envConf == (httpproxy.Config{}) { - log.Printf("HTTP proxy env: (none)") - } else { - log.Printf("HTTP proxy env: %+v", envConf) + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + proxy, err := proxyFromEnv(&http.Request{URL: u}) + log.Printf("tshttpproxy.ProxyFromEnvironment = (%v, %v)", proxy, err) } - proxy, err := tshttpproxy.ProxyFromEnvironment(&http.Request{URL: u}) - log.Printf("tshttpproxy.ProxyFromEnvironment = (%v, %v)", proxy, err) } machinePrivate := key.NewMachine() var dialer net.Dialer @@ -1011,7 +1010,7 @@ func runTS2021(ctx context.Context, args []string) error { if err != nil { return err } - res, err := http.DefaultClient.Do(req) + res, err := keyTransport.RoundTrip(req) if err != nil { log.Printf("Do: %v", err) return err @@ -1055,20 +1054,45 @@ func runTS2021(ctx context.Context, args []string) error { return fmt.Errorf("creating netmon: %w", err) } - noiseDialer := &controlhttp.Dialer{ - Hostname: ts2021Args.host, - HTTPPort: "80", - HTTPSPort: "443", - MachineKey: machinePrivate, - ControlKey: keys.PublicKey, - ProtocolVersion: uint16(ts2021Args.version), - Dialer: dialFunc, - Logf: logf, - NetMon: netMon, + var dialPlan *tailcfg.ControlDialPlan + if ts2021Args.dialPlanJSONFile != "" { + b, err := os.ReadFile(ts2021Args.dialPlanJSONFile) + if err != nil { + return fmt.Errorf("reading dial plan JSON file: %w", err) + } + dialPlan = new(tailcfg.ControlDialPlan) + if err := json.Unmarshal(b, dialPlan); err != nil { + return fmt.Errorf("unmarshaling dial plan JSON file: %w", err) + } + } else if ts2021Args.aceHost != "" { + dialPlan = &tailcfg.ControlDialPlan{ + Candidates: []tailcfg.ControlIPCandidate{ + { + ACEHost: ts2021Args.aceHost, + DialTimeoutSec: 10, + }, + }, + } + } + + opts := ts2021.ClientOpts{ + ServerURL: "https://" + ts2021Args.host, + DialPlan: func() *tailcfg.ControlDialPlan { + return dialPlan + }, + Logf: logf, + NetMon: netMon, + 
PrivKey: machinePrivate, + ServerPubKey: keys.PublicKey, + Dialer: tsdial.NewFromFuncForDebug(logf, dialFunc), + DNSCache: &dnscache.Resolver{}, + HealthTracker: &health.Tracker{}, } + + // TODO: ProtocolVersion: uint16(ts2021Args.version), const tries = 2 for i := range tries { - err := tryConnect(ctx, keys.PublicKey, noiseDialer) + err := tryConnect(ctx, keys.PublicKey, opts) if err != nil { log.Printf("error on attempt %d/%d: %v", i+1, tries, err) continue @@ -1078,53 +1102,37 @@ func runTS2021(ctx context.Context, args []string) error { return nil } -func tryConnect(ctx context.Context, controlPublic key.MachinePublic, noiseDialer *controlhttp.Dialer) error { - conn, err := noiseDialer.Dial(ctx) - log.Printf("controlhttp.Dial = %p, %v", conn, err) - if err != nil { - return err - } - log.Printf("did noise handshake") - - gotPeer := conn.Peer() - if gotPeer != controlPublic { - log.Printf("peer = %v, want %v", gotPeer, controlPublic) - return errors.New("key mismatch") - } - - log.Printf("final underlying conn: %v / %v", conn.LocalAddr(), conn.RemoteAddr()) +func tryConnect(ctx context.Context, controlPublic key.MachinePublic, opts ts2021.ClientOpts) error { - h2Transport, err := http2.ConfigureTransports(&http.Transport{ - IdleConnTimeout: time.Second, + ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + GotConn: func(ci httptrace.GotConnInfo) { + log.Printf("GotConn: %T", ci.Conn) + ncc, ok := ci.Conn.(*ts2021.Conn) + if !ok { + return + } + log.Printf("did noise handshake") + log.Printf("final underlying conn: %v / %v", ncc.LocalAddr(), ncc.RemoteAddr()) + gotPeer := ncc.Peer() + if gotPeer != controlPublic { + log.Fatalf("peer = %v, want %v", gotPeer, controlPublic) + } + }, }) - if err != nil { - return fmt.Errorf("http2.ConfigureTransports: %w", err) - } - - // Now, create a Noise conn over the existing conn. - nc, err := noiseconn.New(conn.Conn, h2Transport, 0, nil) - if err != nil { - return fmt.Errorf("noiseconn.New: %w", err) - } - defer nc.Close() - // Reserve a RoundTrip for the whoami request. - ok, _, err := nc.ReserveNewRequest(ctx) + nc, err := ts2021.NewClient(opts) if err != nil { - return fmt.Errorf("ReserveNewRequest: %w", err) - } - if !ok { - return errors.New("ReserveNewRequest failed") + return fmt.Errorf("NewNoiseClient: %w", err) } // Make a /whoami request to the server to verify that we can actually // communicate over the newly-established connection. 
- whoamiURL := "http://" + ts2021Args.host + "/machine/whoami" + whoamiURL := "https://" + ts2021Args.host + "/machine/whoami" req, err := http.NewRequestWithContext(ctx, "GET", whoamiURL, nil) if err != nil { return err } - resp, err := nc.RoundTrip(req) + resp, err := nc.Do(req) if err != nil { return fmt.Errorf("RoundTrip whoami request: %w", err) } @@ -1210,44 +1218,6 @@ func runSetExpire(ctx context.Context, args []string) error { return localClient.DebugSetExpireIn(ctx, setExpireArgs.in) } -var debugPortmapArgs struct { - duration time.Duration - gatewayAddr string - selfAddr string - ty string - logHTTP bool -} - -func debugPortmap(ctx context.Context, args []string) error { - opts := &local.DebugPortmapOpts{ - Duration: debugPortmapArgs.duration, - Type: debugPortmapArgs.ty, - LogHTTP: debugPortmapArgs.logHTTP, - } - if (debugPortmapArgs.gatewayAddr != "") != (debugPortmapArgs.selfAddr != "") { - return fmt.Errorf("if one of --gateway-addr and --self-addr is provided, the other must be as well") - } - if debugPortmapArgs.gatewayAddr != "" { - var err error - opts.GatewayAddr, err = netip.ParseAddr(debugPortmapArgs.gatewayAddr) - if err != nil { - return fmt.Errorf("invalid --gateway-addr: %w", err) - } - opts.SelfAddr, err = netip.ParseAddr(debugPortmapArgs.selfAddr) - if err != nil { - return fmt.Errorf("invalid --self-addr: %w", err) - } - } - rc, err := localClient.DebugPortmap(ctx, opts) - if err != nil { - return err - } - defer rc.Close() - - _, err = io.Copy(os.Stdout, rc) - return err -} - func runPeerEndpointChanges(ctx context.Context, args []string) error { st, err := localClient.Status(ctx) if err != nil { diff --git a/cmd/tailscale/cli/diag.go b/cmd/tailscale/cli/diag.go index ebf26985fe0bd..3b2aa504b9ea7 100644 --- a/cmd/tailscale/cli/diag.go +++ b/cmd/tailscale/cli/diag.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux || windows || darwin +//go:build (linux || windows || darwin) && !ts_omit_cliconndiag package cli @@ -16,11 +16,15 @@ import ( "tailscale.com/version/distro" ) -// fixTailscaledConnectError is called when the local tailscaled has +func init() { + hookFixTailscaledConnectError.Set(fixTailscaledConnectErrorImpl) +} + +// fixTailscaledConnectErrorImpl is called when the local tailscaled has // been determined unreachable due to the provided origErr value. It // returns either the same error or a better one to help the user // understand why tailscaled isn't running for their platform. -func fixTailscaledConnectError(origErr error) error { +func fixTailscaledConnectErrorImpl(origErr error) error { procs, err := ps.Processes() if err != nil { return fmt.Errorf("failed to connect to local Tailscaled process and failed to enumerate processes while looking for it") diff --git a/cmd/tailscale/cli/diag_other.go b/cmd/tailscale/cli/diag_other.go deleted file mode 100644 index ece10cc79a822..0000000000000 --- a/cmd/tailscale/cli/diag_other.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux && !windows && !darwin - -package cli - -import "fmt" - -// The github.com/mitchellh/go-ps package doesn't work on all platforms, -// so just don't diagnose connect failures. 
- -func fixTailscaledConnectError(origErr error) error { - return fmt.Errorf("failed to connect to local tailscaled process (is it running?); got: %w", origErr) -} diff --git a/cmd/tailscale/cli/drive.go b/cmd/tailscale/cli/drive.go index 929852b4c5a32..131f468477314 100644 --- a/cmd/tailscale/cli/drive.go +++ b/cmd/tailscale/cli/drive.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_drive && !ts_mac_gui + package cli import ( @@ -20,43 +22,49 @@ const ( driveListUsage = "tailscale drive list" ) -var driveCmd = &ffcli.Command{ - Name: "drive", - ShortHelp: "Share a directory with your tailnet", - ShortUsage: strings.Join([]string{ - driveShareUsage, - driveRenameUsage, - driveUnshareUsage, - driveListUsage, - }, "\n"), - LongHelp: buildShareLongHelp(), - UsageFunc: usageFuncNoDefaultValues, - Subcommands: []*ffcli.Command{ - { - Name: "share", - ShortUsage: driveShareUsage, - Exec: runDriveShare, - ShortHelp: "[ALPHA] Create or modify a share", - }, - { - Name: "rename", - ShortUsage: driveRenameUsage, - ShortHelp: "[ALPHA] Rename a share", - Exec: runDriveRename, - }, - { - Name: "unshare", - ShortUsage: driveUnshareUsage, - ShortHelp: "[ALPHA] Remove a share", - Exec: runDriveUnshare, - }, - { - Name: "list", - ShortUsage: driveListUsage, - ShortHelp: "[ALPHA] List current shares", - Exec: runDriveList, +func init() { + maybeDriveCmd = driveCmd +} + +func driveCmd() *ffcli.Command { + return &ffcli.Command{ + Name: "drive", + ShortHelp: "Share a directory with your tailnet", + ShortUsage: strings.Join([]string{ + driveShareUsage, + driveRenameUsage, + driveUnshareUsage, + driveListUsage, + }, "\n"), + LongHelp: buildShareLongHelp(), + UsageFunc: usageFuncNoDefaultValues, + Subcommands: []*ffcli.Command{ + { + Name: "share", + ShortUsage: driveShareUsage, + Exec: runDriveShare, + ShortHelp: "[ALPHA] Create or modify a share", + }, + { + Name: "rename", + ShortUsage: driveRenameUsage, + ShortHelp: "[ALPHA] Rename a share", + Exec: runDriveRename, + }, + { + Name: "unshare", + ShortUsage: driveUnshareUsage, + ShortHelp: "[ALPHA] Remove a share", + Exec: runDriveUnshare, + }, + { + Name: "list", + ShortUsage: driveListUsage, + ShortHelp: "[ALPHA] List current shares", + Exec: runDriveList, + }, }, - }, + } } // runDriveShare is the entry point for the "tailscale drive share" command. diff --git a/cmd/tailscale/cli/exitnode.go b/cmd/tailscale/cli/exitnode.go index b153f096d6869..b47b9f0bd4949 100644 --- a/cmd/tailscale/cli/exitnode.go +++ b/cmd/tailscale/cli/exitnode.go @@ -173,11 +173,13 @@ func hasAnyExitNodeSuggestions(peers []*ipnstate.PeerStatus) bool { // a peer. If there is no notable state, a - is returned. func peerStatus(peer *ipnstate.PeerStatus) string { if !peer.Active { + lastseen := lastSeenFmt(peer.LastSeen) + if peer.ExitNode { - return "selected but offline" + return "selected but offline" + lastseen } if !peer.Online { - return "offline" + return "offline" + lastseen } } diff --git a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go index 7e36b1bcd1437..c216bdeec500d 100644 --- a/cmd/tailscale/cli/ffcomplete/internal/complete_test.go +++ b/cmd/tailscale/cli/ffcomplete/internal/complete_test.go @@ -196,7 +196,6 @@ func TestComplete(t *testing.T) { // Run the tests. 
for _, test := range tests { - test := test name := strings.Join(test.args, "␣") if test.showFlags { name += "+flags" diff --git a/cmd/tailscale/cli/funnel.go b/cmd/tailscale/cli/funnel.go index f4a1c6bfdb3b8..34b0c74c23949 100644 --- a/cmd/tailscale/cli/funnel.go +++ b/cmd/tailscale/cli/funnel.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -16,6 +18,10 @@ import ( "tailscale.com/tailcfg" ) +func init() { + maybeFunnelCmd = funnelCmd +} + var funnelCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true @@ -174,3 +180,42 @@ func printFunnelWarning(sc *ipn.ServeConfig) { fmt.Fprintf(Stderr, " run: `tailscale serve --help` to see how to configure handlers\n") } } + +func init() { + hookPrintFunnelStatus.Set(printFunnelStatus) +} + +// printFunnelStatus prints the status of the funnel, if it's running. +// It prints nothing if the funnel is not running. +func printFunnelStatus(ctx context.Context) { + sc, err := localClient.GetServeConfig(ctx) + if err != nil { + outln() + printf("# Funnel:\n") + printf("# - Unable to get Funnel status: %v\n", err) + return + } + if !sc.IsFunnelOn() { + return + } + outln() + printf("# Funnel on:\n") + for hp, on := range sc.AllowFunnel { + if !on { // if present, should be on + continue + } + sni, portStr, _ := net.SplitHostPort(string(hp)) + p, _ := strconv.ParseUint(portStr, 10, 16) + isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) + url := "https://" + if isTCP { + url = "tcp://" + } + url += sni + if isTCP || p != 443 { + url += ":" + portStr + } + printf("# - %s\n", url) + } + outln() +} diff --git a/cmd/tailscale/cli/netcheck.go b/cmd/tailscale/cli/netcheck.go index 0bdab59cb8beb..5ae8db8fa3fbb 100644 --- a/cmd/tailscale/cli/netcheck.go +++ b/cmd/tailscale/cli/netcheck.go @@ -17,14 +17,23 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/tlsdial" "tailscale.com/tailcfg" "tailscale.com/types/logger" "tailscale.com/util/eventbus" + + // The "netcheck" command also wants the portmapper linked. + // + // TODO: make that subcommand either hit LocalAPI for that info, or use a + // tailscaled subcommand, to avoid making the CLI also link in the portmapper. + // For now (2025-09-15), keep doing what we've done for the past five years and + // keep linking it here. + _ "tailscale.com/feature/condregister/portmapper" ) var netcheckCmd = &ffcli.Command{ @@ -56,14 +65,13 @@ func runNetcheck(ctx context.Context, args []string) error { return err } - // Ensure that we close the portmapper after running a netcheck; this - // will release any port mappings created. - pm := portmapper.NewClient(portmapper.Config{ - Logf: logf, - NetMon: netMon, - EventBus: bus, - }) - defer pm.Close() + var pm portmappertype.Client + if buildfeatures.HasPortMapper { + // Ensure that we close the portmapper after running a netcheck; this + // will release any port mappings created. 
+ pm = portmappertype.HookNewPortMapper.Get()(logf, bus, netMon, nil, nil) + defer pm.Close() + } c := &netcheck.Client{ NetMon: netMon, @@ -210,6 +218,9 @@ func printReport(dm *tailcfg.DERPMap, report *netcheck.Report) error { } func portMapping(r *netcheck.Report) string { + if !buildfeatures.HasPortMapper { + return "binary built without portmapper support" + } if !r.AnyPortMappingChecked() { return "not checked" } diff --git a/cmd/tailscale/cli/network-lock.go b/cmd/tailscale/cli/network-lock.go index d19909576c090..a15d9ab88b596 100644 --- a/cmd/tailscale/cli/network-lock.go +++ b/cmd/tailscale/cli/network-lock.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package cli import ( @@ -27,6 +29,10 @@ import ( "tailscale.com/util/prompt" ) +func init() { + maybeNetlockCmd = func() *ffcli.Command { return netlockCmd } +} + var netlockCmd = &ffcli.Command{ Name: "lock", ShortUsage: "tailscale lock [arguments...]", @@ -219,18 +225,18 @@ func runNetworkLockStatus(ctx context.Context, args []string) error { } if st.Enabled { - fmt.Println("Tailnet lock is ENABLED.") + fmt.Println("Tailnet Lock is ENABLED.") } else { - fmt.Println("Tailnet lock is NOT enabled.") + fmt.Println("Tailnet Lock is NOT enabled.") } fmt.Println() if st.Enabled && st.NodeKey != nil && !st.PublicKey.IsZero() { if st.NodeKeySigned { - fmt.Println("This node is accessible under tailnet lock. Node signature:") + fmt.Println("This node is accessible under Tailnet Lock. Node signature:") fmt.Println(st.NodeKeySignature.String()) } else { - fmt.Println("This node is LOCKED OUT by tailnet-lock, and action is required to establish connectivity.") + fmt.Println("This node is LOCKED OUT by Tailnet Lock, and action is required to establish connectivity.") fmt.Printf("Run the following command on a node with a trusted key:\n\ttailscale lock sign %v %s\n", st.NodeKey, st.PublicKey.CLIString()) } fmt.Println() @@ -378,7 +384,7 @@ Removal of a signing key(s) without resigning nodes (--re-sign=false) will cause any nodes signed by the the given key(s) to be locked out of the Tailscale network. Proceed with caution. 
`) - if !prompt.YesNo("Are you sure you want to remove the signing key(s)?") { + if !prompt.YesNo("Are you sure you want to remove the signing key(s)?", true) { fmt.Printf("aborting removal of signing key(s)\n") os.Exit(0) } @@ -684,6 +690,14 @@ func nlDescribeUpdate(update ipnstate.NetworkLockUpdate, color bool) (string, er } func runNetworkLockLog(ctx context.Context, args []string) error { + st, err := localClient.NetworkLockStatus(ctx) + if err != nil { + return fixTailscaledConnectError(err) + } + if !st.Enabled { + return errors.New("Tailnet Lock is not enabled") + } + updates, err := localClient.NetworkLockLog(ctx, nlLogArgs.limit) if err != nil { return fixTailscaledConnectError(err) diff --git a/cmd/tailscale/cli/risks.go b/cmd/tailscale/cli/risks.go index dfde87f640a16..d4572842bf758 100644 --- a/cmd/tailscale/cli/risks.go +++ b/cmd/tailscale/cli/risks.go @@ -66,7 +66,7 @@ func presentRiskToUser(riskType, riskMessage, acceptedRisks string) error { outln(riskMessage) printf("To skip this warning, use --accept-risk=%s\n", riskType) - if prompt.YesNo("Continue?") { + if prompt.YesNo("Continue?", false) { return nil } diff --git a/cmd/tailscale/cli/serve_legacy.go b/cmd/tailscale/cli/serve_legacy.go index 3fbddeabf8d4e..95808fdf2eb34 100644 --- a/cmd/tailscale/cli/serve_legacy.go +++ b/cmd/tailscale/cli/serve_legacy.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -31,6 +33,10 @@ import ( "tailscale.com/version" ) +func init() { + maybeServeCmd = serveCmd +} + var serveCmd = func() *ffcli.Command { se := &serveEnv{lc: &localClient} // previously used to serve legacy newFunnelCommand unless useWIPCode is true @@ -166,6 +172,7 @@ type serveEnv struct { yes bool // update without prompt service tailcfg.ServiceName // service name tun bool // redirect traffic to OS for service + allServices bool // apply config file to all services lc localServeClient // localClient interface, specific to serve diff --git a/cmd/tailscale/cli/serve_v2.go b/cmd/tailscale/cli/serve_v2.go index 903036db4a6e7..9b0af2cad7a0c 100644 --- a/cmd/tailscale/cli/serve_v2.go +++ b/cmd/tailscale/cli/serve_v2.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package cli import ( @@ -26,10 +28,13 @@ import ( "github.com/peterbourgon/ff/v3/ffcli" "tailscale.com/client/local" "tailscale.com/ipn" + "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnstate" "tailscale.com/tailcfg" + "tailscale.com/types/ipproto" "tailscale.com/util/mak" "tailscale.com/util/prompt" + "tailscale.com/util/set" "tailscale.com/util/slicesx" "tailscale.com/version" ) @@ -126,6 +131,22 @@ const ( serveTypeTUN ) +func serveTypeFromConfString(sp conffile.ServiceProtocol) (st serveType, ok bool) { + switch sp { + case conffile.ProtoHTTP: + return serveTypeHTTP, true + case conffile.ProtoHTTPS, conffile.ProtoHTTPSInsecure, conffile.ProtoFile: + return serveTypeHTTPS, true + case conffile.ProtoTCP: + return serveTypeTCP, true + case conffile.ProtoTLSTerminatedTCP: + return serveTypeTLSTerminatedTCP, true + case conffile.ProtoTUN: + return serveTypeTUN, true + } + return -1, false +} + const noService tailcfg.ServiceName = "" var infoMap = map[serveMode]commandInfo{ @@ -230,6 +251,33 @@ func newServeV2Command(e *serveEnv, subcmd serveMode) *ffcli.Command { "`tailscale serve drain `). 
This is not needed if you are using `tailscale serve` to initialize a service.", Exec: e.runServeAdvertise, }, + { + Name: "get-config", + ShortUsage: fmt.Sprintf("tailscale %s get-config [--service=] [--all]", info.Name), + ShortHelp: "Get service configuration to save to a file", + LongHelp: hidden + "Get the configuration for services that this node is currently hosting in a\n" + + "format that can later be provided to set-config. This can be used to declaratively set\n" + + "configuration for a service host.", + Exec: e.runServeGetConfig, + FlagSet: e.newFlags("serve-get-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "read config from all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "read config from a particular service") + }), + }, + { + Name: "set-config", + ShortUsage: fmt.Sprintf("tailscale %s set-config [--service=] [--all]", info.Name), + ShortHelp: "Define service configuration from a file", + LongHelp: hidden + "Read the provided configuration file and use it to declaratively set the configuration\n" + + "for either a single service, or for all services that this node is hosting. If --service is specified,\n" + + "all endpoint handlers for that service are overwritten. If --all is specified, all endpoint handlers for\n" + + "all services are overwritten.", + Exec: e.runServeSetConfig, + FlagSet: e.newFlags("serve-set-config", func(fs *flag.FlagSet) { + fs.BoolVar(&e.allServices, "all", false, "apply config to all services") + fs.Var(&serviceNameFlag{Value: &e.service}, "service", "apply config to a particular service") + }), + }, }, } } @@ -538,7 +586,7 @@ func (e *serveEnv) runServeClear(ctx context.Context, args []string) error { func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { if len(args) == 0 { - return fmt.Errorf("error: missing service name argument") + return errors.New("error: missing service name argument") } if len(args) != 1 { fmt.Fprintf(Stderr, "error: invalid number of arguments\n\n") @@ -551,6 +599,258 @@ func (e *serveEnv) runServeAdvertise(ctx context.Context, args []string) error { return e.addServiceToPrefs(ctx, svc) } +func (e *serveEnv) runServeGetConfig(ctx context.Context, args []string) (err error) { + forSingleService := e.service.Validate() == nil + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return err + } + + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return err + } + advertised := set.SetOf(prefs.AdvertiseServices) + + st, err := e.getLocalClientStatusWithoutPeers(ctx) + if err != nil { + return err + } + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + + handleService := func(svcName tailcfg.ServiceName, serviceConfig *ipn.ServiceConfig) (*conffile.ServiceDetailsFile, error) { + var sdf conffile.ServiceDetailsFile + // Leave unset for true case since that's the default. 
+ if !advertised.Contains(svcName.String()) { + sdf.Advertised.Set(false) + } + + if serviceConfig.Tun { + mak.Set(&sdf.Endpoints, &tailcfg.ProtoPortRange{Ports: tailcfg.PortRangeAny}, &conffile.Target{ + Protocol: conffile.ProtoTUN, + Destination: "", + DestinationPorts: tailcfg.PortRange{}, + }) + } + + for port, config := range serviceConfig.TCP { + sniName := fmt.Sprintf("%s.%s", svcName.WithoutPrefix(), magicDNSSuffix) + ppr := tailcfg.ProtoPortRange{Proto: int(ipproto.TCP), Ports: tailcfg.PortRange{First: port, Last: port}} + if config.TCPForward != "" { + var proto conffile.ServiceProtocol + if config.TerminateTLS != "" { + proto = conffile.ProtoTLSTerminatedTCP + } else { + proto = conffile.ProtoTCP + } + destHost, destPortStr, err := net.SplitHostPort(config.TCPForward) + if err != nil { + return nil, fmt.Errorf("parse TCPForward=%q: %w", config.TCPForward, err) + } + destPort, err := strconv.ParseUint(destPortStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("parse port %q: %w", destPortStr, err) + } + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: proto, + Destination: destHost, + DestinationPorts: tailcfg.PortRange{First: uint16(destPort), Last: uint16(destPort)}, + }) + } else if config.HTTP || config.HTTPS { + webKey := ipn.HostPort(net.JoinHostPort(sniName, strconv.FormatUint(uint64(port), 10))) + handlers, ok := serviceConfig.Web[webKey] + if !ok { + return nil, fmt.Errorf("service %q: HTTP/HTTPS is set but no handlers in config", svcName) + } + defaultHandler, ok := handlers.Handlers["/"] + if !ok { + return nil, fmt.Errorf("service %q: root handler not set", svcName) + } + if defaultHandler.Path != "" { + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: conffile.ProtoFile, + Destination: defaultHandler.Path, + DestinationPorts: tailcfg.PortRange{}, + }) + } else if defaultHandler.Proxy != "" { + proto, rest, ok := strings.Cut(defaultHandler.Proxy, "://") + if !ok { + return nil, fmt.Errorf("service %q: invalid proxy handler %q", svcName, defaultHandler.Proxy) + } + host, portStr, err := net.SplitHostPort(rest) + if err != nil { + return nil, fmt.Errorf("service %q: invalid proxy handler %q: %w", svcName, defaultHandler.Proxy, err) + } + + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, fmt.Errorf("service %q: parse port %q: %w", svcName, portStr, err) + } + + mak.Set(&sdf.Endpoints, &ppr, &conffile.Target{ + Protocol: conffile.ServiceProtocol(proto), + Destination: host, + DestinationPorts: tailcfg.PortRange{First: uint16(port), Last: uint16(port)}, + }) + } + } + } + + return &sdf, nil + } + + var j []byte + + if e.allServices && forSingleService { + return errors.New("cannot specify both --all and --service") + } else if e.allServices { + var scf conffile.ServicesConfigFile + scf.Version = "0.0.1" + for svcName, serviceConfig := range sc.Services { + sdf, err := handleService(svcName, serviceConfig) + if err != nil { + return err + } + mak.Set(&scf.Services, svcName, sdf) + } + j, err = json.MarshalIndent(scf, "", " ") + if err != nil { + return err + } + } else if forSingleService { + serviceConfig, ok := sc.Services[e.service] + if !ok { + j = []byte("{}") + } else { + sdf, err := handleService(e.service, serviceConfig) + if err != nil { + return err + } + sdf.Version = "0.0.1" + j, err = json.MarshalIndent(sdf, "", " ") + if err != nil { + return err + } + } + } else { + return errors.New("must specify either --service=svc: or --all") + } + + j = append(j, '\n') + _, err = e.stdout().Write(j) + return err 
+} + +func (e *serveEnv) runServeSetConfig(ctx context.Context, args []string) (err error) { + if len(args) != 1 { + return errors.New("must specify filename") + } + forSingleService := e.service.Validate() == nil + + var scf *conffile.ServicesConfigFile + if e.allServices && forSingleService { + return errors.New("cannot specify both --all and --service") + } else if e.allServices { + scf, err = conffile.LoadServicesConfig(args[0], "") + } else if forSingleService { + scf, err = conffile.LoadServicesConfig(args[0], e.service.String()) + } else { + return errors.New("must specify either --service=svc: or --all") + } + if err != nil { + return fmt.Errorf("could not read config from file %q: %w", args[0], err) + } + + st, err := e.getLocalClientStatusWithoutPeers(ctx) + if err != nil { + return fmt.Errorf("getting client status: %w", err) + } + magicDNSSuffix := st.CurrentTailnet.MagicDNSSuffix + sc, err := e.lc.GetServeConfig(ctx) + if err != nil { + return fmt.Errorf("getting current serve config: %w", err) + } + + // Clear all existing config. + if forSingleService { + if sc.Services != nil { + if sc.Services[e.service] != nil { + delete(sc.Services, e.service) + } + } + } else { + sc.Services = map[tailcfg.ServiceName]*ipn.ServiceConfig{} + } + advertisedServices := set.Set[string]{} + + for name, details := range scf.Services { + for ppr, ep := range details.Endpoints { + if ep.Protocol == conffile.ProtoTUN { + err := e.setServe(sc, name.String(), serveTypeTUN, 0, "", "", false, magicDNSSuffix) + if err != nil { + return err + } + // TUN mode is exclusive. + break + } + + if ppr.Proto != int(ipproto.TCP) { + return fmt.Errorf("service %q: source ports must be TCP", name) + } + serveType, _ := serveTypeFromConfString(ep.Protocol) + for port := ppr.Ports.First; port <= ppr.Ports.Last; port++ { + var target string + if ep.Protocol == conffile.ProtoFile { + target = ep.Destination + } else { + // map source port range 1-1 to destination port range + destPort := ep.DestinationPorts.First + (port - ppr.Ports.First) + portStr := fmt.Sprint(destPort) + target = fmt.Sprintf("%s://%s", ep.Protocol, net.JoinHostPort(ep.Destination, portStr)) + } + err := e.setServe(sc, name.String(), serveType, port, "/", target, false, magicDNSSuffix) + if err != nil { + return fmt.Errorf("service %q: %w", name, err) + } + } + } + if v, set := details.Advertised.Get(); !set || v { + advertisedServices.Add(name.String()) + } + } + + var changed bool + var servicesList []string + if e.allServices { + servicesList = advertisedServices.Slice() + changed = true + } else if advertisedServices.Contains(e.service.String()) { + // If allServices wasn't set, the only service that could have been + // advertised is the one that was provided as a flag. + prefs, err := e.lc.GetPrefs(ctx) + if err != nil { + return err + } + if !slices.Contains(prefs.AdvertiseServices, e.service.String()) { + servicesList = append(prefs.AdvertiseServices, e.service.String()) + changed = true + } + } + if changed { + _, err = e.lc.EditPrefs(ctx, &ipn.MaskedPrefs{ + AdvertiseServicesSet: true, + Prefs: ipn.Prefs{ + AdvertiseServices: servicesList, + }, + }) + if err != nil { + return err + } + } + + return e.lc.SetServeConfig(ctx, sc) +} + const backgroundExistsMsg = "background configuration already exists, use `tailscale %s --%s=%d off` to remove the existing configuration" // validateConfig checks if the serve config is valid to serve the type wanted on the port. 
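In runServeSetConfig above, a TCP source port range is expanded one port at a time and mapped one-to-one onto the destination range (destPort = DestinationPorts.First + (port - Ports.First)). The following standalone sketch only illustrates that arithmetic; expandRange and the port numbers are hypothetical, not part of the patch.

package main // hypothetical illustration, not part of this patch

import "fmt"

// expandRange mirrors the mapping used by runServeSetConfig: the Nth port in
// the source range forwards to the Nth port in the destination range.
func expandRange(srcFirst, srcLast, dstFirst uint16) map[uint16]uint16 {
	m := make(map[uint16]uint16)
	for port := srcFirst; port <= srcLast; port++ {
		m[port] = dstFirst + (port - srcFirst)
	}
	return m
}

func main() {
	// Serve source ports 8080-8082, proxying to 9090-9092 on the destination.
	fmt.Println(expandRange(8080, 8082, 9090)) // map[8080:9090 8081:9091 8082:9092]
}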
@@ -1084,7 +1384,7 @@ func (e *serveEnv) removeWebServe(sc *ipn.ServeConfig, dnsName string, srvPort u if len(mounts) > 1 { msg := fmt.Sprintf("Are you sure you want to delete %d handlers under port %s?", len(mounts), portStr) - if !e.yes && !prompt.YesNo(msg) { + if !e.yes && !prompt.YesNo(msg, true) { return nil } } diff --git a/cmd/tailscale/cli/set.go b/cmd/tailscale/cli/set.go index c0ce0b1c137ac..43f8bbbc34afd 100644 --- a/cmd/tailscale/cli/set.go +++ b/cmd/tailscale/cli/set.go @@ -15,8 +15,8 @@ import ( "strings" "github.com/peterbourgon/ff/v3/ffcli" - "tailscale.com/clientupdate" "tailscale.com/cmd/tailscale/cli/ffcomplete" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/netutil" "tailscale.com/net/tsaddr" @@ -85,7 +85,7 @@ func newSetFlagSet(goos string, setArgs *setArgsT) *flag.FlagSet { setf.BoolVar(&setArgs.updateApply, "auto-update", false, "automatically update to the latest available version") setf.BoolVar(&setArgs.reportPosture, "report-posture", false, "allow management plane to gather device posture information") setf.BoolVar(&setArgs.runWebClient, "webclient", false, "expose the web interface for managing this node over Tailscale at port 5252") - setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", hidden+"UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") + setf.StringVar(&setArgs.relayServerPort, "relay-server-port", "", "UDP port number (0 will pick a random unused port) for the relay server to bind to, on all interfaces, or empty string to disable relay server functionality") ffcomplete.Flag(setf, "exit-node", func(args []string) ([]string, ffcomplete.ShellCompDirective, error) { st, err := localClient.Status(context.Background()) @@ -226,21 +226,14 @@ func runSet(ctx context.Context, args []string) (retErr error) { return err } } - if maskedPrefs.AutoUpdateSet.ApplySet { - if !clientupdate.CanAutoUpdate() { - return errors.New("automatic updates are not supported on this platform") + if maskedPrefs.AutoUpdateSet.ApplySet && buildfeatures.HasClientUpdate && version.IsMacSysExt() { + apply := "0" + if maskedPrefs.AutoUpdate.Apply.EqualBool(true) { + apply = "1" } - // On macsys, tailscaled will set the Sparkle auto-update setting. It - // does not use clientupdate. 
- if version.IsMacSysExt() { - apply := "0" - if maskedPrefs.AutoUpdate.Apply.EqualBool(true) { - apply = "1" - } - out, err := exec.Command("defaults", "write", "io.tailscale.ipn.macsys", "SUAutomaticallyUpdate", apply).CombinedOutput() - if err != nil { - return fmt.Errorf("failed to enable automatic updates: %v, %q", err, out) - } + out, err := exec.Command("defaults", "write", "io.tailscale.ipn.macsys", "SUAutomaticallyUpdate", apply).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to enable automatic updates: %v, %q", err, out) } } diff --git a/cmd/tailscale/cli/status.go b/cmd/tailscale/cli/status.go index 726606109aa15..89b18335b4ee0 100644 --- a/cmd/tailscale/cli/status.go +++ b/cmd/tailscale/cli/status.go @@ -4,7 +4,6 @@ package cli import ( - "bytes" "cmp" "context" "encoding/json" @@ -15,12 +14,13 @@ import ( "net/http" "net/netip" "os" - "strconv" "strings" + "text/tabwriter" "github.com/peterbourgon/ff/v3/ffcli" "github.com/toqueteos/webbrowser" "golang.org/x/net/idna" + "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netmon" @@ -56,6 +56,7 @@ https://github.com/tailscale/tailscale/blob/main/ipn/ipnstate/ipnstate.go fs.BoolVar(&statusArgs.peers, "peers", true, "show status of peers") fs.StringVar(&statusArgs.listen, "listen", "127.0.0.1:8384", "listen address for web mode; use port 0 for automatic") fs.BoolVar(&statusArgs.browser, "browser", true, "Open a browser in web mode") + fs.BoolVar(&statusArgs.header, "header", false, "show column headers in table format") return fs })(), } @@ -68,6 +69,7 @@ var statusArgs struct { active bool // in CLI mode, filter output to only peers with active sessions self bool // in CLI mode, show status of local machine peers bool // in CLI mode, show status of peer machines + header bool // in CLI mode, show column headers in table format } const mullvadTCD = "mullvad.ts.net." @@ -151,10 +153,15 @@ func runStatus(ctx context.Context, args []string) error { os.Exit(1) } - var buf bytes.Buffer - f := func(format string, a ...any) { fmt.Fprintf(&buf, format, a...) } + w := tabwriter.NewWriter(Stdout, 0, 0, 2, ' ', 0) + f := func(format string, a ...any) { fmt.Fprintf(w, format, a...) 
} + if statusArgs.header { + fmt.Fprintln(w, "IP\tHostname\tOwner\tOS\tStatus\t") + fmt.Fprintln(w, "--\t--------\t-----\t--\t------\t") + } + printPS := func(ps *ipnstate.PeerStatus) { - f("%-15s %-20s %-12s %-7s ", + f("%s\t%s\t%s\t%s\t", firstIPString(ps.TailscaleIPs), dnsOrQuoteHostname(st, ps), ownerLogin(st, ps), @@ -164,7 +171,7 @@ func runStatus(ctx context.Context, args []string) error { anyTraffic := ps.TxBytes != 0 || ps.RxBytes != 0 var offline string if !ps.Online { - offline = "; offline" + offline = "; offline" + lastSeenFmt(ps.LastSeen) } if !ps.Active { if ps.ExitNode { @@ -174,7 +181,7 @@ func runStatus(ctx context.Context, args []string) error { } else if anyTraffic { f("idle" + offline) } else if !ps.Online { - f("offline") + f("offline" + lastSeenFmt(ps.LastSeen)) } else { f("-") } @@ -193,13 +200,13 @@ func runStatus(ctx context.Context, args []string) error { f("peer-relay %s", ps.PeerRelay) } if !ps.Online { - f("; offline") + f(offline) } } if anyTraffic { f(", tx %d rx %d", ps.TxBytes, ps.RxBytes) } - f("\n") + f("\t\n") } if statusArgs.self && st.Self != nil { @@ -229,7 +236,8 @@ func runStatus(ctx context.Context, args []string) error { printPS(ps) } } - Stdout.Write(buf.Bytes()) + w.Flush() + if locBasedExitNode { outln() printf("# To see the full list of exit nodes, including location-based exit nodes, run `tailscale exit-node list` \n") @@ -238,44 +246,13 @@ func runStatus(ctx context.Context, args []string) error { outln() printHealth() } - printFunnelStatus(ctx) + if f, ok := hookPrintFunnelStatus.GetOk(); ok { + f(ctx) + } return nil } -// printFunnelStatus prints the status of the funnel, if it's running. -// It prints nothing if the funnel is not running. -func printFunnelStatus(ctx context.Context) { - sc, err := localClient.GetServeConfig(ctx) - if err != nil { - outln() - printf("# Funnel:\n") - printf("# - Unable to get Funnel status: %v\n", err) - return - } - if !sc.IsFunnelOn() { - return - } - outln() - printf("# Funnel on:\n") - for hp, on := range sc.AllowFunnel { - if !on { // if present, should be on - continue - } - sni, portStr, _ := net.SplitHostPort(string(hp)) - p, _ := strconv.ParseUint(portStr, 10, 16) - isTCP := sc.IsTCPForwardingOnPort(uint16(p), noService) - url := "https://" - if isTCP { - url = "tcp://" - } - url += sni - if isTCP || p != 443 { - url += ":" + portStr - } - printf("# - %s\n", url) - } - outln() -} +var hookPrintFunnelStatus feature.Hook[func(context.Context)] // isRunningOrStarting reports whether st is in state Running or Starting. // It also returns a description of the status suitable to display to a user. diff --git a/cmd/tailscale/cli/switch.go b/cmd/tailscale/cli/switch.go index 0677da1b31868..b315a21e7437f 100644 --- a/cmd/tailscale/cli/switch.go +++ b/cmd/tailscale/cli/switch.go @@ -34,6 +34,22 @@ This command is currently in alpha and may change in the future.`, return fs }(), Exec: switchProfile, + + // Add remove subcommand + Subcommands: []*ffcli.Command{ + { + Name: "remove", + ShortUsage: "tailscale switch remove ", + ShortHelp: "Remove a Tailscale account", + LongHelp: `"tailscale switch remove" removes a Tailscale account from the +local machine. This does not delete the account itself, but +it will no longer be available for switching to. You can +add it back by logging in again. 
+ +This command is currently in alpha and may change in the future.`, + Exec: removeProfile, + }, + }, } func init() { @@ -106,40 +122,8 @@ func switchProfile(ctx context.Context, args []string) error { errf("Failed to switch to account: %v\n", err) os.Exit(1) } - var profID ipn.ProfileID - // Allow matching by ID, Tailnet, Account, or Display Name - // in that order. - for _, p := range all { - if p.ID == ipn.ProfileID(args[0]) { - profID = p.ID - break - } - } - if profID == "" { - for _, p := range all { - if p.NetworkProfile.DomainName == args[0] { - profID = p.ID - break - } - } - } - if profID == "" { - for _, p := range all { - if p.Name == args[0] { - profID = p.ID - break - } - } - } - if profID == "" { - for _, p := range all { - if p.NetworkProfile.DisplayName == args[0] { - profID = p.ID - break - } - } - } - if profID == "" { + profID, ok := matchProfile(args[0], all) + if !ok { errf("No profile named %q\n", args[0]) os.Exit(1) } @@ -186,3 +170,54 @@ func switchProfile(ctx context.Context, args []string) error { } } } + +func removeProfile(ctx context.Context, args []string) error { + if len(args) != 1 { + outln("usage: tailscale switch remove NAME") + os.Exit(1) + } + cp, all, err := localClient.ProfileStatus(ctx) + if err != nil { + errf("Failed to remove account: %v\n", err) + os.Exit(1) + } + + profID, ok := matchProfile(args[0], all) + if !ok { + errf("No profile named %q\n", args[0]) + os.Exit(1) + } + + if profID == cp.ID { + printf("Already on account %q\n", args[0]) + os.Exit(0) + } + + return localClient.DeleteProfile(ctx, profID) +} + +func matchProfile(arg string, all []ipn.LoginProfile) (ipn.ProfileID, bool) { + // Allow matching by ID, Tailnet, Account, or Display Name + // in that order. + for _, p := range all { + if p.ID == ipn.ProfileID(arg) { + return p.ID, true + } + } + for _, p := range all { + if p.NetworkProfile.DomainName == arg { + return p.ID, true + } + } + for _, p := range all { + if p.Name == arg { + return p.ID, true + } + } + for _, p := range all { + if p.NetworkProfile.DisplayName == arg { + return p.ID, true + } + } + return "", false +} diff --git a/cmd/tailscale/cli/up.go b/cmd/tailscale/cli/up.go index 097af725b9d78..91a6b60878a93 100644 --- a/cmd/tailscale/cli/up.go +++ b/cmd/tailscale/cli/up.go @@ -12,13 +12,11 @@ import ( "fmt" "log" "net/netip" - "net/url" "os" "os/signal" "reflect" "runtime" "sort" - "strconv" "strings" "syscall" "time" @@ -26,7 +24,9 @@ import ( shellquote "github.com/kballard/go-shellquote" "github.com/peterbourgon/ff/v3/ffcli" qrcode "github.com/skip2/go-qrcode" - "golang.org/x/oauth2/clientcredentials" + "tailscale.com/feature/buildfeatures" + _ "tailscale.com/feature/condregister/identityfederation" + _ "tailscale.com/feature/condregister/oauthkey" "tailscale.com/health/healthmsg" "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" @@ -95,7 +95,11 @@ func newUpFlagSet(goos string, upArgs *upArgsT, cmd string) *flag.FlagSet { // When adding new flags, prefer to put them under "tailscale set" instead // of here. Setting preferences via "tailscale up" is deprecated. 
upf.BoolVar(&upArgs.qr, "qr", false, "show QR code for login URLs") + upf.StringVar(&upArgs.qrFormat, "qr-format", "small", "QR code formatting (small or large)") upf.StringVar(&upArgs.authKeyOrFile, "auth-key", "", `node authorization key; if it begins with "file:", then it's a path to a file containing the authkey`) + upf.StringVar(&upArgs.clientID, "client-id", "", "Client ID used to generate authkeys via workload identity federation") + upf.StringVar(&upArgs.clientSecretOrFile, "client-secret", "", `Client Secret used to generate authkeys via OAuth; if it begins with "file:", then it's a path to a file containing the secret`) + upf.StringVar(&upArgs.idTokenOrFile, "id-token", "", `ID token from the identity provider to exchange with the control server for workload identity federation; if it begins with "file:", then it's a path to a file containing the token`) upf.StringVar(&upArgs.server, "login-server", ipn.DefaultControlURL, "base URL of control server") upf.BoolVar(&upArgs.acceptRoutes, "accept-routes", acceptRouteDefault(goos), "accept routes advertised by other Tailscale nodes") @@ -164,6 +168,7 @@ func defaultNetfilterMode() string { // added to it. Add new arguments to setArgsT instead. type upArgsT struct { qr bool + qrFormat string reset bool server string acceptRoutes bool @@ -183,6 +188,9 @@ type upArgsT struct { statefulFiltering bool netfilterMode string authKeyOrFile string // "secret" or "file:/path/to/secret" + clientID string + clientSecretOrFile string // "secret" or "file:/path/to/secret" + idTokenOrFile string // "secret" or "file:/path/to/secret" hostname string opUser string json bool @@ -192,8 +200,9 @@ type upArgsT struct { postureChecking bool } -func (a upArgsT) getAuthKey() (string, error) { - v := a.authKeyOrFile +// resolveValueFromFile returns the value as-is, or if it starts with "file:", +// reads and returns the trimmed contents of the file. +func resolveValueFromFile(v string) (string, error) { if file, ok := strings.CutPrefix(v, "file:"); ok { b, err := os.ReadFile(file) if err != nil { @@ -204,6 +213,18 @@ func (a upArgsT) getAuthKey() (string, error) { return v, nil } +func (a upArgsT) getAuthKey() (string, error) { + return resolveValueFromFile(a.authKeyOrFile) +} + +func (a upArgsT) getClientSecret() (string, error) { + return resolveValueFromFile(a.clientSecretOrFile) +} + +func (a upArgsT) getIDToken() (string, error) { + return resolveValueFromFile(a.idTokenOrFile) +} + var upArgsGlobal upArgsT // Fields output when `tailscale up --json` is used. Two JSON blocks will be output. @@ -356,6 +377,13 @@ func netfilterModeFromFlag(v string) (_ preftype.NetfilterMode, warning string, // It returns simpleUp if we're running a simple "tailscale up" to // transition to running from a previously-logged-in but down state, // without changing any settings. +// +// Note this can also mutate prefs to add implicit preferences for the +// user operator. +// +// TODO(alexc): the name of this function is confusing, and perhaps a +// sign that it's doing too much. Consider refactoring this so it's just +// telling the caller what to do next, but not changing anything itself. 
func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, justEditMP *ipn.MaskedPrefs, err error) { if !env.upArgs.reset { applyImplicitPrefs(prefs, curPrefs, env) @@ -385,7 +413,7 @@ func updatePrefs(prefs, curPrefs *ipn.Prefs, env upCheckEnv) (simpleUp bool, jus } if env.upArgs.forceReauth && isSSHOverTailscale() { - if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action will result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { + if err := presentRiskToUser(riskLoseSSH, `You are connected over Tailscale; this action may result in your SSH session disconnecting.`, env.upArgs.acceptedRisks); err != nil { return false, nil, err } } @@ -446,6 +474,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE return fixTailscaledConnectError(err) } origAuthURL := st.AuthURL + origNodeKey := st.Self.PublicKey // printAuthURL reports whether we should print out the // provided auth URL from an IPN notify. @@ -495,6 +524,8 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } + effectivePrefs := curPrefs + if cmd == "up" { // "tailscale up" should not be able to change the // profile name. @@ -540,8 +571,16 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } }() - running := make(chan bool, 1) // gets value once in state ipn.Running - watchErr := make(chan error, 1) + // Start watching the IPN bus before we call Start() or StartLoginInteractive(), + // or we could miss IPN notifications. + // + // In particular, if we're doing a force-reauth, we could miss the + // notification with the auth URL we should print for the user. + watcher, err := localClient.WatchIPNBus(watchCtx, 0) + if err != nil { + return err + } + defer watcher.Close() // Special case: bare "tailscale up" means to just start // running, if there's ever been a login. @@ -564,10 +603,36 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } - authKey, err = resolveAuthKey(ctx, authKey, upArgs.advertiseTags) - if err != nil { - return err + // Try to use an OAuth secret to generate an auth key if that functionality + // is available. + if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { + clientSecret := authKey // the authkey argument accepts client secrets, if both arguments are provided authkey has precedence + if clientSecret == "" { + clientSecret, err = upArgs.getClientSecret() + if err != nil { + return err + } + } + + authKey, err = f(ctx, clientSecret, strings.Split(upArgs.advertiseTags, ",")) + if err != nil { + return err + } } + // Try to resolve the auth key via workload identity federation if that functionality + // is available and no auth key is yet determined. 
+ if f, ok := tailscale.HookResolveAuthKeyViaWIF.GetOk(); ok && authKey == "" { + idToken, err := upArgs.getIDToken() + if err != nil { + return err + } + + authKey, err = f(ctx, prefs.ControlURL, upArgs.clientID, idToken, strings.Split(upArgs.advertiseTags, ",")) + if err != nil { + return err + } + } + err = localClient.Start(ctx, ipn.Options{ AuthKey: authKey, UpdatePrefs: prefs, @@ -575,6 +640,7 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { return err } + effectivePrefs = prefs if upArgs.forceReauth || !st.HaveNodeKey { err := localClient.StartLoginInteractive(ctx) if err != nil { @@ -583,15 +649,32 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } - watcher, err := localClient.WatchIPNBus(watchCtx, ipn.NotifyInitialState) - if err != nil { - return err - } - defer watcher.Close() + upComplete := make(chan bool, 1) + watchErr := make(chan error, 1) go func() { var printed bool // whether we've yet printed anything to stdout or stderr - var lastURLPrinted string + lastURLPrinted := "" + + // If we're doing a force-reauth, we need to get two notifications: + // + // 1. IPN is running + // 2. The node key has changed + // + // These two notifications arrive separately, and trying to combine them + // has caused unexpected issues elsewhere in `tailscale up`. For now, we + // track them separately. + ipnIsRunning := false + waitingForKeyChange := upArgs.forceReauth + + // If we're doing a simple up (i.e. `tailscale up`, no flags) and + // the initial state is NeedsMachineAuth, then we never receive a + // state notification from ipn, so we print the device approval URL + // immediately. + if simpleUp && st.BackendState == ipn.NeedsMachineAuth.String() { + printed = true + printDeviceApprovalInfo(env.upArgs.json, effectivePrefs, &lastURLPrinted) + } for { n, err := watcher.Next() @@ -603,29 +686,30 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE msg := *n.ErrMessage fatalf("backend error: %v\n", msg) } + if s := n.State; s != nil && *s == ipn.NeedsMachineAuth { + printed = true + printDeviceApprovalInfo(env.upArgs.json, effectivePrefs, &lastURLPrinted) + } if s := n.State; s != nil { - switch *s { - case ipn.NeedsMachineAuth: - printed = true - if env.upArgs.json { - printUpDoneJSON(ipn.NeedsMachineAuth, "") - } else { - fmt.Fprintf(Stderr, "\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", prefs.AdminPageURL(policyclient.Get())) - } - case ipn.Running: - // Done full authentication process - if env.upArgs.json { - printUpDoneJSON(ipn.Running, "") - } else if printed { - // Only need to print an update if we printed the "please click" message earlier. - fmt.Fprintf(Stderr, "Success.\n") - } - select { - case running <- true: - default: - } - cancelWatch() + ipnIsRunning = *s == ipn.Running + } + if n.NetMap != nil && n.NetMap.NodeKey != origNodeKey { + waitingForKeyChange = false + } + if ipnIsRunning && !waitingForKeyChange { + // Done full authentication process + if env.upArgs.json { + printUpDoneJSON(ipn.Running, "") + } else if printed { + // Only need to print an update if we printed the "please click" message earlier. 
+ fmt.Fprintf(Stderr, "Success.\n") } + select { + case upComplete <- true: + default: + } + cancelWatch() + return } if url := n.BrowseToURL; url != nil { authURL := *url @@ -658,7 +742,14 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE if err != nil { log.Printf("QR code error: %v", err) } else { - fmt.Fprintf(Stderr, "%s\n", q.ToString(false)) + switch upArgs.qrFormat { + case "large": + fmt.Fprintf(Stderr, "%s\n", q.ToString(false)) + case "small": + fmt.Fprintf(Stderr, "%s\n", q.ToSmallString(false)) + default: + log.Printf("unknown QR code format: %q", upArgs.qrFormat) + } } } } @@ -680,18 +771,18 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE timeoutCh = timeoutTimer.C } select { - case <-running: + case <-upComplete: return nil case <-watchCtx.Done(): select { - case <-running: + case <-upComplete: return nil default: } return watchCtx.Err() case err := <-watchErr: select { - case <-running: + case <-upComplete: return nil default: } @@ -701,6 +792,21 @@ func runUp(ctx context.Context, cmd string, args []string, upArgs upArgsT) (retE } } +func printDeviceApprovalInfo(printJson bool, prefs *ipn.Prefs, lastURLPrinted *string) { + if printJson { + printUpDoneJSON(ipn.NeedsMachineAuth, "") + } else { + deviceApprovalURL := prefs.AdminPageURL(policyclient.Get()) + + if lastURLPrinted != nil && deviceApprovalURL == *lastURLPrinted { + return + } + + *lastURLPrinted = deviceApprovalURL + errf("\nTo approve your machine, visit (as admin):\n\n\t%s\n\n", deviceApprovalURL) + } +} + // upWorthWarning reports whether the health check message s is worth warning // about during "tailscale up". Many of the health checks are noisy or confusing // or very ephemeral and happen especially briefly at startup. @@ -805,7 +911,7 @@ func addPrefFlagMapping(flagName string, prefNames ...string) { // correspond to an ipn.Pref. func preflessFlag(flagName string) bool { switch flagName { - case "auth-key", "force-reauth", "reset", "qr", "json", "timeout", "accept-risk", "host-routes": + case "auth-key", "force-reauth", "reset", "qr", "qr-format", "json", "timeout", "accept-risk", "host-routes", "client-id", "client-secret", "id-token": return true } return false @@ -1100,92 +1206,9 @@ func exitNodeIP(p *ipn.Prefs, st *ipnstate.Status) (ip netip.Addr) { return } -// resolveAuthKey either returns v unchanged (in the common case) or, if it -// starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like -// -// tskey-client-xxxx[?ephemeral=false&bar&preauthorized=BOOL&baseURL=...] -// -// and does the OAuth2 dance to get and return an authkey. The "ephemeral" -// property defaults to true if unspecified. The "preauthorized" defaults to -// false. The "baseURL" defaults to https://api.tailscale.com. -// The passed in tags are required, and must be non-empty. These will be -// set on the authkey generated by the OAuth2 dance. 
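The removed function below does the full job, including the OAuth2 client-credentials exchange; as a compact worked example of just the key format described in the comment above, here is a hedged, standalone sketch of splitting a "tskey-client-…" secret into its bare secret and its optional attributes. The names `parseOAuthSecret` and `oauthKeyOpts` are invented for this sketch; the defaults mirror the ones documented above (ephemeral=true, preauthorized=false, baseURL=https://api.tailscale.com).

package main

import (
	"fmt"
	"net/url"
	"strconv"
	"strings"
)

// oauthKeyOpts holds the optional attributes that may be appended to a
// "tskey-client-…" secret, initialized to the documented defaults.
type oauthKeyOpts struct {
	Ephemeral     bool   // defaults to true
	Preauthorized bool   // defaults to false
	BaseURL       string // defaults to https://api.tailscale.com
}

// parseOAuthSecret is a simplified sketch (not the real helper) that splits
// "tskey-client-xxxx?ephemeral=false&preauthorized=true&baseURL=…" into the
// bare client secret and its parsed options.
func parseOAuthSecret(v string) (secret string, opts oauthKeyOpts, err error) {
	opts = oauthKeyOpts{Ephemeral: true, BaseURL: "https://api.tailscale.com"}
	secret, query, _ := strings.Cut(v, "?")
	attrs, err := url.ParseQuery(query)
	if err != nil {
		return "", opts, err
	}
	for k, vals := range attrs {
		switch k {
		case "ephemeral":
			opts.Ephemeral, err = strconv.ParseBool(vals[0])
		case "preauthorized":
			opts.Preauthorized, err = strconv.ParseBool(vals[0])
		case "baseURL":
			opts.BaseURL = vals[0]
		default:
			err = fmt.Errorf("unknown attribute %q", k)
		}
		if err != nil {
			return "", opts, err
		}
	}
	return secret, opts, nil
}

func main() {
	s, o, err := parseOAuthSecret("tskey-client-example?ephemeral=false&preauthorized=true")
	fmt.Println(s, o, err) // tskey-client-example {false true https://api.tailscale.com} <nil>
}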
-func resolveAuthKey(ctx context.Context, v, tags string) (string, error) { - if !strings.HasPrefix(v, "tskey-client-") { - return v, nil - } - if tags == "" { - return "", errors.New("oauth authkeys require --advertise-tags") - } - - clientSecret, named, _ := strings.Cut(v, "?") - attrs, err := url.ParseQuery(named) - if err != nil { - return "", err - } - for k := range attrs { - switch k { - case "ephemeral", "preauthorized", "baseURL": - default: - return "", fmt.Errorf("unknown attribute %q", k) - } - } - getBool := func(name string, def bool) (bool, error) { - v := attrs.Get(name) - if v == "" { - return def, nil - } - ret, err := strconv.ParseBool(v) - if err != nil { - return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) - } - return ret, nil - } - ephemeral, err := getBool("ephemeral", true) - if err != nil { - return "", err - } - preauth, err := getBool("preauthorized", false) - if err != nil { - return "", err - } - - baseURL := "https://api.tailscale.com" - if v := attrs.Get("baseURL"); v != "" { - baseURL = v - } - - credentials := clientcredentials.Config{ - ClientID: "some-client-id", // ignored - ClientSecret: clientSecret, - TokenURL: baseURL + "/api/v2/oauth/token", - } - - tsClient := tailscale.NewClient("-", nil) - tsClient.UserAgent = "tailscale-cli" - tsClient.HTTPClient = credentials.Client(ctx) - tsClient.BaseURL = baseURL - - caps := tailscale.KeyCapabilities{ - Devices: tailscale.KeyDeviceCapabilities{ - Create: tailscale.KeyDeviceCreateCapabilities{ - Reusable: false, - Ephemeral: ephemeral, - Preauthorized: preauth, - Tags: strings.Split(tags, ","), - }, - }, - } - - authkey, _, err := tsClient.CreateKey(ctx, caps) - if err != nil { - return "", err - } - return authkey, nil -} - func warnOnAdvertiseRoutes(ctx context.Context, prefs *ipn.Prefs) { - if len(prefs.AdvertiseRoutes) > 0 || prefs.AppConnector.Advertise { + if buildfeatures.HasAdvertiseRoutes && len(prefs.AdvertiseRoutes) > 0 || + buildfeatures.HasAppConnectors && prefs.AppConnector.Advertise { // TODO(jwhited): compress CheckIPForwarding and CheckUDPGROForwarding // into a single HTTP req. if err := localClient.CheckIPForwarding(ctx); err != nil { diff --git a/cmd/tailscale/cli/up_test.go b/cmd/tailscale/cli/up_test.go index eb06f84dce2ea..fe2f1b555a2bc 100644 --- a/cmd/tailscale/cli/up_test.go +++ b/cmd/tailscale/cli/up_test.go @@ -35,6 +35,7 @@ var validUpFlags = set.Of( "operator", "report-posture", "qr", + "qr-format", "reset", "shields-up", "snat-subnet-routes", @@ -42,6 +43,9 @@ var validUpFlags = set.Of( "stateful-filtering", "timeout", "unattended", + "client-id", + "client-secret", + "id-token", ) // TestUpFlagSetIsFrozen complains when new flags are added to tailscale up. diff --git a/cmd/tailscale/cli/update.go b/cmd/tailscale/cli/update.go index 7c0269f6a7687..7eb0dccace7a8 100644 --- a/cmd/tailscale/cli/update.go +++ b/cmd/tailscale/cli/update.go @@ -87,5 +87,5 @@ func confirmUpdate(ver string) bool { } msg := fmt.Sprintf("This will update Tailscale from %v to %v. 
Continue?", version.Short(), ver) - return prompt.YesNo(msg) + return prompt.YesNo(msg, true) } diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index a983f1c09f0bf..b249639bc80bc 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -14,11 +14,13 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/pe+ W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/winutil/authenticode L github.com/fogleman/gg from tailscale.com/client/systray github.com/fxamacker/cbor/v2 from tailscale.com/tka + github.com/gaissmai/bart from tailscale.com/net/tsdial + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart github.com/go-json-experiment/json from tailscale.com/types/opt+ github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ @@ -31,12 +33,6 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L github.com/golang/freetype/raster from github.com/fogleman/gg+ L github.com/golang/freetype/truetype from github.com/fogleman/gg github.com/golang/groupcache/lru from tailscale.com/net/dnscache - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon @@ -44,11 +40,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli 💣 github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli 💣 github.com/mattn/go-isatty from tailscale.com/cmd/tailscale/cli+ - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/cmd/tailscale/cli+ github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ @@ -67,11 +61,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep github.com/tailscale/goupnp/scpd from github.com/tailscale/goupnp github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp 
- L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink + github.com/tailscale/hujson from tailscale.com/ipn/conffile github.com/tailscale/web-client-prebuilt from tailscale.com/client/web github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/net/tsaddr @@ -88,57 +80,68 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/tailscale+ tailscale.com/client/web from tailscale.com/cmd/tailscale/cli - tailscale.com/clientupdate from tailscale.com/client/web+ + tailscale.com/clientupdate from tailscale.com/cmd/tailscale/cli LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscale tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ - tailscale.com/control/controlhttp from tailscale.com/cmd/tailscale/cli + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp - tailscale.com/control/controlknobs from tailscale.com/net/portmapper + tailscale.com/control/ts2021 from tailscale.com/cmd/tailscale/cli tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/net/netcheck - tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web - tailscale.com/feature from tailscale.com/tsweb + tailscale.com/feature from tailscale.com/tsweb+ + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscale/cli+ tailscale.com/feature/capture/dissector from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/portmapper from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/identityfederation from tailscale.com/feature/condregister/identityfederation + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/health/healthmsg from tailscale.com/cmd/tailscale/cli tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli - tailscale.com/internal/noiseconn from 
tailscale.com/cmd/tailscale/cli + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli+ tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscale/cli tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli tailscale.com/net/bakedroots from tailscale.com/net/tlsdial tailscale.com/net/captivedetection from tailscale.com/net/netcheck - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dnscache from tailscale.com/control/controlhttp+ tailscale.com/net/dnsfallback from tailscale.com/control/controlhttp+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/cmd/tailscale/cli tailscale.com/net/neterror from tailscale.com/net/netcheck+ - tailscale.com/net/netknob from tailscale.com/net/netns + tailscale.com/net/netknob from tailscale.com/net/netns+ 💣 tailscale.com/net/netmon from tailscale.com/cmd/tailscale/cli+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlhttp+ tailscale.com/net/ping from tailscale.com/net/netcheck - tailscale.com/net/portmapper from tailscale.com/cmd/tailscale/cli+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/sockstats from tailscale.com/control/controlhttp+ tailscale.com/net/stun from tailscale.com/net/netcheck - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/cmd/tailscale/cli+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscale/cli+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy + tailscale.com/net/udprelay/status from tailscale.com/client/local+ + tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ @@ -148,9 +151,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tstime from tailscale.com/control/controlhttp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate - tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli+ + tailscale.com/tstime/rate from tailscale.com/cmd/tailscale/cli tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/util/usermetric+ + tailscale.com/types/appctype from tailscale.com/client/local+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn tailscale.com/types/ipproto from tailscale.com/ipn+ @@ -160,7 +164,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep tailscale.com/types/netmap from tailscale.com/ipn+ tailscale.com/types/nettype from tailscale.com/net/netcheck+ tailscale.com/types/opt from 
tailscale.com/client/tailscale+ - tailscale.com/types/persist from tailscale.com/ipn + tailscale.com/types/persist from tailscale.com/ipn+ tailscale.com/types/preftype from tailscale.com/cmd/tailscale/cli+ tailscale.com/types/ptr from tailscale.com/hostinfo+ tailscale.com/types/result from tailscale.com/util/lineiter @@ -175,22 +179,20 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/eventbus from tailscale.com/net/portmapper+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/groupmember from tailscale.com/client/web 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/tailscale+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns tailscale.com/util/mak from tailscale.com/cmd/tailscale/cli+ - tailscale.com/util/multierr from tailscale.com/control/controlhttp+ tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli tailscale.com/util/quarantine from tailscale.com/cmd/tailscale/cli tailscale.com/util/rands from tailscale.com/tsweb - tailscale.com/util/set from tailscale.com/derp+ - tailscale.com/util/singleflight from tailscale.com/net/dnscache+ - tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ + tailscale.com/util/set from tailscale.com/ipn+ + tailscale.com/util/singleflight from tailscale.com/net/dnscache + tailscale.com/util/slicesx from tailscale.com/client/systray+ L tailscale.com/util/stringsx from tailscale.com/client/systray tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ @@ -217,9 +219,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from tailscale.com/clientupdate/distsign+ golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from tailscale.com/control/controlbase golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -236,27 +236,23 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep L golang.org/x/image/math/f64 from github.com/fogleman/gg+ L golang.org/x/image/math/fixed from github.com/fogleman/gg+ golang.org/x/net/bpf from github.com/mdlayher/netlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from net/http+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from tailscale.com/cmd/tailscale/cli+ - golang.org/x/net/http2/hpack from net/http+ + golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from tailscale.com/net/ping - golang.org/x/net/idna from 
golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 + golang.org/x/net/idna from golang.org/x/net/http/httpproxy+ golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from golang.org/x/net/icmp+ + golang.org/x/net/ipv6 from golang.org/x/net/icmp+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ - golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials - golang.org/x/oauth2/clientcredentials from tailscale.com/cmd/tailscale/cli + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials+ + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from golang.org/x/crypto/argon2+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -266,6 +262,22 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from tailscale.com/cmd/tailscale/cli+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ @@ -334,7 +346,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/miekg/dns+ + crypto/tls from net/http+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 @@ -351,9 +363,9 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from 
github.com/tailscale/goupnp+ + encoding/xml from github.com/godbus/dbus/v5/introspect+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/health+ flag from github.com/peterbourgon/ff/v3+ fmt from archive/tar+ hash from compress/zlib+ @@ -402,7 +414,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ @@ -426,30 +438,30 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep math/big from crypto/dsa+ math/bits from compress/flate+ math/rand from github.com/mdlayher/netlink+ - math/rand/v2 from tailscale.com/derp+ + math/rand/v2 from crypto/ecdsa+ mime from golang.org/x/oauth2/internal+ mime/multipart from net/http mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ net/http/cgi from tailscale.com/cmd/tailscale/cli - net/http/httptrace from golang.org/x/net/http2+ + net/http/httptrace from net/http+ net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/tsweb net/netip from go4.org/netipx+ - net/textproto from golang.org/x/net/http/httpguts+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/coreos/go-iptables/iptables+ + os/exec from github.com/atotto/clipboard+ os/signal from tailscale.com/cmd/tailscale/cli+ os/user from archive/tar+ path from archive/tar+ path/filepath from archive/tar+ reflect from archive/tar+ - regexp from github.com/coreos/go-iptables/iptables+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp runtime from archive/tar+ runtime/debug from tailscale.com+ diff --git a/cmd/tailscale/tailscale_test.go b/cmd/tailscale/tailscale_test.go index dc477fb6e4357..a7a3c2323cb8f 100644 --- a/cmd/tailscale/tailscale_test.go +++ b/cmd/tailscale/tailscale_test.go @@ -19,7 +19,6 @@ func TestDeps(t *testing.T) { "gvisor.dev/gvisor/pkg/tcpip/header": "https://github.com/tailscale/tailscale/issues/9756", "tailscale.com/wgengine/filter": "brings in bart, etc", "github.com/bits-and-blooms/bitset": "unneeded in CLI", - "github.com/gaissmai/bart": "unneeded in CLI", "tailscale.com/net/ipset": "unneeded in CLI", }, }.Check(t) diff --git a/cmd/tailscaled/debug.go b/cmd/tailscaled/debug.go index 2f469a0d189f7..b16cb28e0df54 100644 --- a/cmd/tailscaled/debug.go +++ b/cmd/tailscaled/debug.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_debug package main @@ -16,17 +16,21 @@ import ( "log" "net/http" "net/http/httptrace" + "net/http/pprof" "net/url" "os" "time" "tailscale.com/derp/derphttp" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/net/netmon" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" + "tailscale.com/tsweb/varz" "tailscale.com/types/key" + "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" ) @@ -38,7 +42,29 @@ var debugArgs struct { portmap bool } -var debugModeFunc = debugMode // so it can be addressable +func init() { + debugModeFunc 
:= debugMode // to be addressable + subCommands["debug"] = &debugModeFunc + + hookNewDebugMux.Set(newDebugMux) +} + +func newDebugMux() *http.ServeMux { + mux := http.NewServeMux() + mux.HandleFunc("/debug/metrics", servePrometheusMetrics) + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + return mux +} + +func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + varz.Handler(w, r) + clientmetric.WritePrometheusExpositionFormat(w) +} func debugMode(args []string) error { fs := flag.NewFlagSet("debug", flag.ExitOnError) @@ -86,14 +112,10 @@ func runMonitor(ctx context.Context, loop bool) error { } defer mon.Close() - mon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - if !delta.Major { - log.Printf("Network monitor fired; not a major change") - return - } - log.Printf("Network monitor fired. New state:") - dump(delta.New) - }) + eventClient := b.Client("debug.runMonitor") + m := eventClient.Monitor(changeDeltaWatcher(eventClient, ctx, dump)) + defer m.Close() + if loop { log.Printf("Starting link change monitor; initial state:") } @@ -106,6 +128,27 @@ func runMonitor(ctx context.Context, loop bool) error { select {} } +func changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, dump func(st *netmon.State)) func(*eventbus.Client) { + changeSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ctx.Done(): + return + case <-ec.Done(): + return + case delta := <-changeSub.Events(): + if !delta.Major { + log.Printf("Network monitor fired; not a major change") + return + } + log.Printf("Network monitor fired. 
New state:") + dump(delta.New) + } + } + } +} + func getURL(ctx context.Context, urlStr string) error { if urlStr == "login" { urlStr = "https://login.tailscale.com" @@ -124,9 +167,14 @@ func getURL(ctx context.Context, urlStr string) error { if err != nil { return fmt.Errorf("http.NewRequestWithContext: %v", err) } - proxyURL, err := tshttpproxy.ProxyFromEnvironment(req) - if err != nil { - return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err) + var proxyURL *url.URL + if buildfeatures.HasUseProxy { + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + proxyURL, err = proxyFromEnv(req) + if err != nil { + return fmt.Errorf("tshttpproxy.ProxyFromEnvironment: %v", err) + } + } } log.Printf("proxy: %v", proxyURL) tr := &http.Transport{ @@ -135,7 +183,10 @@ func getURL(ctx context.Context, urlStr string) error { DisableKeepAlives: true, } if proxyURL != nil { - auth, err := tshttpproxy.GetAuthHeader(proxyURL) + var auth string + if f, ok := feature.HookProxyGetAuthHeader.GetOk(); ok { + auth, err = f(proxyURL) + } if err == nil && auth != "" { tr.ProxyConnectHeader.Set("Proxy-Authorization", auth) } @@ -161,7 +212,9 @@ func getURL(ctx context.Context, urlStr string) error { } func checkDerp(ctx context.Context, derpRegion string) (err error) { - ht := new(health.Tracker) + bus := eventbus.New() + defer bus.Close() + ht := health.NewTracker(bus) req, err := http.NewRequestWithContext(ctx, "GET", ipn.DefaultControlURL+"/derpmap/default", nil) if err != nil { return fmt.Errorf("create derp map request: %w", err) diff --git a/cmd/tailscaled/depaware-min.txt b/cmd/tailscaled/depaware-min.txt new file mode 100644 index 0000000000000..224026f25368d --- /dev/null +++ b/cmd/tailscaled/depaware-min.txt @@ -0,0 +1,413 @@ +tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/depaware) + + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/go-json-experiment/json from tailscale.com/drive+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ + 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + 💣 github.com/mdlayher/socket 
from github.com/mdlayher/netlink + 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf + 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + 💣 go4.org/mem from tailscale.com/control/controlbase+ + go4.org/netipx from tailscale.com/ipn/ipnlocal+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ + tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ + tailscale.com/disco from tailscale.com/net/tstun+ + tailscale.com/drive from tailscale.com/ipn+ + tailscale.com/envknob from tailscale.com/cmd/tailscaled+ + tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal + tailscale.com/feature from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/buildfeatures from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister + tailscale.com/health from tailscale.com/control/controlclient+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled + tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ + tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver + tailscale.com/ipn/store from tailscale.com/cmd/tailscaled + tailscale.com/ipn/store/mem from tailscale.com/ipn/store + tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from 
tailscale.com/cmd/tailscaled+ + tailscale.com/logtail from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock + tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ + tailscale.com/net/flowtrack from tailscale.com/wgengine/filter + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/batching+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netutil from tailscale.com/control/controlclient+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/net/netcheck+ + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/ipn+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/cmd/tailscaled+ + tailscale.com/proxymap from tailscale.com/tsd + tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ + tailscale.com/syncs from tailscale.com/cmd/tailscaled+ + tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsconst from tailscale.com/net/netns+ + tailscale.com/tsd from tailscale.com/cmd/tailscaled+ + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/control/controlbase+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx 
from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/net/batching+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/control/controlclient+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/control/controlclient+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth + tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + tailscale.com/util/mak from tailscale.com/control/controlclient+ + tailscale.com/util/must from tailscale.com/logpolicy+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/osdiag from tailscale.com/ipn/localapi + tailscale.com/util/osshare from tailscale.com/cmd/tailscaled + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/usermetric from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/winutil from tailscale.com/ipn/ipnauth + tailscale.com/util/zstdframe from tailscale.com/control/controlclient + tailscale.com/version from tailscale.com/cmd/tailscaled+ + tailscale.com/version/distro from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from tailscale.com/types/netmap+ + 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/wgcfg 
from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/util/set + golang.org/x/exp/maps from tailscale.com/ipn/store/mem + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/ipn/ipnlocal+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/icmp from tailscale.com/net/ping + golang.org/x/net/idna from golang.org/x/net/http/httpguts + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket + golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ + golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from tailscale.com/derp + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna + bufio from compress/flate+ + bytes from bufio+ + cmp from encoding/json+ + compress/flate from 
compress/gzip + compress/gzip from net/http + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/internal/hpke+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509 + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ + crypto/hmac from crypto/tls+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/fips140+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/fips140+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/hkdf+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/ecdsa+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/hpke from crypto/tls + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ + crypto/md5 from crypto/tls+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from net/http+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + crypto/x509/pkix from crypto/x509 + embed from tailscale.com+ + encoding from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/go-json-experiment/json + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from github.com/gaissmai/bart+ + encoding/pem from crypto/tls+ + errors from bufio+ + flag from tailscale.com/cmd/tailscaled+ + fmt from compress/flate+ + hash from crypto+ 
+ hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + html from tailscale.com/ipn/ipnlocal+ + internal/abi from hash/maphash+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from crypto/internal/fips140deps/godebug+ + internal/godebugs from internal/godebug+ + internal/goexperiment from hash/maphash+ + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profilerecord from runtime + internal/race from internal/runtime/maps+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/cgroup from runtime + internal/runtime/exithook from runtime + internal/runtime/gc from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/sys from crypto/subtle+ + internal/runtime/syscall from internal/runtime/cgroup+ + internal/saferio from encoding/asn1 + internal/singleflight from net + internal/stringslite from embed+ + internal/sync from sync+ + internal/synctest from sync + internal/syscall/execenv from os+ + internal/syscall/unix from crypto/internal/sysrand+ + internal/testlog from os + internal/trace/tracev2 from runtime + internal/unsafeheader from internal/reflectlite+ + io from bufio+ + io/fs from crypto/x509+ + iter from bytes+ + log from github.com/klauspost/compress/zstd+ + log/internal from log + maps from crypto/x509+ + math from compress/flate+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/mdlayher/netlink+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from tailscale.com/cmd/tailscaled+ + net/http/httptrace from net/http+ + net/http/internal from net/http + net/http/internal/ascii from net/http + net/http/internal/httpcommon from net/http + net/netip from crypto/x509+ + net/textproto from golang.org/x/net/http/httpguts+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from tailscale.com/hostinfo+ + os/signal from tailscale.com/cmd/tailscaled + os/user from tailscale.com/ipn/ipnauth+ + path from io/fs+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/klauspost/compress/zstd+ + slices from crypto/tls+ + sort from compress/flate+ + strconv from compress/flate+ + strings from bufio+ + sync from compress/flate+ + sync/atomic from context+ + syscall from crypto/internal/sysrand+ + time from compress/gzip+ + unicode from bytes+ + unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from crypto/internal/fips140cache+ diff --git a/cmd/tailscaled/depaware-minbox.txt b/cmd/tailscaled/depaware-minbox.txt new file mode 100644 index 0000000000000..9633e73989046 --- /dev/null +++ b/cmd/tailscaled/depaware-minbox.txt @@ -0,0 +1,453 @@ +tailscale.com/cmd/tailscaled dependencies: 
(generated by github.com/tailscale/depaware) + + filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus + filippo.io/edwards25519/field from filippo.io/edwards25519 + github.com/gaissmai/bart from tailscale.com/net/ipset+ + github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ + github.com/gaissmai/bart/internal/sparse from github.com/gaissmai/bart + github.com/go-json-experiment/json from tailscale.com/drive+ + github.com/go-json-experiment/json/internal from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonflags from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ + github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ + github.com/golang/groupcache/lru from tailscale.com/net/dnscache + github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign + 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon + github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink + github.com/kballard/go-shellquote from tailscale.com/cmd/tailscale/cli + github.com/klauspost/compress from github.com/klauspost/compress/zstd + github.com/klauspost/compress/fse from github.com/klauspost/compress/huff0 + github.com/klauspost/compress/huff0 from github.com/klauspost/compress/zstd + github.com/klauspost/compress/internal/cpuinfo from github.com/klauspost/compress/huff0+ + github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd + github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe + github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd + github.com/mattn/go-colorable from tailscale.com/cmd/tailscale/cli + github.com/mattn/go-isatty from github.com/mattn/go-colorable+ + 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ + 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ + 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink + github.com/peterbourgon/ff/v3 from github.com/peterbourgon/ff/v3/ffcli+ + github.com/peterbourgon/ff/v3/ffcli from tailscale.com/cmd/tailscale/cli+ + github.com/peterbourgon/ff/v3/internal from github.com/peterbourgon/ff/v3 + 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf + github.com/skip2/go-qrcode from tailscale.com/cmd/tailscale/cli + github.com/skip2/go-qrcode/bitset from github.com/skip2/go-qrcode+ + github.com/skip2/go-qrcode/reedsolomon from github.com/skip2/go-qrcode + 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ + 💣 github.com/tailscale/wireguard-go/device from tailscale.com/net/tstun+ + github.com/tailscale/wireguard-go/ipc from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/ratelimiter from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/replay from github.com/tailscale/wireguard-go/device + github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ + github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device + 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ + github.com/toqueteos/webbrowser from tailscale.com/cmd/tailscale/cli + 💣 go4.org/mem from tailscale.com/control/controlbase+ + go4.org/netipx from 
tailscale.com/ipn/ipnlocal+ + tailscale.com from tailscale.com/version + tailscale.com/appc from tailscale.com/ipn/ipnlocal + tailscale.com/atomicfile from tailscale.com/ipn+ + tailscale.com/client/local from tailscale.com/client/tailscale+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale + tailscale.com/client/tailscale/apitype from tailscale.com/ipn/ipnauth+ + tailscale.com/clientupdate from tailscale.com/cmd/tailscale/cli + tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + tailscale.com/cmd/tailscale/cli from tailscale.com/cmd/tailscaled + tailscale.com/cmd/tailscale/cli/ffcomplete from tailscale.com/cmd/tailscale/cli + tailscale.com/cmd/tailscale/cli/ffcomplete/internal from tailscale.com/cmd/tailscale/cli/ffcomplete + tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled + tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ + tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 + tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp + tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient+ + tailscale.com/derp from tailscale.com/derp/derphttp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ + tailscale.com/derp/derphttp from tailscale.com/net/netcheck+ + tailscale.com/disco from tailscale.com/net/tstun+ + tailscale.com/drive from tailscale.com/ipn+ + tailscale.com/envknob from tailscale.com/cmd/tailscaled+ + tailscale.com/envknob/featureknob from tailscale.com/ipn/ipnlocal + tailscale.com/feature from tailscale.com/cmd/tailscaled+ + tailscale.com/feature/buildfeatures from tailscale.com/ipn/ipnlocal+ + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/identityfederation from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/oauthkey from tailscale.com/cmd/tailscale/cli + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister+ + tailscale.com/feature/condregister/useproxy from tailscale.com/cmd/tailscale/cli+ + tailscale.com/health from tailscale.com/control/controlclient+ + tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ + tailscale.com/hostinfo from tailscale.com/cmd/tailscaled+ + tailscale.com/internal/client/tailscale from tailscale.com/cmd/tailscale/cli + tailscale.com/ipn from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ + tailscale.com/ipn/ipnext from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/ipnlocal from tailscale.com/cmd/tailscaled+ + tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled + tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+ + tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver + tailscale.com/ipn/store from tailscale.com/cmd/tailscaled + tailscale.com/ipn/store/mem from tailscale.com/ipn/store + tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/licenses from tailscale.com/cmd/tailscale/cli + tailscale.com/log/filelogger from tailscale.com/logpolicy + tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal + tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ + 
tailscale.com/logtail from tailscale.com/cmd/tailscaled+ + tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ + tailscale.com/net/ace from tailscale.com/cmd/tailscale/cli + tailscale.com/net/bakedroots from tailscale.com/net/tlsdial + 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock + tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ + tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ + tailscale.com/net/dns/resolver from tailscale.com/net/dns+ + tailscale.com/net/dnscache from tailscale.com/control/controlclient+ + tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ + tailscale.com/net/flowtrack from tailscale.com/wgengine/filter + tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/netaddr from tailscale.com/ipn+ + tailscale.com/net/netcheck from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/neterror from tailscale.com/net/batching+ + tailscale.com/net/netkernelconf from tailscale.com/ipn/ipnlocal + tailscale.com/net/netknob from tailscale.com/logpolicy+ + tailscale.com/net/netmon from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netns from tailscale.com/cmd/tailscaled+ + tailscale.com/net/netutil from tailscale.com/control/controlclient+ + tailscale.com/net/netx from tailscale.com/control/controlclient+ + tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ + tailscale.com/net/packet/checksum from tailscale.com/net/tstun + tailscale.com/net/ping from tailscale.com/net/netcheck+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ + tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock + tailscale.com/net/sockstats from tailscale.com/control/controlclient+ + tailscale.com/net/stun from tailscale.com/net/netcheck+ + tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ + tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial + tailscale.com/net/tsaddr from tailscale.com/ipn+ + tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ + tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ + tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local + tailscale.com/omit from tailscale.com/ipn/conffile + tailscale.com/paths from tailscale.com/cmd/tailscaled+ + tailscale.com/proxymap from tailscale.com/tsd + tailscale.com/safesocket from tailscale.com/cmd/tailscaled+ + tailscale.com/syncs from tailscale.com/cmd/tailscaled+ + tailscale.com/tailcfg from tailscale.com/client/tailscale/apitype+ + tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock + tailscale.com/tempfork/spf13/cobra from tailscale.com/cmd/tailscale/cli/ffcomplete+ + tailscale.com/tka from tailscale.com/control/controlclient+ + tailscale.com/tsconst from tailscale.com/net/netns+ + tailscale.com/tsd from tailscale.com/cmd/tailscaled+ + tailscale.com/tstime from tailscale.com/control/controlclient+ + tailscale.com/tstime/mono from tailscale.com/net/tstun+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ + tailscale.com/types/dnstype from tailscale.com/client/tailscale/apitype+ + tailscale.com/types/empty from tailscale.com/ipn+ + tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled + tailscale.com/types/ipproto from tailscale.com/ipn+ + tailscale.com/types/key from tailscale.com/client/local+ + 
tailscale.com/types/lazy from tailscale.com/hostinfo+ + tailscale.com/types/logger from tailscale.com/appc+ + tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ + tailscale.com/types/mapx from tailscale.com/ipn/ipnext + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netmap from tailscale.com/control/controlclient+ + tailscale.com/types/nettype from tailscale.com/net/batching+ + tailscale.com/types/opt from tailscale.com/control/controlknobs+ + tailscale.com/types/persist from tailscale.com/control/controlclient+ + tailscale.com/types/preftype from tailscale.com/ipn+ + tailscale.com/types/ptr from tailscale.com/control/controlclient+ + tailscale.com/types/result from tailscale.com/util/lineiter + tailscale.com/types/structs from tailscale.com/control/controlclient+ + tailscale.com/types/tkatype from tailscale.com/control/controlclient+ + tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/cibuild from tailscale.com/health + tailscale.com/util/clientmetric from tailscale.com/appc+ + tailscale.com/util/cloudenv from tailscale.com/hostinfo+ + tailscale.com/util/cmpver from tailscale.com/clientupdate + tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ + tailscale.com/util/dnsname from tailscale.com/appc+ + tailscale.com/util/eventbus from tailscale.com/client/local+ + tailscale.com/util/execqueue from tailscale.com/appc+ + tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal + tailscale.com/util/groupmember from tailscale.com/ipn/ipnauth + tailscale.com/util/httpm from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/lineiter from tailscale.com/hostinfo+ + tailscale.com/util/mak from tailscale.com/control/controlclient+ + tailscale.com/util/must from tailscale.com/logpolicy+ + tailscale.com/util/nocasemaps from tailscale.com/types/ipproto + tailscale.com/util/osdiag from tailscale.com/ipn/localapi + tailscale.com/util/osshare from tailscale.com/cmd/tailscaled + tailscale.com/util/osuser from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/prompt from tailscale.com/cmd/tailscale/cli + tailscale.com/util/race from tailscale.com/net/dns/resolver + tailscale.com/util/racebuild from tailscale.com/logpolicy + tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock + tailscale.com/util/set from tailscale.com/control/controlclient+ + tailscale.com/util/singleflight from tailscale.com/control/controlclient+ + tailscale.com/util/slicesx from tailscale.com/appc+ + tailscale.com/util/syspolicy/pkey from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/policyclient from tailscale.com/cmd/tailscaled+ + tailscale.com/util/syspolicy/ptype from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/testenv from tailscale.com/control/controlclient+ + tailscale.com/util/usermetric from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/vizerror from tailscale.com/tailcfg+ + tailscale.com/util/winutil from tailscale.com/ipn/ipnauth + tailscale.com/util/zstdframe from tailscale.com/control/controlclient + tailscale.com/version from tailscale.com/cmd/tailscaled+ + tailscale.com/version/distro from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/filter from tailscale.com/control/controlclient+ + tailscale.com/wgengine/filter/filtertype from 
tailscale.com/types/netmap+ + 💣 tailscale.com/wgengine/magicsock from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/netlog from tailscale.com/wgengine + tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ + tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ + tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ + tailscale.com/wgengine/wglog from tailscale.com/wgengine + golang.org/x/crypto/blake2b from golang.org/x/crypto/nacl/box + golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305 + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/hkdf from tailscale.com/control/controlbase + golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ + golang.org/x/crypto/internal/poly1305 from golang.org/x/crypto/chacha20poly1305+ + golang.org/x/crypto/nacl/box from tailscale.com/types/key + golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box + golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device + golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + golang.org/x/exp/constraints from tailscale.com/util/set + golang.org/x/exp/maps from tailscale.com/ipn/store/mem + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/cmd/tailscale/cli+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/icmp from tailscale.com/net/ping + golang.org/x/net/idna from golang.org/x/net/http/httpguts+ + golang.org/x/net/internal/iana from golang.org/x/net/icmp+ + golang.org/x/net/internal/socket from golang.org/x/net/icmp+ + golang.org/x/net/ipv4 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/net/ipv6 from github.com/tailscale/wireguard-go/conn+ + golang.org/x/sync/errgroup from github.com/mdlayher/socket + golang.org/x/sys/cpu from github.com/tailscale/wireguard-go/tun+ + golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ + golang.org/x/term from tailscale.com/logpolicy + golang.org/x/text/secure/bidirule from golang.org/x/net/idna + golang.org/x/text/transform from golang.org/x/text/secure/bidirule+ + golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ + golang.org/x/text/unicode/norm from golang.org/x/net/idna + golang.org/x/time/rate from tailscale.com/derp + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + 
vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna + archive/tar from tailscale.com/clientupdate + bufio from compress/flate+ + bytes from bufio+ + cmp from encoding/json+ + compress/flate from compress/gzip+ + compress/gzip from net/http+ + compress/zlib from image/png + container/list from crypto/tls+ + context from crypto/tls+ + crypto from crypto/ecdh+ + crypto/aes from crypto/internal/hpke+ + crypto/cipher from crypto/aes+ + crypto/des from crypto/tls+ + crypto/dsa from crypto/x509 + crypto/ecdh from crypto/ecdsa+ + crypto/ecdsa from crypto/tls+ + crypto/ed25519 from crypto/tls+ + crypto/elliptic from crypto/ecdsa+ + crypto/fips140 from crypto/tls/internal/fips140tls + crypto/hkdf from crypto/internal/hpke+ + crypto/hmac from crypto/tls+ + crypto/internal/boring from crypto/aes+ + crypto/internal/boring/bbig from crypto/ecdsa+ + crypto/internal/boring/sig from crypto/internal/boring + crypto/internal/entropy from crypto/internal/fips140/drbg + crypto/internal/fips140 from crypto/fips140+ + crypto/internal/fips140/aes from crypto/aes+ + crypto/internal/fips140/aes/gcm from crypto/cipher+ + crypto/internal/fips140/alias from crypto/cipher+ + crypto/internal/fips140/bigmod from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/check from crypto/fips140+ + crypto/internal/fips140/drbg from crypto/internal/fips140/aes/gcm+ + crypto/internal/fips140/ecdh from crypto/ecdh + crypto/internal/fips140/ecdsa from crypto/ecdsa + crypto/internal/fips140/ed25519 from crypto/ed25519 + crypto/internal/fips140/edwards25519 from crypto/internal/fips140/ed25519 + crypto/internal/fips140/edwards25519/field from crypto/ecdh+ + crypto/internal/fips140/hkdf from crypto/hkdf+ + crypto/internal/fips140/hmac from crypto/hmac+ + crypto/internal/fips140/mlkem from crypto/tls + crypto/internal/fips140/nistec from crypto/ecdsa+ + crypto/internal/fips140/nistec/fiat from crypto/internal/fips140/nistec + crypto/internal/fips140/rsa from crypto/rsa + crypto/internal/fips140/sha256 from crypto/internal/fips140/check+ + crypto/internal/fips140/sha3 from crypto/internal/fips140/hmac+ + crypto/internal/fips140/sha512 from crypto/internal/fips140/ecdsa+ + crypto/internal/fips140/subtle from crypto/internal/fips140/aes+ + crypto/internal/fips140/tls12 from crypto/tls + crypto/internal/fips140/tls13 from crypto/tls + crypto/internal/fips140cache from crypto/ecdsa+ + crypto/internal/fips140deps/byteorder from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/cpu from crypto/internal/fips140/aes+ + crypto/internal/fips140deps/godebug from crypto/internal/fips140+ + crypto/internal/fips140hash from crypto/ecdsa+ + crypto/internal/fips140only from crypto/cipher+ + crypto/internal/hpke from crypto/tls + crypto/internal/impl from crypto/internal/fips140/aes+ + crypto/internal/randutil from crypto/dsa+ + crypto/internal/sysrand from crypto/internal/entropy+ + crypto/md5 from crypto/tls+ + crypto/rand from crypto/ed25519+ + crypto/rc4 from crypto/tls + crypto/rsa from crypto/tls+ + crypto/sha1 from crypto/tls+ + crypto/sha256 from crypto/tls+ + crypto/sha3 from crypto/internal/fips140hash + crypto/sha512 from crypto/ecdsa+ + crypto/subtle from crypto/cipher+ + crypto/tls from net/http+ + crypto/tls/internal/fips140tls from crypto/tls + crypto/x509 from crypto/tls+ + crypto/x509/pkix from crypto/x509 + embed from tailscale.com+ + encoding 
from encoding/json+ + encoding/asn1 from crypto/x509+ + encoding/base32 from github.com/go-json-experiment/json + encoding/base64 from encoding/json+ + encoding/binary from compress/gzip+ + encoding/hex from crypto/x509+ + encoding/json from github.com/gaissmai/bart+ + encoding/pem from crypto/tls+ + errors from bufio+ + flag from tailscale.com/cmd/tailscaled+ + fmt from compress/flate+ + hash from crypto+ + hash/adler32 from compress/zlib + hash/crc32 from compress/gzip+ + hash/maphash from go4.org/mem + html from tailscale.com/ipn/ipnlocal+ + image from github.com/skip2/go-qrcode+ + image/color from github.com/skip2/go-qrcode+ + image/png from github.com/skip2/go-qrcode + internal/abi from hash/maphash+ + internal/asan from internal/runtime/maps+ + internal/bisect from internal/godebug + internal/bytealg from bytes+ + internal/byteorder from crypto/cipher+ + internal/chacha8rand from math/rand/v2+ + internal/coverage/rtcov from runtime + internal/cpu from crypto/internal/fips140deps/cpu+ + internal/filepathlite from os+ + internal/fmtsort from fmt + internal/goarch from crypto/internal/fips140deps/cpu+ + internal/godebug from crypto/internal/fips140deps/godebug+ + internal/godebugs from internal/godebug+ + internal/goexperiment from hash/maphash+ + internal/goos from crypto/x509+ + internal/itoa from internal/poll+ + internal/msan from internal/runtime/maps+ + internal/nettrace from net+ + internal/oserror from io/fs+ + internal/poll from net+ + internal/profilerecord from runtime + internal/race from internal/runtime/maps+ + internal/reflectlite from context+ + internal/runtime/atomic from internal/runtime/exithook+ + internal/runtime/cgroup from runtime + internal/runtime/exithook from runtime + internal/runtime/gc from runtime + internal/runtime/maps from reflect+ + internal/runtime/math from internal/runtime/maps+ + internal/runtime/strconv from internal/runtime/cgroup+ + internal/runtime/sys from crypto/subtle+ + internal/runtime/syscall from internal/runtime/cgroup+ + internal/saferio from encoding/asn1 + internal/singleflight from net + internal/stringslite from embed+ + internal/sync from sync+ + internal/synctest from sync + internal/syscall/execenv from os+ + internal/syscall/unix from crypto/internal/sysrand+ + internal/testlog from os + internal/trace/tracev2 from runtime + internal/unsafeheader from internal/reflectlite+ + io from bufio+ + io/fs from crypto/x509+ + io/ioutil from github.com/skip2/go-qrcode + iter from bytes+ + log from github.com/klauspost/compress/zstd+ + log/internal from log + maps from crypto/x509+ + math from compress/flate+ + math/big from crypto/dsa+ + math/bits from bytes+ + math/rand from github.com/mdlayher/netlink+ + math/rand/v2 from crypto/ecdsa+ + mime from mime/multipart+ + mime/multipart from net/http + mime/quotedprintable from mime/multipart + net from crypto/tls+ + net/http from net/http/httputil+ + net/http/httptrace from net/http+ + net/http/httputil from tailscale.com/cmd/tailscale/cli + net/http/internal from net/http+ + net/http/internal/ascii from net/http+ + net/http/internal/httpcommon from net/http + net/netip from crypto/x509+ + net/textproto from golang.org/x/net/http/httpguts+ + net/url from crypto/x509+ + os from crypto/internal/sysrand+ + os/exec from tailscale.com/hostinfo+ + os/signal from tailscale.com/cmd/tailscaled+ + os/user from tailscale.com/ipn/ipnauth+ + path from io/fs+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ + regexp from tailscale.com/clientupdate + regexp/syntax from regexp + runtime from 
crypto/internal/fips140+ + runtime/debug from github.com/klauspost/compress/zstd+ + slices from crypto/tls+ + sort from compress/flate+ + strconv from compress/flate+ + strings from bufio+ + sync from compress/flate+ + sync/atomic from context+ + syscall from crypto/internal/sysrand+ + text/tabwriter from github.com/peterbourgon/ff/v3/ffcli+ + time from compress/gzip+ + unicode from bytes+ + unicode/utf16 from crypto/x509+ + unicode/utf8 from bufio+ + unique from net/netip + unsafe from bytes+ + weak from crypto/internal/fips140cache+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 3ca57077254f7..e92d41b9855df 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -124,7 +124,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de L github.com/google/nftables/xt from github.com/google/nftables/expr+ DW github.com/google/uuid from tailscale.com/clientupdate+ github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns + L 💣 github.com/illarion/gonotify/v3 from tailscale.com/feature/linuxdnsfight L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 L github.com/insomniacslk/dhcp/dhcpv4 from tailscale.com/feature/tap L github.com/insomniacslk/dhcp/iana from github.com/insomniacslk/dhcp/dhcpv4 @@ -143,13 +143,12 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/kortschak/wol from tailscale.com/feature/wakeonlan LD github.com/kr/fs from github.com/pkg/sftp - L github.com/mdlayher/genetlink from tailscale.com/net/tstun + L github.com/mdlayher/genetlink from tailscale.com/feature/linkspeed L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd + L github.com/mdlayher/sdnotify from tailscale.com/feature/sdnotify L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket L github.com/pierrec/lz4/v4 from github.com/u-root/uio/uio L github.com/pierrec/lz4/v4/internal/lz4block from github.com/pierrec/lz4/v4+ @@ -175,7 +174,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de github.com/tailscale/hujson from tailscale.com/ipn/conffile L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web W 💣 github.com/tailscale/wf from tailscale.com/wf 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ @@ -247,23 +246,24 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/client/local from tailscale.com/client/web+ tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ + tailscale.com/clientupdate from 
tailscale.com/feature/clientupdate LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/cmd/tailscaled/childproc from tailscale.com/cmd/tailscaled+ tailscale.com/cmd/tailscaled/tailscaledhooks from tailscale.com/cmd/tailscaled+ tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/cmd/tailscaled+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021+ tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/cmd/tailscaled+ - tailscale.com/disco from tailscale.com/derp+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/disco from tailscale.com/feature/relayserver+ + tailscale.com/doctor from tailscale.com/feature/doctor + tailscale.com/doctor/ethtool from tailscale.com/feature/doctor + 💣 tailscale.com/doctor/permissions from tailscale.com/feature/doctor + tailscale.com/doctor/routetable from tailscale.com/feature/doctor tailscale.com/drive from tailscale.com/client/local+ tailscale.com/drive/driveimpl from tailscale.com/cmd/tailscaled tailscale.com/drive/driveimpl/compositedav from tailscale.com/drive/driveimpl @@ -272,18 +272,35 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/feature/wakeonlan+ + tailscale.com/feature/ace from tailscale.com/feature/condregister + tailscale.com/feature/appconnectors from tailscale.com/feature/condregister + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/feature/condregister tailscale.com/feature/capture from tailscale.com/feature/condregister + tailscale.com/feature/clientupdate from tailscale.com/feature/condregister + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock tailscale.com/feature/condregister from tailscale.com/cmd/tailscaled + tailscale.com/feature/condregister/portmapper from tailscale.com/feature/condregister + tailscale.com/feature/condregister/useproxy from tailscale.com/feature/condregister + tailscale.com/feature/debugportmapper from tailscale.com/feature/condregister + tailscale.com/feature/doctor from tailscale.com/feature/condregister + tailscale.com/feature/drive from tailscale.com/feature/condregister + L tailscale.com/feature/linkspeed from tailscale.com/feature/condregister + L tailscale.com/feature/linuxdnsfight from tailscale.com/feature/condregister + tailscale.com/feature/portlist from tailscale.com/feature/condregister + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper + tailscale.com/feature/posture from tailscale.com/feature/condregister tailscale.com/feature/relayserver from 
tailscale.com/feature/condregister + L tailscale.com/feature/sdnotify from tailscale.com/feature/condregister tailscale.com/feature/syspolicy from tailscale.com/feature/condregister+ tailscale.com/feature/taildrop from tailscale.com/feature/condregister L tailscale.com/feature/tap from tailscale.com/feature/condregister tailscale.com/feature/tpm from tailscale.com/feature/condregister + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/feature/wakeonlan from tailscale.com/feature/condregister tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ W tailscale.com/ipn/auditlog from tailscale.com/cmd/tailscaled tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ @@ -294,10 +311,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/ipnserver from tailscale.com/cmd/tailscaled tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver+ - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal + tailscale.com/ipn/policy from tailscale.com/feature/portlist tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store + L tailscale.com/ipn/store/awsstore from tailscale.com/feature/condregister + L tailscale.com/ipn/store/kubestore from tailscale.com/feature/condregister tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore @@ -307,21 +324,19 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/cmd/tailscaled+ tailscale.com/logtail from tailscale.com/cmd/tailscaled+ - tailscale.com/logtail/backoff from tailscale.com/cmd/tailscaled+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ + tailscale.com/net/ace from tailscale.com/feature/ace tailscale.com/net/bakedroots from tailscale.com/net/tlsdial+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock+ tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/cmd/tailscaled+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/cmd/tailscaled+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/netaddr from tailscale.com/ipn+ tailscale.com/net/netcheck from tailscale.com/wgengine/magicsock+ @@ -333,29 +348,30 @@ tailscale.com/cmd/tailscaled 
dependencies: (generated by github.com/tailscale/de W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ - tailscale.com/net/packet from tailscale.com/net/connstats+ + tailscale.com/net/packet from tailscale.com/feature/capture+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper+ + tailscale.com/net/portmapper/portmappertype from tailscale.com/feature/portmapper+ tailscale.com/net/proxymux from tailscale.com/cmd/tailscaled tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock+ tailscale.com/net/socks5 from tailscale.com/cmd/tailscaled tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/cmd/tailscaled+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/cmd/tailscaled+ tailscale.com/net/udprelay from tailscale.com/feature/relayserver - tailscale.com/net/udprelay/endpoint from tailscale.com/feature/relayserver+ + tailscale.com/net/udprelay/endpoint from tailscale.com/net/udprelay+ + tailscale.com/net/udprelay/status from tailscale.com/client/local+ tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal - tailscale.com/posture from tailscale.com/ipn/ipnlocal + 💣 tailscale.com/portlist from tailscale.com/feature/portlist + tailscale.com/posture from tailscale.com/feature/posture tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ LD tailscale.com/sessionrecording from tailscale.com/ssh/tailssh @@ -365,16 +381,16 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal LD tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/net/netmon+ tailscale.com/tsd from tailscale.com/cmd/tailscaled+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/cmd/tailscaled+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from 
tailscale.com/ipn+ tailscale.com/types/flagtype from tailscale.com/cmd/tailscaled @@ -384,7 +400,8 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/cmd/tailscaled+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ @@ -395,12 +412,14 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/tka+ tailscale.com/types/views from tailscale.com/ipn/ipnlocal+ + tailscale.com/util/backoff from tailscale.com/cmd/tailscaled+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/control/controlclient+ tailscale.com/util/cloudenv from tailscale.com/net/dns/resolver+ tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/ipn/ipnlocal+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ tailscale.com/util/dnsname from tailscale.com/appc+ tailscale.com/util/eventbus from tailscale.com/tsd+ @@ -411,9 +430,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/httphdr from tailscale.com/feature/taildrop tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ + L tailscale.com/util/linuxfw from tailscale.com/wgengine/router/osrouter tailscale.com/util/mak from tailscale.com/control/controlclient+ - tailscale.com/util/multierr from tailscale.com/cmd/tailscaled+ + tailscale.com/util/multierr from tailscale.com/feature/taildrop tailscale.com/util/must from tailscale.com/clientupdate/distsign+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/cmd/tailscaled+ @@ -425,9 +444,9 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/racebuild from tailscale.com/logpolicy tailscale.com/util/rands from tailscale.com/ipn/ipnlocal+ tailscale.com/util/ringlog from tailscale.com/wgengine/magicsock - tailscale.com/util/set from tailscale.com/derp+ + tailscale.com/util/set from tailscale.com/control/controlclient+ tailscale.com/util/singleflight from tailscale.com/control/controlclient+ - tailscale.com/util/slicesx from tailscale.com/net/dns/recursive+ + tailscale.com/util/slicesx from tailscale.com/appc+ tailscale.com/util/syspolicy from tailscale.com/feature/syspolicy tailscale.com/util/syspolicy/internal from tailscale.com/util/syspolicy/setting+ tailscale.com/util/syspolicy/internal/loggerx from tailscale.com/util/syspolicy/internal/metrics+ @@ -438,7 +457,6 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/util/syspolicy/rsop from 
tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/setting from tailscale.com/util/syspolicy+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/ipn/ipnlocal+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ @@ -460,19 +478,20 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/wgengine/netstack from tailscale.com/cmd/tailscaled tailscale.com/wgengine/netstack/gro from tailscale.com/net/tstun+ tailscale.com/wgengine/router from tailscale.com/cmd/tailscaled+ + tailscale.com/wgengine/router/osrouter from tailscale.com/feature/condregister tailscale.com/wgengine/wgcfg from tailscale.com/ipn/ipnlocal+ tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router + W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router/osrouter golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/tls+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ + golang.org/x/crypto/cryptobyte from tailscale.com/feature/tpm + golang.org/x/crypto/cryptobyte/asn1 from golang.org/x/crypto/cryptobyte+ golang.org/x/crypto/curve25519 from golang.org/x/crypto/ssh+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -486,20 +505,16 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from tailscale.com/net/ping+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy 
from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ @@ -517,12 +532,28 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna archive/tar from tailscale.com/clientupdate bufio from compress/flate+ bytes from archive/tar+ cmp from slices+ compress/flate from compress/gzip+ - compress/gzip from golang.org/x/net/http2+ + compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ W compress/zlib from debug/pe container/heap from github.com/jellydator/ttlcache/v3+ container/list from crypto/tls+ @@ -606,7 +637,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de encoding/pem from crypto/tls+ encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ errors from archive/tar+ - expvar from tailscale.com/derp+ + expvar from tailscale.com/cmd/tailscaled+ flag from tailscale.com/cmd/tailscaled+ fmt from archive/tar+ hash from compress/zlib+ @@ -649,7 +680,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ diff --git a/cmd/tailscaled/deps_test.go b/cmd/tailscaled/deps_test.go index a672e32e2d63e..64d1beca7cd75 100644 --- a/cmd/tailscaled/deps_test.go +++ b/cmd/tailscaled/deps_test.go @@ -4,9 +4,12 @@ package main import ( + "maps" + "slices" "strings" "testing" + "tailscale.com/feature/featuretags" "tailscale.com/tstest/deptest" ) @@ -15,8 +18,9 @@ func TestOmitSSH(t *testing.T) { deptest.DepChecker{ GOOS: "linux", GOARCH: "amd64", - Tags: "ts_omit_ssh", + Tags: "ts_omit_ssh,ts_include_cli", BadDeps: map[string]string{ + "golang.org/x/crypto/ssh": msg, "tailscale.com/ssh/tailssh": msg, "tailscale.com/sessionrecording": msg, "github.com/anmitsu/go-shlex": msg, @@ -43,6 +47,17 @@ func TestOmitSyspolicy(t *testing.T) { }.Check(t) } +func TestOmitLocalClient(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: 
"ts_omit_webclient,ts_omit_relayserver,ts_omit_oauthkey,ts_omit_acme", + BadDeps: map[string]string{ + "tailscale.com/client/local": "unexpected", + }, + }.Check(t) +} + // Test that we can build a binary without reflect.MethodByName. // See https://github.com/tailscale/tailscale/issues/17063 func TestOmitReflectThings(t *testing.T) { @@ -61,3 +76,218 @@ func TestOmitReflectThings(t *testing.T) { }, }.Check(t) } + +func TestOmitDrive(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_drive,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "driveimpl") { + t.Errorf("unexpected dep with ts_omit_drive: %q", dep) + } + if strings.Contains(dep, "webdav") { + t.Errorf("unexpected dep with ts_omit_drive: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitPortmapper(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_portmapper,ts_include_cli,ts_omit_debugportmapper", + OnDep: func(dep string) { + if dep == "tailscale.com/net/portmapper" { + t.Errorf("unexpected dep with ts_omit_portmapper: %q", dep) + return + } + if strings.Contains(dep, "goupnp") || strings.Contains(dep, "/soap") || + strings.Contains(dep, "internetgateway2") { + t.Errorf("unexpected dep with ts_omit_portmapper: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitACME(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_acme,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "/acme") { + t.Errorf("unexpected dep with ts_omit_acme: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitCaptivePortal(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_captiveportal,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "captive") { + t.Errorf("unexpected dep with ts_omit_captiveportal: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitAuth(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_oauthkey,ts_omit_identityfederation,ts_include_cli", + OnDep: func(dep string) { + if strings.HasPrefix(dep, "golang.org/x/oauth2") { + t.Errorf("unexpected oauth2 dep: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitOutboundProxy(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_outboundproxy,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "socks5") || strings.Contains(dep, "proxymux") { + t.Errorf("unexpected dep with ts_omit_outboundproxy: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitDBus(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_networkmanager,ts_omit_dbus,ts_omit_resolved,ts_omit_systray,ts_omit_ssh,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "dbus") { + t.Errorf("unexpected DBus dep: %q", dep) + } + }, + }.Check(t) +} + +func TestNetstack(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_gro,ts_omit_netstack,ts_omit_outboundproxy,ts_omit_serve,ts_omit_ssh,ts_omit_webclient,ts_omit_tap", + OnDep: func(dep string) { + if strings.Contains(dep, "gvisor") { + t.Errorf("unexpected gvisor dep: %q", dep) + } + }, + }.Check(t) +} + +func TestOmitPortlist(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_portlist,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "portlist") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} + +func 
TestOmitGRO(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_gro,ts_include_cli", + BadDeps: map[string]string{ + "gvisor.dev/gvisor/pkg/tcpip/stack/gro": "unexpected dep with ts_omit_gro", + }, + }.Check(t) +} + +func TestOmitUseProxy(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: "ts_omit_useproxy,ts_include_cli", + OnDep: func(dep string) { + if strings.Contains(dep, "tshttproxy") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} + +func minTags() string { + var tags []string + for _, f := range slices.Sorted(maps.Keys(featuretags.Features)) { + if f.IsOmittable() { + tags = append(tags, f.OmitTag()) + } + } + return strings.Join(tags, ",") +} + +func TestMinTailscaledNoCLI(t *testing.T) { + badSubstrs := []string{ + "cbor", + "regexp", + "golang.org/x/net/proxy", + "internal/socks", + "github.com/tailscale/peercred", + "tailscale.com/types/netlogtype", + "deephash", + "util/hashx", + } + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: minTags(), + OnDep: func(dep string) { + for _, bad := range badSubstrs { + if strings.Contains(dep, bad) { + t.Errorf("unexpected dep: %q", dep) + } + } + }, + }.Check(t) +} + +func TestMinTailscaledWithCLI(t *testing.T) { + badSubstrs := []string{ + "cbor", + "hujson", + "pprof", + "multierr", // https://github.com/tailscale/tailscale/pull/17379 + "tailscale.com/metrics", + "tailscale.com/tsweb/varz", + "dirwalk", + "deephash", + "util/hashx", + } + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + Tags: minTags() + ",ts_include_cli", + OnDep: func(dep string) { + for _, bad := range badSubstrs { + if strings.Contains(dep, bad) { + t.Errorf("unexpected dep: %q", dep) + } + } + }, + BadDeps: map[string]string{ + "golang.org/x/net/http2": "unexpected x/net/http2 dep; tailscale/tailscale#17305", + "expvar": "unexpected expvar dep", + "github.com/mdlayher/genetlink": "unexpected genetlink dep", + }, + }.Check(t) +} diff --git a/cmd/tailscaled/flag.go b/cmd/tailscaled/flag.go new file mode 100644 index 0000000000000..f640aceed45d8 --- /dev/null +++ b/cmd/tailscaled/flag.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package main + +import "strconv" + +// boolFlag is a flag.Value that tracks whether it was ever set. 
+type boolFlag struct { + set bool + v bool +} + +func (b *boolFlag) String() string { + if b == nil || !b.set { + return "unset" + } + return strconv.FormatBool(b.v) +} + +func (b *boolFlag) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.v = v + b.set = true + return nil +} + +func (b *boolFlag) IsBoolFlag() bool { return true } diff --git a/cmd/tailscaled/install_windows.go b/cmd/tailscaled/install_windows.go index e98a6461ea57b..6013660f5aa20 100644 --- a/cmd/tailscaled/install_windows.go +++ b/cmd/tailscaled/install_windows.go @@ -16,8 +16,8 @@ import ( "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" "tailscale.com/cmd/tailscaled/tailscaledhooks" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" ) func init() { diff --git a/cmd/tailscaled/netstack.go b/cmd/tailscaled/netstack.go new file mode 100644 index 0000000000000..c0b34ed411c78 --- /dev/null +++ b/cmd/tailscaled/netstack.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package main + +import ( + "context" + "expvar" + "net" + "net/netip" + + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/wgengine/netstack" +) + +func init() { + hookNewNetstack.Set(newNetstack) +} + +func newNetstack(logf logger.Logf, sys *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error) { + ns, err := netstack.Create(logf, + sys.Tun.Get(), + sys.Engine.Get(), + sys.MagicSock.Get(), + sys.Dialer.Get(), + sys.DNSManager.Get(), + sys.ProxyMapper(), + ) + if err != nil { + return nil, err + } + // Only register debug info if we have a debug mux + if debugMux != nil { + expvar.Publish("netstack", ns.ExpVar()) + } + + sys.Set(ns) + ns.ProcessLocalIPs = onlyNetstack + ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack() + + dialer := sys.Dialer.Get() // must be set by caller already + + if onlyNetstack { + e := sys.Engine.Get() + dialer.UseNetstackForIP = func(ip netip.Addr) bool { + _, ok := e.PeerForIP(ip) + return ok + } + dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + // Note: don't just return ns.DialContextTCP or we'll return + // *gonet.TCPConn(nil) instead of a nil interface which trips up + // callers. + tcpConn, err := ns.DialContextTCP(ctx, dst) + if err != nil { + return nil, err + } + return tcpConn, nil + } + dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { + // Note: don't just return ns.DialContextUDP or we'll return + // *gonet.UDPConn(nil) instead of a nil interface which trips up + // callers. 
+ udpConn, err := ns.DialContextUDP(ctx, dst) + if err != nil { + return nil, err + } + return udpConn, nil + } + } + + return ns, nil +} diff --git a/cmd/tailscaled/proxy.go b/cmd/tailscaled/proxy.go index a91c62bfa44ac..85c3d91f9de96 100644 --- a/cmd/tailscaled/proxy.go +++ b/cmd/tailscaled/proxy.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build go1.19 +//go:build !ts_omit_outboundproxy // HTTP proxy code @@ -9,13 +9,107 @@ package main import ( "context" + "flag" "io" + "log" "net" "net/http" "net/http/httputil" "strings" + + "tailscale.com/feature" + "tailscale.com/net/proxymux" + "tailscale.com/net/socks5" + "tailscale.com/net/tsdial" + "tailscale.com/types/logger" ) +func init() { + hookRegisterOutboundProxyFlags.Set(registerOutboundProxyFlags) + hookOutboundProxyListen.Set(outboundProxyListen) +} + +func registerOutboundProxyFlags() { + flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) + flag.StringVar(&args.httpProxyAddr, "outbound-http-proxy-listen", "", `optional [ip]:port to run an outbound HTTP proxy (e.g. "localhost:8080")`) +} + +// outboundProxyListen creates listeners for local SOCKS and HTTP proxies, if +// the respective addresses are not empty. args.socksAddr and args.httpProxyAddr +// can be the same, in which case the SOCKS5 Listener will receive connections +// that look like they're speaking SOCKS and httpListener will receive +// everything else. +// +// socksListener and httpListener can be nil, if their respective addrs are +// empty. +// +// The returned func closes over those two (possibly nil) listeners and +// starts the respective servers on the listener when called. +func outboundProxyListen() proxyStartFunc { + socksAddr, httpAddr := args.socksAddr, args.httpProxyAddr + + if socksAddr == httpAddr && socksAddr != "" && !strings.HasSuffix(socksAddr, ":0") { + ln, err := net.Listen("tcp", socksAddr) + if err != nil { + log.Fatalf("proxy listener: %v", err) + } + return mkProxyStartFunc(proxymux.SplitSOCKSAndHTTP(ln)) + } + + var socksListener, httpListener net.Listener + var err error + if socksAddr != "" { + socksListener, err = net.Listen("tcp", socksAddr) + if err != nil { + log.Fatalf("SOCKS5 listener: %v", err) + } + if strings.HasSuffix(socksAddr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. + log.Printf("SOCKS5 listening on %v", socksListener.Addr()) + } + } + if httpAddr != "" { + httpListener, err = net.Listen("tcp", httpAddr) + if err != nil { + log.Fatalf("HTTP proxy listener: %v", err) + } + if strings.HasSuffix(httpAddr, ":0") { + // Log kernel-selected port number so integration tests + // can find it portably. 
+ log.Printf("HTTP proxy listening on %v", httpListener.Addr()) + } + } + + return mkProxyStartFunc(socksListener, httpListener) +} + +func mkProxyStartFunc(socksListener, httpListener net.Listener) proxyStartFunc { + return func(logf logger.Logf, dialer *tsdial.Dialer) { + var addrs []string + if httpListener != nil { + hs := &http.Server{Handler: httpProxyHandler(dialer.UserDial)} + go func() { + log.Fatalf("HTTP proxy exited: %v", hs.Serve(httpListener)) + }() + addrs = append(addrs, httpListener.Addr().String()) + } + if socksListener != nil { + ss := &socks5.Server{ + Logf: logger.WithPrefix(logf, "socks5: "), + Dialer: dialer.UserDial, + } + go func() { + log.Fatalf("SOCKS5 server exited: %v", ss.Serve(socksListener)) + }() + addrs = append(addrs, socksListener.Addr().String()) + } + if set, ok := feature.HookProxySetSelfProxy.GetOk(); ok { + set(addrs...) + } + } +} + // httpProxyHandler returns an HTTP proxy http.Handler using the // provided backend dialer. func httpProxyHandler(dialer func(ctx context.Context, netw, addr string) (net.Conn, error)) http.Handler { diff --git a/cmd/tailscaled/tailscaled.go b/cmd/tailscaled/tailscaled.go index ddf6d9ef68f5d..f14cdcff072b1 100644 --- a/cmd/tailscaled/tailscaled.go +++ b/cmd/tailscaled/tailscaled.go @@ -13,14 +13,11 @@ package main // import "tailscale.com/cmd/tailscaled" import ( "context" "errors" - "expvar" "flag" "fmt" "log" "net" "net/http" - "net/http/pprof" - "net/netip" "os" "os/signal" "path/filepath" @@ -30,11 +27,11 @@ import ( "syscall" "time" - "tailscale.com/client/local" "tailscale.com/cmd/tailscaled/childproc" "tailscale.com/control/controlclient" - "tailscale.com/drive/driveimpl" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -48,28 +45,22 @@ import ( "tailscale.com/net/dnsfallback" "tailscale.com/net/netmon" "tailscale.com/net/netns" - "tailscale.com/net/proxymux" - "tailscale.com/net/socks5" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/net/tstun" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/syncs" "tailscale.com/tsd" - "tailscale.com/tsweb/varz" "tailscale.com/types/flagtype" + "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" - "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" "tailscale.com/util/osshare" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version" "tailscale.com/version/distro" "tailscale.com/wgengine" - "tailscale.com/wgengine/netstack" "tailscale.com/wgengine/router" ) @@ -89,13 +80,11 @@ func defaultTunName() string { case "aix", "solaris", "illumos": return "userspace-networking" case "linux": - switch distro.Get() { - case distro.Synology: + if buildfeatures.HasSynology && buildfeatures.HasNetstack && distro.Get() == distro.Synology { // Try TUN, but fall back to userspace networking if needed. // See https://github.com/tailscale/tailscale-synology/issues/35 return "tailscale0,userspace-networking" } - } return "tailscale0" } @@ -123,19 +112,20 @@ var args struct { // or comma-separated list thereof. 
tunname string - cleanUp bool - confFile string // empty, file path, or "vm:user-data" - debug string - port uint16 - statepath string - encryptState bool - statedir string - socketpath string - birdSocketPath string - verbose int - socksAddr string // listen address for SOCKS5 server - httpProxyAddr string // listen address for HTTP proxy server - disableLogs bool + cleanUp bool + confFile string // empty, file path, or "vm:user-data" + debug string + port uint16 + statepath string + encryptState boolFlag + statedir string + socketpath string + birdSocketPath string + verbose int + socksAddr string // listen address for SOCKS5 server + httpProxyAddr string // listen address for HTTP proxy server + disableLogs bool + hardwareAttestation boolFlag } var ( @@ -151,9 +141,7 @@ var ( var subCommands = map[string]*func([]string) error{ "install-system-daemon": &installSystemDaemon, "uninstall-system-daemon": &uninstallSystemDaemon, - "debug": &debugModeFunc, "be-child": &beChildFunc, - "serve-taildrive": &serveDriveFunc, } var beCLI func() // non-nil if CLI is linked in with the "ts_include_cli" build tag @@ -177,6 +165,17 @@ func shouldRunCLI() bool { return false } +// Outbound Proxy hooks +var ( + hookRegisterOutboundProxyFlags feature.Hook[func()] + hookOutboundProxyListen feature.Hook[func() proxyStartFunc] +) + +// proxyStartFunc is the type of the function returned by +// outboundProxyListen, to start the servers on the Listeners +// started by hookOutboundProxyListen. +type proxyStartFunc = func(logf logger.Logf, dialer *tsdial.Dialer) + func main() { envknob.PanicIfAnyEnvCheckedInInit() if shouldRunCLI() { @@ -190,19 +189,29 @@ func main() { printVersion := false flag.IntVar(&args.verbose, "verbose", defaultVerbosity(), "log verbosity level; 0 is default, 1 or higher are increasingly verbose") flag.BoolVar(&args.cleanUp, "cleanup", false, "clean up system state and exit") - flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") - flag.StringVar(&args.socksAddr, "socks5-server", "", `optional [ip]:port to run a SOCK5 server (e.g. "localhost:1080")`) - flag.StringVar(&args.httpProxyAddr, "outbound-http-proxy-listen", "", `optional [ip]:port to run an outbound HTTP proxy (e.g. "localhost:8080")`) + if buildfeatures.HasDebug { + flag.StringVar(&args.debug, "debug", "", "listen address ([ip]:port) of optional debug server") + } flag.StringVar(&args.tunname, "tun", defaultTunName(), `tunnel interface name; use "userspace-networking" (beta) to not use TUN`) flag.Var(flagtype.PortValue(&args.port, defaultPort()), "port", "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select") flag.StringVar(&args.statepath, "state", "", "absolute path of state file; use 'kube:' to use Kubernetes secrets or 'arn:aws:ssm:...' to store in AWS SSM; use 'mem:' to not store state and register as an ephemeral node. If empty and --statedir is provided, the default is /tailscaled.state. 
Default: "+paths.DefaultTailscaledStateFile()) - flag.BoolVar(&args.encryptState, "encrypt-state", defaultEncryptState(), "encrypt the state file on disk; uses TPM on Linux and Windows, on all other platforms this flag is not supported") + if buildfeatures.HasTPM { + flag.Var(&args.encryptState, "encrypt-state", `encrypt the state file on disk; when not set encryption will be enabled if supported on this platform; uses TPM on Linux and Windows, on all other platforms this flag is not supported`) + } flag.StringVar(&args.statedir, "statedir", "", "path to directory for storage of config state, TLS certs, temporary incoming Taildrop files, etc. If empty, it's derived from --state when possible.") flag.StringVar(&args.socketpath, "socket", paths.DefaultTailscaledSocket(), "path of the service unix socket") - flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") + if buildfeatures.HasBird { + flag.StringVar(&args.birdSocketPath, "bird-socket", "", "path of the bird unix socket") + } flag.BoolVar(&printVersion, "version", false, "print version information and exit") flag.BoolVar(&args.disableLogs, "no-logs-no-support", false, "disable log uploads; this also disables any technical support") flag.StringVar(&args.confFile, "config", "", "path to config file, or 'vm:user-data' to use the VM's user-data (EC2)") + if buildfeatures.HasTPM { + flag.Var(&args.hardwareAttestation, "hardware-attestation", "use hardware-backed keys to bind node identity to this device when supported by the OS and hardware. Uses TPM 2.0 on Linux and Windows; SecureEnclave on macOS and iOS; and Keystore on Android") + } + if f, ok := hookRegisterOutboundProxyFlags.GetOk(); ok { + f() + } if runtime.GOOS == "plan9" && os.Getenv("_NETSHELL_CHILD_") != "" { os.Args = []string{"tailscaled", "be-child", "plan9-netshell"} @@ -250,7 +259,7 @@ func main() { log.Fatalf("--socket is required") } - if args.birdSocketPath != "" && createBIRDClient == nil { + if buildfeatures.HasBird && args.birdSocketPath != "" && createBIRDClient == nil { log.SetFlags(0) log.Fatalf("--bird-socket is not supported on %s", runtime.GOOS) } @@ -271,26 +280,8 @@ func main() { } } - if args.encryptState { - if runtime.GOOS != "linux" && runtime.GOOS != "windows" { - log.SetFlags(0) - log.Fatalf("--encrypt-state is not supported on %s", runtime.GOOS) - } - // Check if we have TPM support in this build. - if !store.HasKnownProviderPrefix(store.TPMPrefix + "/") { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported in this build of tailscaled") - } - // Check if we have TPM access. - if !hostinfo.New().TPM.Present() { - log.SetFlags(0) - log.Fatal("--encrypt-state is not supported on this device or a TPM is not accessible") - } - // Check for conflicting prefix in --state, like arn: or kube:. 
- if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { - log.SetFlags(0) - log.Fatal("--encrypt-state can only be used with --state set to a local file path") - } + if buildfeatures.HasTPM { + handleTPMFlags() } if args.disableLogs { @@ -303,8 +294,10 @@ func main() { err := run() - // Remove file sharing from Windows shell (noop in non-windows) - osshare.SetFileSharingEnabled(false, logger.Discard) + if buildfeatures.HasTaildrop { + // Remove file sharing from Windows shell (noop in non-windows) + osshare.SetFileSharingEnabled(false, logger.Discard) + } if err != nil { log.Fatal(err) @@ -347,7 +340,7 @@ func statePathOrDefault() string { if path == "" && args.statedir != "" { path = filepath.Join(args.statedir, "tailscaled.state") } - if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState { + if path != "" && !store.HasKnownProviderPrefix(path) && args.encryptState.v { path = store.TPMPrefix + path } return path @@ -397,7 +390,7 @@ func ipnServerOpts() (o serverOptions) { return o } -var logPol *logpolicy.Policy +var logPol *logpolicy.Policy // or nil if not used var debugMux *http.ServeMux func run() (err error) { @@ -427,15 +420,25 @@ func run() (err error) { sys.Set(netMon) } - pol := logpolicy.New(logtail.CollectionNode, netMon, sys.HealthTracker(), nil /* use log.Printf */) - pol.SetVerbosityLevel(args.verbose) - logPol = pol - defer func() { - // Finish uploading logs after closing everything else. - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - pol.Shutdown(ctx) - }() + var publicLogID logid.PublicID + if buildfeatures.HasLogTail { + + pol := logpolicy.Options{ + Collection: logtail.CollectionNode, + NetMon: netMon, + Health: sys.HealthTracker.Get(), + Bus: sys.Bus.Get(), + }.New() + pol.SetVerbosityLevel(args.verbose) + publicLogID = pol.PublicID + logPol = pol + defer func() { + // Finish uploading logs after closing everything else. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + pol.Shutdown(ctx) + }() + } if err := envknob.ApplyDiskConfigError(); err != nil { log.Printf("Error reading environment config: %v", err) @@ -444,7 +447,7 @@ func run() (err error) { if isWinSvc { // Run the IPN server from the Windows service manager. log.Printf("Running service...") - if err := runWindowsService(pol); err != nil { + if err := runWindowsService(logPol); err != nil { log.Printf("runservice: %v", err) } log.Printf("Service ended.") @@ -462,7 +465,7 @@ func run() (err error) { // Always clean up, even if we're going to run the server. This covers cases // such as when a system was rebooted without shutting down, or tailscaled // crashed, and would for example restore system DNS configuration. - dns.CleanUp(logf, netMon, sys.HealthTracker(), args.tunname) + dns.CleanUp(logf, netMon, sys.Bus.Get(), sys.HealthTracker.Get(), args.tunname) router.CleanUp(logf, netMon, args.tunname) // If the cleanUp flag was passed, then exit. 
if args.cleanUp { @@ -476,21 +479,29 @@ func run() (err error) { log.Printf("error in synology migration: %v", err) } - if args.debug != "" { - debugMux = newDebugMux() + if buildfeatures.HasDebug && args.debug != "" { + debugMux = hookNewDebugMux.Get()() } - sys.Set(driveimpl.NewFileSystemForRemote(logf)) + if f, ok := hookSetSysDrive.GetOk(); ok { + f(sys, logf) + } if app := envknob.App(); app != "" { hostinfo.SetApp(app) } - return startIPNServer(context.Background(), logf, pol.PublicID, sys) + return startIPNServer(context.Background(), logf, publicLogID, sys) } +var ( + hookSetSysDrive feature.Hook[func(*tsd.System, logger.Logf)] + hookSetWgEnginConfigDrive feature.Hook[func(*wgengine.Config, logger.Logf)] +) + var sigPipe os.Signal // set by sigpipe.go +// logID may be the zero value if logging is not in use. func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) error { ln, err := safesocket.Listen(args.socketpath) if err != nil { @@ -532,8 +543,8 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, } }() - srv := ipnserver.New(logf, logID, sys.NetMon.Get()) - if debugMux != nil { + srv := ipnserver.New(logf, logID, sys.Bus.Get(), sys.NetMon.Get()) + if buildfeatures.HasDebug && debugMux != nil { debugMux.HandleFunc("/debug/ipn", srv.ServeHTMLStatus) } var lbErr syncs.AtomicValue[error] @@ -584,82 +595,49 @@ func startIPNServer(ctx context.Context, logf logger.Logf, logID logid.PublicID, return nil } +var ( + hookNewNetstack feature.Hook[func(_ logger.Logf, _ *tsd.System, onlyNetstack bool) (tsd.NetstackImpl, error)] +) + +// logID may be the zero value if logging is not in use. func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID, sys *tsd.System) (_ *ipnlocal.LocalBackend, retErr error) { if logPol != nil { logPol.Logtail.SetNetMon(sys.NetMon.Get()) } - socksListener, httpProxyListener := mustStartProxyListeners(args.socksAddr, args.httpProxyAddr) + var startProxy proxyStartFunc + if listen, ok := hookOutboundProxyListen.GetOk(); ok { + startProxy = listen() + } dialer := &tsdial.Dialer{Logf: logf} // mutated below (before used) + dialer.SetBus(sys.Bus.Get()) sys.Set(dialer) onlyNetstack, err := createEngine(logf, sys) if err != nil { return nil, fmt.Errorf("createEngine: %w", err) } - if debugMux != nil { + if onlyNetstack && !buildfeatures.HasNetstack { + return nil, errors.New("userspace-networking support is not compiled in to this binary") + } + if buildfeatures.HasDebug && debugMux != nil { if ms, ok := sys.MagicSock.GetOK(); ok { debugMux.HandleFunc("/debug/magicsock", ms.ServeHTTPDebug) } go runDebugServer(logf, debugMux, args.debug) } - ns, err := newNetstack(logf, sys) - if err != nil { - return nil, fmt.Errorf("newNetstack: %w", err) + var ns tsd.NetstackImpl // or nil if not linked in + if newNetstack, ok := hookNewNetstack.GetOk(); ok { + ns, err = newNetstack(logf, sys, onlyNetstack) + if err != nil { + return nil, fmt.Errorf("newNetstack: %w", err) + } } - sys.Set(ns) - ns.ProcessLocalIPs = onlyNetstack - ns.ProcessSubnets = onlyNetstack || handleSubnetsInNetstack() - if onlyNetstack { - e := sys.Engine.Get() - dialer.UseNetstackForIP = func(ip netip.Addr) bool { - _, ok := e.PeerForIP(ip) - return ok - } - dialer.NetstackDialTCP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { - // Note: don't just return ns.DialContextTCP or we'll return - // *gonet.TCPConn(nil) instead of a nil interface which trips up - // callers. 
- tcpConn, err := ns.DialContextTCP(ctx, dst) - if err != nil { - return nil, err - } - return tcpConn, nil - } - dialer.NetstackDialUDP = func(ctx context.Context, dst netip.AddrPort) (net.Conn, error) { - // Note: don't just return ns.DialContextUDP or we'll return - // *gonet.UDPConn(nil) instead of a nil interface which trips up - // callers. - udpConn, err := ns.DialContextUDP(ctx, dst) - if err != nil { - return nil, err - } - return udpConn, nil - } - } - if socksListener != nil || httpProxyListener != nil { - var addrs []string - if httpProxyListener != nil { - hs := &http.Server{Handler: httpProxyHandler(dialer.UserDial)} - go func() { - log.Fatalf("HTTP proxy exited: %v", hs.Serve(httpProxyListener)) - }() - addrs = append(addrs, httpProxyListener.Addr().String()) - } - if socksListener != nil { - ss := &socks5.Server{ - Logf: logger.WithPrefix(logf, "socks5: "), - Dialer: dialer.UserDial, - } - go func() { - log.Fatalf("SOCKS5 server exited: %v", ss.Serve(socksListener)) - }() - addrs = append(addrs, socksListener.Addr().String()) - } - tshttpproxy.SetSelfProxy(addrs...) + if startProxy != nil { + go startProxy(logf, dialer) } opts := ipnServerOpts() @@ -685,16 +663,23 @@ func getLocalBackend(ctx context.Context, logf logger.Logf, logID logid.PublicID if root := lb.TailscaleVarRoot(); root != "" { dnsfallback.SetCachePath(filepath.Join(root, "derpmap.cached.json"), logf) } - lb.ConfigureWebClient(&local.Client{ - Socket: args.socketpath, - UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), - }) - if err := ns.Start(lb); err != nil { - log.Fatalf("failed to start netstack: %v", err) + if f, ok := hookConfigureWebClient.GetOk(); ok { + f(lb) + } + + if ns != nil { + if err := ns.Start(lb); err != nil { + log.Fatalf("failed to start netstack: %v", err) + } + } + if buildfeatures.HasTPM && args.hardwareAttestation.v { + lb.SetHardwareAttested() } return lb, nil } +var hookConfigureWebClient feature.Hook[func(*ipnlocal.LocalBackend)] + // createEngine tries to the wgengine.Engine based on the order of tunnels // specified in the command line flags. // @@ -714,7 +699,7 @@ func createEngine(logf logger.Logf, sys *tsd.System) (onlyNetstack bool, err err logf("wgengine.NewUserspaceEngine(tun %q) error: %v", name, err) errs = append(errs, err) } - return false, multierr.New(errs...) + return false, errors.Join(errs...) } // handleSubnetsInNetstack reports whether netstack should handle subnet routers @@ -743,16 +728,18 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo conf := wgengine.Config{ ListenPort: args.port, NetMon: sys.NetMon.Get(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), Dialer: sys.Dialer.Get(), SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), EventBus: sys.Bus.Get(), - DriveForLocal: driveimpl.NewFileSystemForLocal(logf), + } + if f, ok := hookSetWgEnginConfigDrive.GetOk(); ok { + f(&conf, logf) } - sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + sys.HealthTracker.Get().SetMetricsRegistry(sys.UserMetricsRegistry()) onlyNetstack = name == "userspace-networking" netstackSubnetRouter := onlyNetstack // but mutated later on some platforms @@ -773,7 +760,7 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo // configuration being unavailable (from the noop // manager). More in Issue 4017. // TODO(bradfitz): add a Synology-specific DNS manager. 
- conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name + conf.DNS, err = dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), "") // empty interface name if err != nil { return false, fmt.Errorf("dns.NewOSConfigurator: %w", err) } @@ -801,13 +788,13 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo sys.NetMon.Get().SetTailscaleInterfaceName(devName) } - r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker(), sys.Bus.Get()) + r, err := router.New(logf, dev, sys.NetMon.Get(), sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil { dev.Close() return false, fmt.Errorf("creating router: %w", err) } - d, err := dns.NewOSConfigurator(logf, sys.HealthTracker(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) + d, err := dns.NewOSConfigurator(logf, sys.HealthTracker.Get(), sys.PolicyClientOrDefault(), sys.ControlKnobs(), devName) if err != nil { dev.Close() r.Close() @@ -831,24 +818,12 @@ func tryEngine(logf logger.Logf, sys *tsd.System, name string) (onlyNetstack boo return onlyNetstack, nil } -func newDebugMux() *http.ServeMux { - mux := http.NewServeMux() - mux.HandleFunc("/debug/metrics", servePrometheusMetrics) - mux.HandleFunc("/debug/pprof/", pprof.Index) - mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) - mux.HandleFunc("/debug/pprof/profile", pprof.Profile) - mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) - mux.HandleFunc("/debug/pprof/trace", pprof.Trace) - return mux -} - -func servePrometheusMetrics(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - varz.Handler(w, r) - clientmetric.WritePrometheusExpositionFormat(w) -} +var hookNewDebugMux feature.Hook[func() *http.ServeMux] func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { + if !buildfeatures.HasDebug { + return + } ln, err := net.Listen("tcp", addr) if err != nil { log.Fatalf("debug server: %v", err) @@ -866,69 +841,6 @@ func runDebugServer(logf logger.Logf, mux *http.ServeMux, addr string) { } } -func newNetstack(logf logger.Logf, sys *tsd.System) (*netstack.Impl, error) { - ret, err := netstack.Create(logf, - sys.Tun.Get(), - sys.Engine.Get(), - sys.MagicSock.Get(), - sys.Dialer.Get(), - sys.DNSManager.Get(), - sys.ProxyMapper(), - ) - if err != nil { - return nil, err - } - // Only register debug info if we have a debug mux - if debugMux != nil { - expvar.Publish("netstack", ret.ExpVar()) - } - return ret, nil -} - -// mustStartProxyListeners creates listeners for local SOCKS and HTTP -// proxies, if the respective addresses are not empty. socksAddr and -// httpAddr can be the same, in which case socksListener will receive -// connections that look like they're speaking SOCKS and httpListener -// will receive everything else. -// -// socksListener and httpListener can be nil, if their respective -// addrs are empty. 
-func mustStartProxyListeners(socksAddr, httpAddr string) (socksListener, httpListener net.Listener) { - if socksAddr == httpAddr && socksAddr != "" && !strings.HasSuffix(socksAddr, ":0") { - ln, err := net.Listen("tcp", socksAddr) - if err != nil { - log.Fatalf("proxy listener: %v", err) - } - return proxymux.SplitSOCKSAndHTTP(ln) - } - - var err error - if socksAddr != "" { - socksListener, err = net.Listen("tcp", socksAddr) - if err != nil { - log.Fatalf("SOCKS5 listener: %v", err) - } - if strings.HasSuffix(socksAddr, ":0") { - // Log kernel-selected port number so integration tests - // can find it portably. - log.Printf("SOCKS5 listening on %v", socksListener.Addr()) - } - } - if httpAddr != "" { - httpListener, err = net.Listen("tcp", httpAddr) - if err != nil { - log.Fatalf("HTTP proxy listener: %v", err) - } - if strings.HasSuffix(httpAddr, ":0") { - // Log kernel-selected port number so integration tests - // can find it portably. - log.Printf("HTTP proxy listening on %v", httpListener.Addr()) - } - } - - return socksListener, httpListener -} - var beChildFunc = beChild func beChild(args []string) error { @@ -943,35 +855,6 @@ func beChild(args []string) error { return f(args[1:]) } -var serveDriveFunc = serveDrive - -// serveDrive serves one or more Taildrives on localhost using the WebDAV -// protocol. On UNIX and MacOS tailscaled environment, Taildrive spawns child -// tailscaled processes in serve-taildrive mode in order to access the fliesystem -// as specific (usually unprivileged) users. -// -// serveDrive prints the address on which it's listening to stdout so that the -// parent process knows where to connect to. -func serveDrive(args []string) error { - if len(args) == 0 { - return errors.New("missing shares") - } - if len(args)%2 != 0 { - return errors.New("need pairs") - } - s, err := driveimpl.NewFileServer() - if err != nil { - return fmt.Errorf("unable to start Taildrive file server: %v", err) - } - shares := make(map[string]string) - for i := 0; i < len(args); i += 2 { - shares[args[i]] = args[i+1] - } - s.SetShares(shares) - fmt.Printf("%v\n", s.Addr()) - return s.Serve() -} - // dieOnPipeReadErrorOfFD reads from the pipe named by fd and exit the process // when the pipe becomes readable. We use this in tests as a somewhat more // portable mechanism for the Linux PR_SET_PDEATHSIG, which we wish existed on @@ -1004,14 +887,64 @@ func applyIntegrationTestEnvKnob() { } } -func defaultEncryptState() bool { +// handleTPMFlags validates the --encrypt-state and --hardware-attestation flags +// if set, and defaults both to on if supported and compatible with other +// settings. +func handleTPMFlags() { + switch { + case args.hardwareAttestation.v: + if _, err := key.NewEmptyHardwareAttestationKey(); err == key.ErrUnsupported { + log.SetFlags(0) + log.Fatalf("--hardware-attestation is not supported on this platform or in this build of tailscaled") + } + case !args.hardwareAttestation.set: + policyHWAttestation, _ := policyclient.Get().GetBoolean(pkey.HardwareAttestation, feature.HardwareAttestationAvailable()) + if !policyHWAttestation { + break + } + if feature.TPMAvailable() { + args.hardwareAttestation.v = true + } + } + + switch { + case args.encryptState.v: + // Explicitly enabled, validate. + if err := canEncryptState(); err != nil { + log.SetFlags(0) + log.Fatal(err) + } + case !args.encryptState.set: + policyEncrypt, _ := policyclient.Get().GetBoolean(pkey.EncryptState, feature.TPMAvailable()) + if !policyEncrypt { + // Default disabled, no need to validate. 
+ return + } + // Default enabled if available. + if err := canEncryptState(); err == nil { + args.encryptState.v = true + } + } +} + +// canEncryptState returns an error if state encryption can't be enabled, +// either due to availability or compatibility with other settings. +func canEncryptState() error { if runtime.GOOS != "windows" && runtime.GOOS != "linux" { // TPM encryption is only configurable on Windows and Linux. Other // platforms either use system APIs and are not configurable // (Android/Apple), or don't support any form of encryption yet // (plan9/FreeBSD/etc). - return false + return fmt.Errorf("--encrypt-state is not supported on %s", runtime.GOOS) + } + // Check if we have TPM access. + if !feature.TPMAvailable() { + return errors.New("--encrypt-state is not supported on this device or a TPM is not accessible") } - v, _ := policyclient.Get().GetBoolean(pkey.EncryptState, false) - return v + // Check for conflicting prefix in --state, like arn: or kube:. + if args.statepath != "" && store.HasKnownProviderPrefix(args.statepath) { + return errors.New("--encrypt-state can only be used with --state set to a local file path") + } + + return nil } diff --git a/cmd/tailscaled/tailscaled_drive.go b/cmd/tailscaled/tailscaled_drive.go new file mode 100644 index 0000000000000..49f35a3811404 --- /dev/null +++ b/cmd/tailscaled/tailscaled_drive.go @@ -0,0 +1,56 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package main + +import ( + "errors" + "fmt" + + "tailscale.com/drive/driveimpl" + "tailscale.com/tsd" + "tailscale.com/types/logger" + "tailscale.com/wgengine" +) + +func init() { + subCommands["serve-taildrive"] = &serveDriveFunc + + hookSetSysDrive.Set(func(sys *tsd.System, logf logger.Logf) { + sys.Set(driveimpl.NewFileSystemForRemote(logf)) + }) + hookSetWgEnginConfigDrive.Set(func(conf *wgengine.Config, logf logger.Logf) { + conf.DriveForLocal = driveimpl.NewFileSystemForLocal(logf) + }) +} + +var serveDriveFunc = serveDrive + +// serveDrive serves one or more Taildrives on localhost using the WebDAV +// protocol. On UNIX and MacOS tailscaled environment, Taildrive spawns child +// tailscaled processes in serve-taildrive mode in order to access the fliesystem +// as specific (usually unprivileged) users. +// +// serveDrive prints the address on which it's listening to stdout so that the +// parent process knows where to connect to. 
+func serveDrive(args []string) error { + if len(args) == 0 { + return errors.New("missing shares") + } + if len(args)%2 != 0 { + return errors.New("need pairs") + } + s, err := driveimpl.NewFileServer() + if err != nil { + return fmt.Errorf("unable to start Taildrive file server: %v", err) + } + shares := make(map[string]string) + for i := 0; i < len(args); i += 2 { + shares[args[i]] = args[i+1] + } + s.SetShares(shares) + fmt.Printf("%v\n", s.Addr()) + return s.Serve() +} diff --git a/cmd/tailscaled/tailscaled_windows.go b/cmd/tailscaled/tailscaled_windows.go index 3a2edcac51886..3019bbaf9695b 100644 --- a/cmd/tailscaled/tailscaled_windows.go +++ b/cmd/tailscaled/tailscaled_windows.go @@ -47,13 +47,13 @@ import ( _ "tailscale.com/ipn/auditlog" _ "tailscale.com/ipn/desktop" "tailscale.com/logpolicy" - "tailscale.com/logtail/backoff" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/net/tstun" "tailscale.com/tsd" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/backoff" "tailscale.com/util/osdiag" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" @@ -149,6 +149,8 @@ var syslogf logger.Logf = logger.Discard // // At this point we're still the parent process that // Windows started. +// +// pol may be nil. func runWindowsService(pol *logpolicy.Policy) error { go func() { logger.Logf(log.Printf).JSON(1, "SupportInfo", osdiag.SupportInfo(osdiag.LogSupportInfoReasonStartup)) @@ -169,7 +171,7 @@ func runWindowsService(pol *logpolicy.Policy) error { } type ipnService struct { - Policy *logpolicy.Policy + Policy *logpolicy.Policy // or nil if logging not in use } // Called by Windows to execute the windows service. @@ -186,7 +188,11 @@ func (service *ipnService) Execute(args []string, r <-chan svc.ChangeRequest, ch doneCh := make(chan struct{}) go func() { defer close(doneCh) - args := []string{"/subproc", service.Policy.PublicID.String()} + publicID := "none" + if service.Policy != nil { + publicID = service.Policy.PublicID.String() + } + args := []string{"/subproc", publicID} // Make a logger without a date prefix, as filelogger // and logtail both already add their own. All we really want // from the log package is the automatic newline. diff --git a/cmd/tailscaled/webclient.go b/cmd/tailscaled/webclient.go new file mode 100644 index 0000000000000..672ba7126d2a7 --- /dev/null +++ b/cmd/tailscaled/webclient.go @@ -0,0 +1,21 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_webclient + +package main + +import ( + "tailscale.com/client/local" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/paths" +) + +func init() { + hookConfigureWebClient.Set(func(lb *ipnlocal.LocalBackend) { + lb.ConfigureWebClient(&local.Client{ + Socket: args.socketpath, + UseSocketOnly: args.socketpath != paths.DefaultTailscaledSocket(), + }) + }) +} diff --git a/cmd/testwrapper/flakytest/flakytest.go b/cmd/testwrapper/flakytest/flakytest.go index 6302900cbd3ab..856cb28ef275a 100644 --- a/cmd/testwrapper/flakytest/flakytest.go +++ b/cmd/testwrapper/flakytest/flakytest.go @@ -27,7 +27,7 @@ const FlakyTestLogMessage = "flakytest: this is a known flaky test" // starting at 1. 
const FlakeAttemptEnv = "TS_TESTWRAPPER_ATTEMPT" -var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/tailscale/[a-zA-Z0-9_.-]+/issues/\d+\z`) +var issueRegexp = regexp.MustCompile(`\Ahttps://github\.com/[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+/issues/\d+\z`) var ( rootFlakesMu sync.Mutex @@ -49,6 +49,15 @@ func Mark(t testing.TB, issue string) { // spamming people running tests without the wrapper) fmt.Fprintf(os.Stderr, "%s: %s\n", FlakyTestLogMessage, issue) } + t.Attr("flaky-test-issue-url", issue) + + // The Attr method above also emits human-readable output, so this t.Logf + // is somewhat redundant, but we keep it for compatibility with + // old test runs, so cmd/testwrapper doesn't need to be modified. + // TODO(bradfitz): switch testwrapper to look for Action "attr" + // instead: + // "Action":"attr","Package":"tailscale.com/cmd/testwrapper/flakytest","Test":"TestMarked_Root","Key":"flaky-test-issue-url","Value":"https://github.com/tailscale/tailscale/issues/0"} + // And then remove this Logf a month or so after that. t.Logf("flakytest: issue tracking this flaky test: %s", issue) // Record the root test name as flakey. diff --git a/cmd/testwrapper/flakytest/flakytest_test.go b/cmd/testwrapper/flakytest/flakytest_test.go index 64cbfd9a3cd1f..9b744de13d446 100644 --- a/cmd/testwrapper/flakytest/flakytest_test.go +++ b/cmd/testwrapper/flakytest/flakytest_test.go @@ -14,7 +14,8 @@ func TestIssueFormat(t *testing.T) { want bool }{ {"https://github.com/tailscale/cOrp/issues/1234", true}, - {"https://github.com/otherproject/corp/issues/1234", false}, + {"https://github.com/otherproject/corp/issues/1234", true}, + {"https://not.huyb/tailscale/corp/issues/1234", false}, {"https://github.com/tailscale/corp/issues/", false}, } for _, testCase := range testCases { diff --git a/cmd/tsconnect/wasm/wasm_js.go b/cmd/tsconnect/wasm/wasm_js.go index 87f8148668be3..2e81fa4a8a2e7 100644 --- a/cmd/tsconnect/wasm/wasm_js.go +++ b/cmd/tsconnect/wasm/wasm_js.go @@ -104,11 +104,12 @@ func newIPN(jsConfig js.Value) map[string]any { sys := tsd.NewSystem() sys.Set(store) dialer := &tsdial.Dialer{Logf: logf} + dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ Dialer: dialer, SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -138,7 +139,7 @@ func newIPN(jsConfig js.Value) map[string]any { sys.Tun.Get().Start() logid := lpc.PublicID - srv := ipnserver.New(logf, logid, sys.NetMon.Get()) + srv := ipnserver.New(logf, logid, sys.Bus.Get(), sys.NetMon.Get()) lb, err := ipnlocal.NewLocalBackend(logf, logid, sys, controlclient.LoginEphemeral) if err != nil { log.Fatalf("ipnlocal.NewLocalBackend: %v", err) @@ -463,7 +464,6 @@ func (s *jsSSHSession) Run() { cols = s.pendingResizeCols } err = session.RequestPty("xterm", rows, cols, ssh.TerminalModes{}) - if err != nil { writeError("Pseudo Terminal", err) return diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md index ffc296b87862a..1635feabf22f8 100644 --- a/cmd/tsidp/README.md +++ b/cmd/tsidp/README.md @@ -1,3 +1,6 @@ +> [!CAUTION] +> Development of tsidp has been moved to [https://github.com/tailscale/tsidp](https://github.com/tailscale/tsidp) and it is no longer maintained here. Please visit the new repository to see the latest updates, file an issue, or contribute. 
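The wasm_js.go hunk above shows the event-bus wiring that recurs throughout this diff: subsystems that used to be looked up lazily from tsd.System are now handed the bus explicitly. A minimal sketch of the pattern, assembled only from calls visible in these hunks (tsd.NewSystem, Dialer.SetBus, sys.Set, and ipnserver.New with its added bus argument); logf and logid stand in for values the surrounding code already has:

	sys := tsd.NewSystem()
	dialer := &tsdial.Dialer{Logf: logf} // mutated before use, as in tailscaled.go
	dialer.SetBus(sys.Bus.Get())         // wire the dialer to the system event bus
	sys.Set(dialer)
	// ipnserver.New now takes the bus ahead of the network monitor.
	srv := ipnserver.New(logf, logid, sys.Bus.Get(), sys.NetMon.Get())
	_ = srv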
+ # `tsidp` - Tailscale OpenID Connect (OIDC) Identity Provider [![status: community project](https://img.shields.io/badge/status-community_project-blue)](https://tailscale.com/kb/1531/community-projects) diff --git a/cmd/tsidp/depaware.txt b/cmd/tsidp/depaware.txt index cfe44d1dc1934..a2a473a5068ec 100644 --- a/cmd/tsidp/depaware.txt +++ b/cmd/tsidp/depaware.txt @@ -5,92 +5,15 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from 
github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ 
- L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm github.com/coder/websocket from tailscale.com/util/eventbus github.com/coder/websocket/internal/errd from github.com/coder/websocket github.com/coder/websocket/internal/util from github.com/coder/websocket github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -101,22 +24,11 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ - DW github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm + D github.com/google/uuid from github.com/prometheus-community/pro-bing + github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from 
tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -126,16 +38,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -149,9 +57,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LD github.com/tailscale/peercred from tailscale.com/ipn/ipnauth github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn @@ -163,7 +69,6 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ @@ -218,32 +123,37 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from 
tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ @@ -251,34 +161,26 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy 
tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -288,58 +190,56 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 
tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from tailscale.com/control/controlhttp+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ tailscale.com/tsd from tailscale.com/ipn/ipnext+ tailscale.com/tsnet from tailscale.com/cmd/tsidp tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ + tailscale.com/tstime/rate from tailscale.com/wgengine/filter tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/cmd/tsidp+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/cmd/tsidp+ @@ -350,25 +250,25 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting + L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ 
tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/cmd/tsidp+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -390,13 +290,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -415,15 +314,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/ed25519 from gopkg.in/square/go-jose.v2 golang.org/x/crypto/hkdf from tailscale.com/control/controlbase @@ -436,28 +332,27 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from 
golang.org/x/net/http2/h2c+ - golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ golang.org/x/net/proxy from tailscale.com/net/netns D golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey + golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LD golang.org/x/sys/unix from github.com/google/nftables+ + LD golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -468,12 +363,27 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from internal/profile+ W compress/zlib from debug/pe container/heap from 
gvisor.dev/gvisor/pkg/tcpip/transport/tcp container/list from crypto/tls+ @@ -538,12 +448,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ D crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - DW database/sql/driver from github.com/google/uuid + D database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -555,12 +465,12 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ - errors from archive/tar+ - expvar from tailscale.com/derp+ + encoding/xml from github.com/tailscale/goupnp+ + errors from bufio+ + expvar from tailscale.com/health+ flag from tailscale.com/cmd/tsidp+ - fmt from archive/tar+ - hash from compress/zlib+ + fmt from compress/flate+ + hash from crypto+ W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -577,7 +487,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -600,7 +510,7 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ L internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ @@ -613,14 +523,14 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io from bufio+ + io/fs from crypto/x509+ + io/ioutil from github.com/godbus/dbus/v5+ iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/fxamacker/cbor/v2+ @@ -630,40 +540,40 @@ tailscale.com/cmd/tsidp dependencies: (generated by github.com/tailscale/depawar mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from github.com/aws/smithy-go/transport/http+ - net/http/httputil from github.com/aws/smithy-go/transport/http+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http net/http/pprof from tailscale.com/ipn/localapi+ 
net/netip from crypto/x509+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ + os/exec from github.com/godbus/dbus/v5+ os/signal from tailscale.com/cmd/tsidp - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp - runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ runtime/trace from net/http/pprof - slices from archive/tar+ + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof text/template from html/template text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index e6335e54d251b..9f5bf38aeecc6 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -12,8 +12,6 @@ import ( "sync/atomic" "time" - "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/sockstats" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -22,6 +20,7 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/structs" + "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/execqueue" ) @@ -122,8 +121,6 @@ type Auto struct { observerQueue execqueue.ExecQueue shutdownFn func() // to be called prior to shutdown or nil - unregisterHealthWatch func() - mu sync.Mutex // mutex guards the following fields wantLoggedIn bool // whether the user wants to be logged in per last method call @@ -193,19 +190,14 @@ func NewNoStart(opts Options) (_ *Auto, err error) { observer: opts.Observer, shutdownFn: opts.Shutdown, } + c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) c.mapCtx, c.mapCancel = context.WithCancel(context.Background()) c.mapCtx = sockstats.WithSockStats(c.mapCtx, sockstats.LabelControlClientAuto, opts.Logf) - c.unregisterHealthWatch = opts.HealthTracker.RegisterWatcher(func(c health.Change) { - if c.WarnableChanged { - direct.ReportWarnableChange(c.Warnable, c.UnhealthyState) - } - }) return c, nil - } // SetPaused controls whether HTTP activity should be paused. @@ -424,6 +416,11 @@ func (c *Auto) unpausedChanLocked() <-chan bool { return unpaused } +// ClientID returns the ClientID of the direct controlClient +func (c *Auto) ClientID() int64 { + return c.direct.ClientID() +} + // mapRoutineState is the state of Auto.mapRoutine while it's running. 
type mapRoutineState struct { c *Auto @@ -779,7 +776,6 @@ func (c *Auto) Shutdown() { shutdownFn() } - c.unregisterHealthWatch() <-c.authDone <-c.mapDone <-c.updateDone @@ -818,13 +814,3 @@ func (c *Auto) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) error { func (c *Auto) DoNoiseRequest(req *http.Request) (*http.Response, error) { return c.direct.DoNoiseRequest(req) } - -// GetSingleUseNoiseRoundTripper returns a RoundTripper that can be only be used -// once (and must be used once) to make a single HTTP request over the noise -// channel to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. -func (c *Auto) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - return c.direct.GetSingleUseNoiseRoundTripper(ctx) -} diff --git a/control/controlclient/client.go b/control/controlclient/client.go index 8df64f9e8139a..d0aa129ae95b4 100644 --- a/control/controlclient/client.go +++ b/control/controlclient/client.go @@ -81,6 +81,9 @@ type Client interface { // in a separate http request. It has nothing to do with the rest of // the state machine. UpdateEndpoints(endpoints []tailcfg.Endpoint) + // ClientID returns the ClientID of a client. This ID is meant to + // distinguish one client from another. + ClientID() int64 } // UserVisibleError is an error that should be shown to users. diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 792c26955e5d1..3914d10ef8310 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" ) func fieldsOf(t reflect.Type) (fields []string) { @@ -218,8 +219,11 @@ func TestDirectProxyManual(t *testing.T) { t.Skip("skipping without --live-network-test") } + bus := eventbustest.NewBus(t) + dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ Persist: persist.Persist{}, @@ -233,12 +237,13 @@ func TestDirectProxyManual(t *testing.T) { }, DiscoPublicKey: key.NewDisco().Public(), Logf: t.Logf, - HealthTracker: &health.Tracker{}, + HealthTracker: health.NewTracker(bus), PopBrowserURL: func(url string) { t.Logf("PopBrowserURL: %q", url) }, Dialer: dialer, ControlKnobs: &controlknobs.Knobs{}, + Bus: bus, } d, err := NewDirect(opts) if err != nil { @@ -263,6 +268,8 @@ func TestHTTPSWithProxy(t *testing.T) { testHTTPS(t, true) } func testHTTPS(t *testing.T, withProxy bool) { bakedroots.ResetForTest(t, tlstest.TestRootCA()) + bus := eventbustest.NewBus(t) + controlLn, err := tls.Listen("tcp", "127.0.0.1:0", tlstest.ControlPlane.ServerTLSConfig()) if err != nil { t.Fatal(err) @@ -294,6 +301,7 @@ func testHTTPS(t *testing.T, withProxy bool) { dialer := &tsdial.Dialer{} dialer.SetNetMon(netmon.NewStatic()) + dialer.SetBus(bus) dialer.SetSystemDialerForTest(func(ctx context.Context, network, addr string) (net.Conn, error) { host, _, err := net.SplitHostPort(addr) if err != nil { @@ -322,11 +330,12 @@ func testHTTPS(t *testing.T, withProxy bool) { }, DiscoPublicKey: key.NewDisco().Public(), Logf: t.Logf, - HealthTracker: &health.Tracker{}, + HealthTracker: health.NewTracker(bus), PopBrowserURL: func(url string) { t.Logf("PopBrowserURL: %q", url) }, Dialer: dialer, + Bus: bus, } d, err := NewDirect(opts) if err != nil { diff --git 
a/control/controlclient/direct.go b/control/controlclient/direct.go index 47283a673c935..63a12b2495fd8 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -4,10 +4,11 @@ package controlclient import ( - "bufio" "bytes" "cmp" "context" + "crypto" + "crypto/sha256" "encoding/binary" "encoding/json" "errors" @@ -28,7 +29,10 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" + "tailscale.com/control/ts2021" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" @@ -40,9 +44,7 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/tlsdial" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" - "tailscale.com/tempfork/httprec" "tailscale.com/tka" "tailscale.com/tstime" "tailscale.com/types/key" @@ -52,41 +54,41 @@ import ( "tailscale.com/types/ptr" "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" + "tailscale.com/util/eventbus" "tailscale.com/util/singleflight" "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/zstdframe" ) // Direct is the client that connects to a tailcontrol server for a node. type Direct struct { - httpc *http.Client // HTTP client used to talk to tailcontrol - interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial - dialer *tsdial.Dialer - dnsCache *dnscache.Resolver - controlKnobs *controlknobs.Knobs // always non-nil - serverURL string // URL of the tailcontrol server - clock tstime.Clock - logf logger.Logf - netMon *netmon.Monitor // non-nil - health *health.Tracker - discoPubKey key.DiscoPublic - getMachinePrivKey func() (key.MachinePrivate, error) - debugFlags []string - skipIPForwardingCheck bool - pinger Pinger - polc policyclient.Client // always non-nil - popBrowser func(url string) // or nil - c2nHandler http.Handler // or nil - onClientVersion func(*tailcfg.ClientVersion) // or nil - onControlTime func(time.Time) // or nil - onTailnetDefaultAutoUpdate func(bool) // or nil - panicOnUse bool // if true, panic if client is used (for testing) - closedCtx context.Context // alive until Direct.Close is called - closeCtx context.CancelFunc // cancels closedCtx + httpc *http.Client // HTTP client used to do TLS requests to control (just https://controlplane.tailscale.com/key?v=123) + interceptedDial *atomic.Bool // if non-nil, pointer to bool whether ScreenTime intercepted our dial + dialer *tsdial.Dialer + dnsCache *dnscache.Resolver + controlKnobs *controlknobs.Knobs // always non-nil + serverURL string // URL of the tailcontrol server + clock tstime.Clock + logf logger.Logf + netMon *netmon.Monitor // non-nil + health *health.Tracker + discoPubKey key.DiscoPublic + busClient *eventbus.Client + clientVersionPub *eventbus.Publisher[tailcfg.ClientVersion] + autoUpdatePub *eventbus.Publisher[AutoUpdate] + controlTimePub *eventbus.Publisher[ControlTime] + getMachinePrivKey func() (key.MachinePrivate, error) + debugFlags []string + skipIPForwardingCheck bool + pinger Pinger + popBrowser func(url string) // or nil + polc policyclient.Client // always non-nil + c2nHandler http.Handler // or nil + panicOnUse bool // if true, panic if client is used (for testing) + closedCtx context.Context // alive until Direct.Close is called + closeCtx context.CancelFunc // cancels closedCtx dialPlan 
ControlDialPlanner // can be nil @@ -94,8 +96,8 @@ type Direct struct { serverLegacyKey key.MachinePublic // original ("legacy") nacl crypto_box-based public key; only used for signRegisterRequest on Windows now serverNoiseKey key.MachinePublic - sfGroup singleflight.Group[struct{}, *NoiseClient] // protects noiseClient creation. - noiseClient *NoiseClient + sfGroup singleflight.Group[struct{}, *ts2021.Client] // protects noiseClient creation. + noiseClient *ts2021.Client // also protected by mu persist persist.PersistView authKey string @@ -107,6 +109,8 @@ type Direct struct { tkaHead string lastPingURL string // last PingRequest.URL received, for dup suppression connectionHandleForTest string // sent in MapRequest.ConnectionHandleForTest + + controlClientID int64 // Random ID used to differentiate clients for consumers of messages. } // Observer is implemented by users of the control client (such as LocalBackend) @@ -120,26 +124,24 @@ type Observer interface { } type Options struct { - Persist persist.Persist // initial persistent data - GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use - ServerURL string // URL of the tailcontrol server - AuthKey string // optional node auth key for auto registration - Clock tstime.Clock - Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc - DiscoPublicKey key.DiscoPublic - PolicyClient policyclient.Client // or nil for none - Logf logger.Logf - HTTPTestClient *http.Client // optional HTTP client to use (for tests only) - NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) - DebugFlags []string // debug settings to send to control - HealthTracker *health.Tracker - PopBrowserURL func(url string) // optional func to open browser - OnClientVersion func(*tailcfg.ClientVersion) // optional func to inform GUI of client version status - OnControlTime func(time.Time) // optional func to notify callers of new time from control - OnTailnetDefaultAutoUpdate func(bool) // optional func to inform GUI of default auto-update setting for the tailnet - Dialer *tsdial.Dialer // non-nil - C2NHandler http.Handler // or nil - ControlKnobs *controlknobs.Knobs // or nil to ignore + Persist persist.Persist // initial persistent data + GetMachinePrivateKey func() (key.MachinePrivate, error) // returns the machine key to use + ServerURL string // URL of the tailcontrol server + AuthKey string // optional node auth key for auto registration + Clock tstime.Clock + Hostinfo *tailcfg.Hostinfo // non-nil passes ownership, nil means to use default using os.Hostname, etc + DiscoPublicKey key.DiscoPublic + PolicyClient policyclient.Client // or nil for none + Logf logger.Logf + HTTPTestClient *http.Client // optional HTTP client to use (for tests only) + NoiseTestClient *http.Client // optional HTTP client to use for noise RPCs (tests only) + DebugFlags []string // debug settings to send to control + HealthTracker *health.Tracker + PopBrowserURL func(url string) // optional func to open browser + Dialer *tsdial.Dialer // non-nil + C2NHandler http.Handler // or nil + ControlKnobs *controlknobs.Knobs // or nil to ignore + Bus *eventbus.Bus // non-nil, for setting up publishers // Observer is called when there's a change in status to report // from the control client. @@ -218,6 +220,8 @@ type NetmapDeltaUpdater interface { UpdateNetmapDelta([]netmap.NodeMutation) (ok bool) } +var nextControlClientID atomic.Int64 + // NewDirect returns a new Direct client. 
func NewDirect(opts Options) (*Direct, error) { if opts.ServerURL == "" { @@ -270,8 +274,12 @@ func NewDirect(opts Options) (*Direct, error) { var interceptedDial *atomic.Bool if httpc == nil { tr := http.DefaultTransport.(*http.Transport).Clone() - tr.Proxy = tshttpproxy.ProxyFromEnvironment - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() + if f, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + f(tr) + } + } tr.TLSClientConfig = tlsdial.Config(opts.HealthTracker, tr.TLSClientConfig) var dialFunc netx.DialFunc dialFunc, interceptedDial = makeScreenTimeDetectingDialFunc(opts.Dialer.SystemDial) @@ -287,33 +295,32 @@ func NewDirect(opts Options) (*Direct, error) { } c := &Direct{ - httpc: httpc, - interceptedDial: interceptedDial, - controlKnobs: opts.ControlKnobs, - getMachinePrivKey: opts.GetMachinePrivateKey, - serverURL: opts.ServerURL, - clock: opts.Clock, - logf: opts.Logf, - persist: opts.Persist.View(), - authKey: opts.AuthKey, - discoPubKey: opts.DiscoPublicKey, - debugFlags: opts.DebugFlags, - netMon: netMon, - health: opts.HealthTracker, - skipIPForwardingCheck: opts.SkipIPForwardingCheck, - pinger: opts.Pinger, - polc: cmp.Or(opts.PolicyClient, policyclient.Client(policyclient.NoPolicyClient{})), - popBrowser: opts.PopBrowserURL, - onClientVersion: opts.OnClientVersion, - onTailnetDefaultAutoUpdate: opts.OnTailnetDefaultAutoUpdate, - onControlTime: opts.OnControlTime, - c2nHandler: opts.C2NHandler, - dialer: opts.Dialer, - dnsCache: dnsCache, - dialPlan: opts.DialPlan, + httpc: httpc, + interceptedDial: interceptedDial, + controlKnobs: opts.ControlKnobs, + getMachinePrivKey: opts.GetMachinePrivateKey, + serverURL: opts.ServerURL, + clock: opts.Clock, + logf: opts.Logf, + persist: opts.Persist.View(), + authKey: opts.AuthKey, + discoPubKey: opts.DiscoPublicKey, + debugFlags: opts.DebugFlags, + netMon: netMon, + health: opts.HealthTracker, + skipIPForwardingCheck: opts.SkipIPForwardingCheck, + pinger: opts.Pinger, + polc: cmp.Or(opts.PolicyClient, policyclient.Client(policyclient.NoPolicyClient{})), + popBrowser: opts.PopBrowserURL, + c2nHandler: opts.C2NHandler, + dialer: opts.Dialer, + dnsCache: dnsCache, + dialPlan: opts.DialPlan, } c.closedCtx, c.closeCtx = context.WithCancel(context.Background()) + c.controlClientID = nextControlClientID.Add(1) + if opts.Hostinfo == nil { c.SetHostinfo(hostinfo.New()) } else { @@ -323,7 +330,7 @@ func NewDirect(opts Options) (*Direct, error) { } } if opts.NoiseTestClient != nil { - c.noiseClient = &NoiseClient{ + c.noiseClient = &ts2021.Client{ Client: opts.NoiseTestClient, } c.serverNoiseKey = key.NewMachine().Public() // prevent early error before hitting test client @@ -331,6 +338,12 @@ func NewDirect(opts Options) (*Direct, error) { if strings.Contains(opts.ServerURL, "controlplane.tailscale.com") && envknob.Bool("TS_PANIC_IF_HIT_MAIN_CONTROL") { c.panicOnUse = true } + + c.busClient = opts.Bus.Client("controlClient.direct") + c.clientVersionPub = eventbus.Publish[tailcfg.ClientVersion](c.busClient) + c.autoUpdatePub = eventbus.Publish[AutoUpdate](c.busClient) + c.controlTimePub = eventbus.Publish[ControlTime](c.busClient) + return c, nil } @@ -340,15 +353,14 @@ func (c *Direct) Close() error { c.mu.Lock() defer c.mu.Unlock() + c.busClient.Close() if c.noiseClient != nil { if err := c.noiseClient.Close(); err != nil { return err } } c.noiseClient = nil - if tr, ok := c.httpc.Transport.(*http.Transport); ok { - 
tr.CloseIdleConnections() - } + c.httpc.CloseIdleConnections() return nil } @@ -389,7 +401,7 @@ func (c *Direct) SetNetInfo(ni *tailcfg.NetInfo) bool { return true } -// SetNetInfo stores a new TKA head value for next update. +// SetTKAHead stores a new TKA head value for next update. // It reports whether the TKA head changed. func (c *Direct) SetTKAHead(tkaHead string) bool { c.mu.Lock() @@ -533,7 +545,9 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new } else { if expired { c.logf("Old key expired -> regen=true") - systemd.Status("key expired; run 'tailscale up' to authenticate") + if f, ok := feature.HookSystemdStatus.GetOk(); ok { + f("key expired; run 'tailscale up' to authenticate") + } regen = true } if (opt.Flags & LoginInteractive) != 0 { @@ -592,6 +606,7 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new if persist.NetworkLockKey.IsZero() { persist.NetworkLockKey = key.NewNLPrivate() } + nlPub := persist.NetworkLockKey.Public() if tryingNewKey.IsZero() { @@ -688,8 +703,8 @@ func (c *Direct) doLogin(ctx context.Context, opt loginOpt) (mustRegen bool, new if err != nil { return regen, opt.URL, nil, err } - addLBHeader(req, request.OldNodeKey) - addLBHeader(req, request.NodeKey) + ts2021.AddLBHeader(req, request.OldNodeKey) + ts2021.AddLBHeader(req, request.NodeKey) res, err := httpc.Do(req) if err != nil { @@ -826,6 +841,23 @@ func (c *Direct) SendUpdate(ctx context.Context) error { return c.sendMapRequest(ctx, false, nil) } +// ClientID returns the controlClientID of the controlClient. +func (c *Direct) ClientID() int64 { + return c.controlClientID +} + +// AutoUpdate is an eventbus value, reporting the value of tailcfg.MapResponse.DefaultAutoUpdate. +type AutoUpdate struct { + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct. + Value bool // The Value represents DefaultAutoUpdate from [tailcfg.MapResponse]. +} + +// ControlTime is an eventbus value, reporting the value of tailcfg.MapResponse.ControlTime. +type ControlTime struct { + ClientID int64 // The ID field is used for consumers to differentiate instances of Direct. + Value time.Time // The Value represents ControlTime from [tailcfg.MapResponse]. +} + // If we go more than watchdogTimeout without hearing from the server, // end the long poll. We should be receiving a keep alive ping // every minute. @@ -915,8 +947,29 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap TKAHead: tkaHead, ConnectionHandleForTest: connectionHandleForTest, } + + // If we have a hardware attestation key, sign the node key with it and send + // the key & signature in the map request. 
+ if buildfeatures.HasTPM { + if k := persist.AsStruct().AttestationKey; k != nil && !k.IsZero() { + hwPub := key.HardwareAttestationPublicFromPlatformKey(k) + request.HardwareAttestationKey = hwPub + + t := c.clock.Now() + msg := fmt.Sprintf("%d|%s", t.Unix(), nodeKey.String()) + digest := sha256.Sum256([]byte(msg)) + sig, err := k.Sign(nil, digest[:], crypto.SHA256) + if err != nil { + c.logf("failed to sign node key with hardware attestation key: %v", err) + } else { + request.HardwareAttestationKeySignature = sig + request.HardwareAttestationKeySignatureTimestamp = t + } + } + } + var extraDebugFlags []string - if hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && + if buildfeatures.HasAdvertiseRoutes && hi != nil && c.netMon != nil && !c.skipIPForwardingCheck && ipForwardingBroken(hi.RoutableIPs, c.netMon.InterfaceState()) { extraDebugFlags = append(extraDebugFlags, "warn-ip-forwarding-off") } @@ -980,7 +1033,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap if err != nil { return err } - addLBHeader(req, nodeKey) + ts2021.AddLBHeader(req, nodeKey) res, err := httpc.Do(req) if err != nil { @@ -1060,7 +1113,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap vlogf("netmap: read body after %v", time.Since(t0).Round(time.Millisecond)) var resp tailcfg.MapResponse - if err := c.decodeMsg(msg, &resp); err != nil { + if err := sess.decodeMsg(msg, &resp); err != nil { vlogf("netmap: decode error: %v", err) return err } @@ -1085,14 +1138,12 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap c.logf("netmap: control says to open URL %v; no popBrowser func", u) } } - if resp.ClientVersion != nil && c.onClientVersion != nil { - c.onClientVersion(resp.ClientVersion) + if resp.ClientVersion != nil { + c.clientVersionPub.Publish(*resp.ClientVersion) } if resp.ControlTime != nil && !resp.ControlTime.IsZero() { c.logf.JSON(1, "controltime", resp.ControlTime.UTC()) - if c.onControlTime != nil { - c.onControlTime(*resp.ControlTime) - } + c.controlTimePub.Publish(ControlTime{c.controlClientID, *resp.ControlTime}) } if resp.KeepAlive { vlogf("netmap: got keep-alive") @@ -1112,9 +1163,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap continue } if au, ok := resp.DefaultAutoUpdate.Get(); ok { - if c.onTailnetDefaultAutoUpdate != nil { - c.onTailnetDefaultAutoUpdate(au) - } + c.autoUpdatePub.Publish(AutoUpdate{c.controlClientID, au}) } metricMapResponseMap.Add(1) @@ -1138,12 +1187,33 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap return nil } +// NetmapFromMapResponseForDebug returns a NetworkMap from the given MapResponse. +// It is intended for debugging only. 
+func NetmapFromMapResponseForDebug(ctx context.Context, pr persist.PersistView, resp *tailcfg.MapResponse) (*netmap.NetworkMap, error) { + if resp == nil { + return nil, errors.New("nil MapResponse") + } + if resp.Node == nil { + return nil, errors.New("MapResponse lacks Node") + } + + nu := &rememberLastNetmapUpdater{} + sess := newMapSession(pr.PrivateNodeKey(), nu, nil) + defer sess.Close() + + if err := sess.HandleNonKeepAliveMapResponse(ctx, resp); err != nil { + return nil, fmt.Errorf("HandleNonKeepAliveMapResponse: %w", err) + } + + return sess.netmap(), nil +} + func (c *Direct) handleDebugMessage(ctx context.Context, debug *tailcfg.Debug) error { if code := debug.Exit; code != nil { c.logf("exiting process with status %v per controlplane", *code) os.Exit(*code) } - if debug.DisableLogTail { + if buildfeatures.HasLogTail && debug.DisableLogTail { logtail.Disable() envknob.SetNoLogsNoSupport() } @@ -1192,12 +1262,23 @@ func decode(res *http.Response, v any) error { var jsonEscapedZero = []byte(`\u0000`) +const justKeepAliveStr = `{"KeepAlive":true}` + // decodeMsg is responsible for uncompressing msg and unmarshaling into v. -func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { +func (sess *mapSession) decodeMsg(compressedMsg []byte, v *tailcfg.MapResponse) error { + // Fast path for common case of keep-alive message. + // See tailscale/tailscale#17343. + if sess.keepAliveZ != nil && bytes.Equal(compressedMsg, sess.keepAliveZ) { + v.KeepAlive = true + return nil + } + b, err := zstdframe.AppendDecode(nil, compressedMsg) if err != nil { return err } + sess.ztdDecodesForTest++ + if DevKnob.DumpNetMaps() { var buf bytes.Buffer json.Indent(&buf, b, "", " ") @@ -1210,6 +1291,9 @@ func (c *Direct) decodeMsg(compressedMsg []byte, v any) error { if err := json.Unmarshal(b, v); err != nil { return fmt.Errorf("response: %v", err) } + if v.KeepAlive && string(b) == justKeepAliveStr { + sess.keepAliveZ = compressedMsg + } return nil } @@ -1257,7 +1341,7 @@ func loadServerPubKeys(ctx context.Context, httpc *http.Client, serverURL string out = tailcfg.OverTLSPublicKeyResponse{} k, err := key.ParseMachinePublicUntyped(mem.B(b)) if err != nil { - return nil, multierr.New(jsonErr, err) + return nil, errors.Join(jsonErr, err) } out.LegacyPublicKey = k return &out, nil @@ -1327,6 +1411,10 @@ func (c *Direct) isUniquePingRequest(pr *tailcfg.PingRequest) bool { return true } +// HookAnswerC2NPing is where feature/c2n conditionally registers support +// for handling C2N (control-to-node) HTTP requests. 
+var HookAnswerC2NPing feature.Hook[func(logger.Logf, http.Handler, *http.Client, *tailcfg.PingRequest)] + func (c *Direct) answerPing(pr *tailcfg.PingRequest) { httpc := c.httpc useNoise := pr.URLIsNoise || pr.Types == "c2n" @@ -1347,11 +1435,16 @@ func (c *Direct) answerPing(pr *tailcfg.PingRequest) { answerHeadPing(c.logf, httpc, pr) return case "c2n": + if !buildfeatures.HasC2N { + return + } if !useNoise && !envknob.Bool("TS_DEBUG_PERMIT_HTTP_C2N") { c.logf("refusing to answer c2n ping without noise") return } - answerC2NPing(c.logf, c.c2nHandler, httpc, pr) + if f, ok := HookAnswerC2NPing.GetOk(); ok { + f(c.logf, c.c2nHandler, httpc, pr) + } return } for _, t := range strings.Split(pr.Types, ",") { @@ -1386,54 +1479,6 @@ func answerHeadPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest) { } } -func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr *tailcfg.PingRequest) { - if c2nHandler == nil { - logf("answerC2NPing: c2nHandler not defined") - return - } - hreq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(pr.Payload))) - if err != nil { - logf("answerC2NPing: ReadRequest: %v", err) - return - } - if pr.Log { - logf("answerC2NPing: got c2n request for %v ...", hreq.RequestURI) - } - handlerTimeout := time.Minute - if v := hreq.Header.Get("C2n-Handler-Timeout"); v != "" { - handlerTimeout, _ = time.ParseDuration(v) - } - handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) - defer cancel() - hreq = hreq.WithContext(handlerCtx) - rec := httprec.NewRecorder() - c2nHandler.ServeHTTP(rec, hreq) - cancel() - - c2nResBuf := new(bytes.Buffer) - rec.Result().Write(c2nResBuf) - - replyCtx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - - req, err := http.NewRequestWithContext(replyCtx, "POST", pr.URL, c2nResBuf) - if err != nil { - logf("answerC2NPing: NewRequestWithContext: %v", err) - return - } - if pr.Log { - logf("answerC2NPing: sending POST ping to %v ...", pr.URL) - } - t0 := clock.Now() - _, err = c.Do(req) - d := time.Since(t0).Round(time.Millisecond) - if err != nil { - logf("answerC2NPing error: %v to %v (after %v)", err, pr.URL, d) - } else if pr.Log { - logf("answerC2NPing complete to %v (after %v)", pr.URL, d) - } -} - // sleepAsRequest implements the sleep for a tailcfg.Debug message requesting // that the client sleep. The complication is that while we're sleeping (if for // a long time), we need to periodically reset the watchdog timer before it @@ -1458,7 +1503,7 @@ func sleepAsRequested(ctx context.Context, logf logger.Logf, d time.Duration, cl } // getNoiseClient returns the noise client, creating one if one doesn't exist. 
-func (c *Direct) getNoiseClient() (*NoiseClient, error) { +func (c *Direct) getNoiseClient() (*ts2021.Client, error) { c.mu.Lock() serverNoiseKey := c.serverNoiseKey nc := c.noiseClient @@ -1473,13 +1518,13 @@ func (c *Direct) getNoiseClient() (*NoiseClient, error) { if c.dialPlan != nil { dp = c.dialPlan.Load } - nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*NoiseClient, error) { + nc, err, _ := c.sfGroup.Do(struct{}{}, func() (*ts2021.Client, error) { k, err := c.getMachinePrivKey() if err != nil { return nil, err } c.logf("[v1] creating new noise client") - nc, err := NewNoiseClient(NoiseOpts{ + nc, err := ts2021.NewClient(ts2021.ClientOpts{ PrivKey: k, ServerPubKey: serverNoiseKey, ServerURL: c.serverURL, @@ -1513,7 +1558,7 @@ func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) er if err != nil { return err } - res, err := nc.post(ctx, "/machine/set-dns", newReq.NodeKey, &newReq) + res, err := nc.Post(ctx, "/machine/set-dns", newReq.NodeKey, &newReq) if err != nil { return err } @@ -1534,6 +1579,9 @@ func (c *Direct) setDNSNoise(ctx context.Context, req *tailcfg.SetDNSRequest) er // SetDNS sends the SetDNSRequest request to the control plane server, // requesting a DNS record be created or updated. func (c *Direct) SetDNS(ctx context.Context, req *tailcfg.SetDNSRequest) (err error) { + if !buildfeatures.HasACME { + return feature.ErrUnavailable + } metricSetDNS.Add(1) defer func() { if err != nil { @@ -1554,20 +1602,6 @@ func (c *Direct) DoNoiseRequest(req *http.Request) (*http.Response, error) { return nc.Do(req) } -// GetSingleUseNoiseRoundTripper returns a RoundTripper that can be only be used -// once (and must be used once) to make a single HTTP request over the noise -// channel to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. -func (c *Direct) GetSingleUseNoiseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - nc, err := c.getNoiseClient() - if err != nil { - return nil, nil, err - } - return nc.GetSingleUseRoundTripper(ctx) -} - // doPingerPing sends a Ping to pr.IP using pinger, and sends an http request back to // pr.URL with ping response data. func doPingerPing(logf logger.Logf, c *http.Client, pr *tailcfg.PingRequest, pinger Pinger, pingType tailcfg.PingType) { @@ -1624,47 +1658,6 @@ func postPingResult(start time.Time, logf logger.Logf, c *http.Client, pr *tailc return nil } -// ReportWarnableChange reports to the control plane a change to this node's -// health. w must be non-nil. us can be nil to indicate a healthy state for w. -func (c *Direct) ReportWarnableChange(w *health.Warnable, us *health.UnhealthyState) { - if w == health.NetworkStatusWarnable || w == health.IPNStateWarnable || w == health.LoginStateWarnable { - // We don't report these. These include things like the network is down - // (in which case we can't report anyway) or the user wanted things - // stopped, as opposed to the more unexpected failure types in the other - // subsystems. - return - } - np, err := c.getNoiseClient() - if err != nil { - // Don't report errors to control if the server doesn't support noise. 
- return - } - nodeKey, ok := c.GetPersist().PublicNodeKeyOK() - if !ok { - return - } - if c.panicOnUse { - panic("tainted client") - } - // TODO(angott): at some point, update `Subsys` in the request to be `Warnable` - req := &tailcfg.HealthChangeRequest{ - Subsys: string(w.Code), - NodeKey: nodeKey, - } - if us != nil { - req.Error = us.Text - } - - // Best effort, no logging: - ctx, cancel := context.WithTimeout(c.closedCtx, 5*time.Second) - defer cancel() - res, err := np.post(ctx, "/machine/update-health", nodeKey, req) - if err != nil { - return - } - res.Body.Close() -} - // SetDeviceAttrs does a synchronous call to the control plane to update // the node's attributes. // @@ -1703,7 +1696,7 @@ func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) e ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() - res, err := nc.doWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) + res, err := nc.DoWithBody(ctx, "PATCH", "/machine/set-device-attr", nodeKey, req) if err != nil { return err } @@ -1744,7 +1737,7 @@ func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequ panic("tainted client") } - res, err := nc.post(ctx, "/machine/audit-log", nodeKey, req) + res, err := nc.Post(ctx, "/machine/audit-log", nodeKey, req) if err != nil { return fmt.Errorf("%w: %w", errHTTPPostFailure, err) } @@ -1756,12 +1749,6 @@ func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequ return nil } -func addLBHeader(req *http.Request, nodeKey key.NodePublic) { - if !nodeKey.IsZero() { - req.Header.Add(tailcfg.LBHeader, nodeKey.String()) - } -} - // makeScreenTimeDetectingDialFunc returns dialFunc, optionally wrapped (on // Apple systems) with a func that sets the returned atomic.Bool for whether // Screen Time seemed to intercept the connection. 
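A note on the decodeMsg change in the direct.go/map.go hunks above: the client now remembers the server's compressed encoding of the bare {"KeepAlive":true} map response (tailscale/tailscale#17343) and answers later identical payloads with a plain byte comparison, skipping zstd decompression and JSON unmarshaling entirely. Below is a minimal, dependency-free sketch of that memoization; the names session and decompress are illustrative stand-ins of this sketch, not the real mapSession or zstdframe API.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// justKeepAlive is the canonical JSON body whose compressed form gets cached.
const justKeepAlive = `{"KeepAlive":true}`

// mapResponse is a pared-down stand-in for tailcfg.MapResponse.
type mapResponse struct {
	KeepAlive bool
}

// session is a stand-in for mapSession; decompress stands in for the zstd
// decoder and is pluggable so the sketch stays dependency-free.
type session struct {
	keepAliveZ []byte                       // learned compressed keep-alive payload, or nil
	decompress func([]byte) ([]byte, error) // assumption: real code uses zstdframe.AppendDecode
}

// decodeMsg mirrors the fast-path logic: byte-compare against the learned
// compressed keep-alive first; otherwise decompress, unmarshal, and learn.
func (s *session) decodeMsg(compressed []byte, v *mapResponse) error {
	if s.keepAliveZ != nil && bytes.Equal(compressed, s.keepAliveZ) {
		v.KeepAlive = true
		return nil
	}
	b, err := s.decompress(compressed)
	if err != nil {
		return err
	}
	if err := json.Unmarshal(b, v); err != nil {
		return err
	}
	if v.KeepAlive && string(b) == justKeepAlive {
		s.keepAliveZ = compressed // remember the server's encoding of this message
	}
	return nil
}

func main() {
	// Identity "compression" keeps the example runnable without a zstd dependency.
	s := &session{decompress: func(b []byte) ([]byte, error) { return b, nil }}
	var r1, r2 mapResponse
	_ = s.decodeMsg([]byte(justKeepAlive), &r1) // slow path: decodes and learns the bytes
	_ = s.decodeMsg([]byte(justKeepAlive), &r2) // fast path: bytes.Equal only
	fmt.Println(r1.KeepAlive, r2.KeepAlive)     // true true
}

The real version also counts decompressions for tests (ztdDecodesForTest) and keeps the learned bytes on mapSession, so the cached encoding is scoped to a single long-poll session, as exercised by TestLearnZstdOfKeepAlive in the map_test.go hunk below.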
diff --git a/control/controlclient/direct_test.go b/control/controlclient/direct_test.go index e2a6f9fa4b93f..dd93dc7b33d61 100644 --- a/control/controlclient/direct_test.go +++ b/control/controlclient/direct_test.go @@ -17,21 +17,26 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/types/key" + "tailscale.com/util/eventbus/eventbustest" ) func TestNewDirect(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ ServerURL: "https://example.com", Hostinfo: hi, GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, + Bus: bus, } c, err := NewDirect(opts) if err != nil { @@ -99,15 +104,19 @@ func TestTsmpPing(t *testing.T) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := Options{ ServerURL: "https://example.com", Hostinfo: hi, GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, + Bus: bus, } c, err := NewDirect(opts) diff --git a/control/controlclient/map.go b/control/controlclient/map.go index 22cea5acaa2f7..eafdb2d565a76 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -57,6 +57,9 @@ type mapSession struct { altClock tstime.Clock // if nil, regular time is used cancel context.CancelFunc // always non-nil, shuts down caller's base long poll context + keepAliveZ []byte // if non-nil, the learned zstd encoding of the just-KeepAlive message for this session + ztdDecodesForTest int // for testing + // sessionAliveCtx is a Background-based context that's alive for the // duration of the mapSession that we own the lifetime of. It's closed by // sessionAliveCtxClose. diff --git a/control/controlclient/map_test.go b/control/controlclient/map_test.go index ff5df8207ba8f..2be4b6ad70b2d 100644 --- a/control/controlclient/map_test.go +++ b/control/controlclient/map_test.go @@ -4,6 +4,7 @@ package controlclient import ( + "bytes" "context" "encoding/json" "fmt" @@ -20,6 +21,7 @@ import ( "go4.org/mem" "tailscale.com/control/controlknobs" "tailscale.com/health" + "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstime" @@ -27,9 +29,12 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/types/persist" "tailscale.com/types/ptr" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" + "tailscale.com/util/zstdframe" ) func eps(s ...string) []netip.AddrPort { @@ -1326,7 +1331,7 @@ func TestNetmapDisplayMessage(t *testing.T) { // [netmap.NetworkMap] to a [health.Tracker]. func TestNetmapHealthIntegration(t *testing.T) { ms := newTestMapSession(t, nil) - ht := health.Tracker{} + ht := health.NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -1371,7 +1376,7 @@ func TestNetmapHealthIntegration(t *testing.T) { // passing the [netmap.NetworkMap] to a [health.Tracker]. 
func TestNetmapDisplayMessageIntegration(t *testing.T) { ms := newTestMapSession(t, nil) - ht := health.Tracker{} + ht := health.NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -1418,3 +1423,63 @@ func TestNetmapDisplayMessageIntegration(t *testing.T) { t.Errorf("unexpected message contents (-want +got):\n%s", diff) } } + +func TestNetmapForMapResponseForDebug(t *testing.T) { + mr := &tailcfg.MapResponse{ + Node: &tailcfg.Node{ + ID: 1, + Name: "foo.bar.ts.net.", + }, + Peers: []*tailcfg.Node{ + {ID: 2, Name: "peer1.bar.ts.net.", HomeDERP: 1}, + {ID: 3, Name: "peer2.bar.ts.net.", HomeDERP: 1}, + }, + } + ms := newTestMapSession(t, nil) + nm1 := ms.netmapForResponse(mr) + + prefs := &ipn.Prefs{Persist: &persist.Persist{PrivateNodeKey: ms.privateNodeKey}} + nm2, err := NetmapFromMapResponseForDebug(t.Context(), prefs.View().Persist(), mr) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(nm1, nm2) { + t.Errorf("mismatch\nnm1: %s\nnm2: %s\n", logger.AsJSON(nm1), logger.AsJSON(nm2)) + } +} + +func TestLearnZstdOfKeepAlive(t *testing.T) { + keepAliveMsgZstd := (func() []byte { + msg := must.Get(json.Marshal(tailcfg.MapResponse{ + KeepAlive: true, + })) + return zstdframe.AppendEncode(nil, msg, zstdframe.FastestCompression) + })() + + sess := newTestMapSession(t, nil) + + // The first time we see a zstd keep-alive message, we learn how + // the server encodes that. + var mr tailcfg.MapResponse + must.Do(sess.decodeMsg(keepAliveMsgZstd, &mr)) + if !mr.KeepAlive { + t.Fatal("mr.KeepAlive false; want true") + } + if !bytes.Equal(sess.keepAliveZ, keepAliveMsgZstd) { + t.Fatalf("sess.keepAlive = %q; want %q", sess.keepAliveZ, keepAliveMsgZstd) + } + if got, want := sess.ztdDecodesForTest, 1; got != want { + t.Fatalf("got %d zstd decodes; want %d", got, want) + } + + // The second time on the session where we see that message, we + // decode it without needing to decompress. + var mr2 tailcfg.MapResponse + must.Do(sess.decodeMsg(keepAliveMsgZstd, &mr2)) + if !mr2.KeepAlive { + t.Fatal("mr2.KeepAlive false; want true") + } + if got, want := sess.ztdDecodesForTest, 1; got != want { + t.Fatalf("got %d zstd decodes; want %d", got, want) + } +} diff --git a/control/controlclient/noise.go b/control/controlclient/noise.go deleted file mode 100644 index 4bd8cfc25ee96..0000000000000 --- a/control/controlclient/noise.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package controlclient - -import ( - "bytes" - "cmp" - "context" - "encoding/json" - "errors" - "math" - "net/http" - "net/netip" - "net/url" - "sync" - "time" - - "golang.org/x/net/http2" - "tailscale.com/control/controlhttp" - "tailscale.com/health" - "tailscale.com/internal/noiseconn" - "tailscale.com/net/dnscache" - "tailscale.com/net/netmon" - "tailscale.com/net/tsdial" - "tailscale.com/tailcfg" - "tailscale.com/tstime" - "tailscale.com/types/key" - "tailscale.com/types/logger" - "tailscale.com/util/mak" - "tailscale.com/util/multierr" - "tailscale.com/util/singleflight" -) - -// NoiseClient provides a http.Client to connect to tailcontrol over -// the ts2021 protocol. -type NoiseClient struct { - // Client is an HTTP client to talk to the coordination server. - // It automatically makes a new Noise connection as needed. - // It does not support node key proofs. To do that, call - // noiseClient.getConn instead to make a connection. 
- *http.Client - - // h2t is the HTTP/2 transport we use a bit to create new - // *http2.ClientConns. We don't use its connection pool and we don't use its - // dialing. We use it for exactly one reason: its idle timeout that can only - // be configured via the HTTP/1 config. And then we call NewClientConn (with - // an existing Noise connection) on the http2.Transport which sets up an - // http2.ClientConn using that idle timeout from an http1.Transport. - h2t *http2.Transport - - // sfDial ensures that two concurrent requests for a noise connection only - // produce one shared one between the two callers. - sfDial singleflight.Group[struct{}, *noiseconn.Conn] - - dialer *tsdial.Dialer - dnsCache *dnscache.Resolver - privKey key.MachinePrivate - serverPubKey key.MachinePublic - host string // the host part of serverURL - httpPort string // the default port to dial - httpsPort string // the fallback Noise-over-https port or empty if none - - // dialPlan optionally returns a ControlDialPlan previously received - // from the control server; either the function or the return value can - // be nil. - dialPlan func() *tailcfg.ControlDialPlan - - logf logger.Logf - netMon *netmon.Monitor - health *health.Tracker - - // mu only protects the following variables. - mu sync.Mutex - closed bool - last *noiseconn.Conn // or nil - nextID int - connPool map[int]*noiseconn.Conn // active connections not yet closed; see noiseconn.Conn.Close -} - -// NoiseOpts contains options for the NewNoiseClient function. All fields are -// required unless otherwise specified. -type NoiseOpts struct { - // PrivKey is this node's private key. - PrivKey key.MachinePrivate - // ServerPubKey is the public key of the server. - ServerPubKey key.MachinePublic - // ServerURL is the URL of the server to connect to. - ServerURL string - // Dialer's SystemDial function is used to connect to the server. - Dialer *tsdial.Dialer - // DNSCache is the caching Resolver to use to connect to the server. - // - // This field can be nil. - DNSCache *dnscache.Resolver - // Logf is the log function to use. This field can be nil. - Logf logger.Logf - // NetMon is the network monitor that, if set, will be used to get the - // network interface state. This field can be nil; if so, the current - // state will be looked up dynamically. - NetMon *netmon.Monitor - // HealthTracker, if non-nil, is the health tracker to use. - HealthTracker *health.Tracker - // DialPlan, if set, is a function that should return an explicit plan - // on how to connect to the server. - DialPlan func() *tailcfg.ControlDialPlan -} - -// NewNoiseClient returns a new noiseClient for the provided server and machine key. -// serverURL is of the form https://: (no trailing slash). -// -// netMon may be nil, if non-nil it's used to do faster interface lookups. -// dialPlan may be nil -func NewNoiseClient(opts NoiseOpts) (*NoiseClient, error) { - logf := opts.Logf - u, err := url.Parse(opts.ServerURL) - if err != nil { - return nil, err - } - - if u.Scheme != "http" && u.Scheme != "https" { - return nil, errors.New("invalid ServerURL scheme, must be http or https") - } - - var httpPort string - var httpsPort string - addr, _ := netip.ParseAddr(u.Hostname()) - isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" - if port := u.Port(); port != "" { - // If there is an explicit port specified, entirely rely on the scheme, - // unless it's http with a private host in which case we never try using HTTPS. 
- if u.Scheme == "https" { - httpPort = "" - httpsPort = port - } else if u.Scheme == "http" { - httpPort = port - httpsPort = "443" - if isPrivateHost { - logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) - httpsPort = "" - } - } - } else if u.Scheme == "http" && isPrivateHost { - // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, - // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. - httpPort = "80" - httpsPort = "" - } else { - // Otherwise, use the standard ports - httpPort = "80" - httpsPort = "443" - } - - np := &NoiseClient{ - serverPubKey: opts.ServerPubKey, - privKey: opts.PrivKey, - host: u.Hostname(), - httpPort: httpPort, - httpsPort: httpsPort, - dialer: opts.Dialer, - dnsCache: opts.DNSCache, - dialPlan: opts.DialPlan, - logf: opts.Logf, - netMon: opts.NetMon, - health: opts.HealthTracker, - } - - // Create the HTTP/2 Transport using a net/http.Transport - // (which only does HTTP/1) because it's the only way to - // configure certain properties on the http2.Transport. But we - // never actually use the net/http.Transport for any HTTP/1 - // requests. - h2Transport, err := http2.ConfigureTransports(&http.Transport{ - IdleConnTimeout: time.Minute, - }) - if err != nil { - return nil, err - } - np.h2t = h2Transport - - np.Client = &http.Client{Transport: np} - return np, nil -} - -// GetSingleUseRoundTripper returns a RoundTripper that can be only be used once -// (and must be used once) to make a single HTTP request over the noise channel -// to the coordination server. -// -// In addition to the RoundTripper, it returns the HTTP/2 channel's early noise -// payload, if any. -func (nc *NoiseClient) GetSingleUseRoundTripper(ctx context.Context) (http.RoundTripper, *tailcfg.EarlyNoise, error) { - for tries := 0; tries < 3; tries++ { - conn, err := nc.getConn(ctx) - if err != nil { - return nil, nil, err - } - ok, earlyPayloadMaybeNil, err := conn.ReserveNewRequest(ctx) - if err != nil { - return nil, nil, err - } - if ok { - return conn, earlyPayloadMaybeNil, nil - } - } - return nil, nil, errors.New("[unexpected] failed to reserve a request on a connection") -} - -// contextErr is an error that wraps another error and is used to indicate that -// the error was because a context expired. -type contextErr struct { - err error -} - -func (e contextErr) Error() string { - return e.err.Error() -} - -func (e contextErr) Unwrap() error { - return e.err -} - -// getConn returns a noiseconn.Conn that can be used to make requests to the -// coordination server. It may return a cached connection or create a new one. -// Dials are singleflighted, so concurrent calls to getConn may only dial once. -// As such, context values may not be respected as there are no guarantees that -// the context passed to getConn is the same as the context passed to dial. -func (nc *NoiseClient) getConn(ctx context.Context) (*noiseconn.Conn, error) { - nc.mu.Lock() - if last := nc.last; last != nil && last.CanTakeNewRequest() { - nc.mu.Unlock() - return last, nil - } - nc.mu.Unlock() - - for { - // We singeflight the dial to avoid making multiple connections, however - // that means that we can't simply cancel the dial if the context is - // canceled. Instead, we have to additionally check that the context - // which was canceled is our context and retry if our context is still - // valid. 
- conn, err, _ := nc.sfDial.Do(struct{}{}, func() (*noiseconn.Conn, error) { - c, err := nc.dial(ctx) - if err != nil { - if ctx.Err() != nil { - return nil, contextErr{ctx.Err()} - } - return nil, err - } - return c, nil - }) - var ce contextErr - if err == nil || !errors.As(err, &ce) { - return conn, err - } - if ctx.Err() == nil { - // The dial failed because of a context error, but our context - // is still valid. Retry. - continue - } - // The dial failed because our context was canceled. Return the - // underlying error. - return nil, ce.Unwrap() - } -} - -func (nc *NoiseClient) RoundTrip(req *http.Request) (*http.Response, error) { - ctx := req.Context() - conn, err := nc.getConn(ctx) - if err != nil { - return nil, err - } - return conn.RoundTrip(req) -} - -// connClosed removes the connection with the provided ID from the pool -// of active connections. -func (nc *NoiseClient) connClosed(id int) { - nc.mu.Lock() - defer nc.mu.Unlock() - conn := nc.connPool[id] - if conn != nil { - delete(nc.connPool, id) - if nc.last == conn { - nc.last = nil - } - } -} - -// Close closes all the underlying noise connections. -// It is a no-op and returns nil if the connection is already closed. -func (nc *NoiseClient) Close() error { - nc.mu.Lock() - nc.closed = true - conns := nc.connPool - nc.connPool = nil - nc.mu.Unlock() - - var errors []error - for _, c := range conns { - if err := c.Close(); err != nil { - errors = append(errors, err) - } - } - return multierr.New(errors...) -} - -// dial opens a new connection to tailcontrol, fetching the server noise key -// if not cached. -func (nc *NoiseClient) dial(ctx context.Context) (*noiseconn.Conn, error) { - nc.mu.Lock() - connID := nc.nextID - nc.nextID++ - nc.mu.Unlock() - - if tailcfg.CurrentCapabilityVersion > math.MaxUint16 { - // Panic, because a test should have started failing several - // thousand version numbers before getting to this point. - panic("capability version is too high to fit in the wire protocol") - } - - var dialPlan *tailcfg.ControlDialPlan - if nc.dialPlan != nil { - dialPlan = nc.dialPlan() - } - - // If we have a dial plan, then set our timeout as slightly longer than - // the maximum amount of time contained therein; we assume that - // explicit instructions on timeouts are more useful than a single - // hard-coded timeout. - // - // The default value of 5 is chosen so that, when there's no dial plan, - // we retain the previous behaviour of 10 seconds end-to-end timeout. - timeoutSec := 5.0 - if dialPlan != nil { - for _, c := range dialPlan.Candidates { - if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec { - timeoutSec = v - } - } - } - - // After we establish a connection, we need some time to actually - // upgrade it into a Noise connection. With a ballpark worst-case RTT - // of 1000ms, give ourselves an extra 5 seconds to complete the - // handshake. - timeoutSec += 5 - - // Be extremely defensive and ensure that the timeout is in the range - // [5, 60] seconds (e.g. if we accidentally get a negative number). 
- if timeoutSec > 60 { - timeoutSec = 60 - } else if timeoutSec < 5 { - timeoutSec = 5 - } - - timeout := time.Duration(timeoutSec * float64(time.Second)) - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - clientConn, err := (&controlhttp.Dialer{ - Hostname: nc.host, - HTTPPort: nc.httpPort, - HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), - MachineKey: nc.privKey, - ControlKey: nc.serverPubKey, - ProtocolVersion: uint16(tailcfg.CurrentCapabilityVersion), - Dialer: nc.dialer.SystemDial, - DNSCache: nc.dnsCache, - DialPlan: dialPlan, - Logf: nc.logf, - NetMon: nc.netMon, - HealthTracker: nc.health, - Clock: tstime.StdClock{}, - }).Dial(ctx) - if err != nil { - return nil, err - } - - ncc, err := noiseconn.New(clientConn.Conn, nc.h2t, connID, nc.connClosed) - if err != nil { - return nil, err - } - - nc.mu.Lock() - if nc.closed { - nc.mu.Unlock() - ncc.Close() // Needs to be called without holding the lock. - return nil, errors.New("noise client closed") - } - defer nc.mu.Unlock() - mak.Set(&nc.connPool, connID, ncc) - nc.last = ncc - return ncc, nil -} - -// post does a POST to the control server at the given path, JSON-encoding body. -// The provided nodeKey is an optional load balancing hint. -func (nc *NoiseClient) post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { - return nc.doWithBody(ctx, "POST", path, nodeKey, body) -} - -func (nc *NoiseClient) doWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { - jbody, err := json.Marshal(body) - if err != nil { - return nil, err - } - req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) - if err != nil { - return nil, err - } - addLBHeader(req, nodeKey) - req.Header.Set("Content-Type", "application/json") - conn, err := nc.getConn(ctx) - if err != nil { - return nil, err - } - return conn.RoundTrip(req) -} diff --git a/control/controlhttp/client.go b/control/controlhttp/client.go index 1bb60d672980d..06a2131fdcb2b 100644 --- a/control/controlhttp/client.go +++ b/control/controlhttp/client.go @@ -20,26 +20,27 @@ package controlhttp import ( + "cmp" "context" "crypto/tls" "encoding/base64" "errors" "fmt" "io" - "math" "net" "net/http" "net/http/httptrace" "net/netip" "net/url" "runtime" - "sort" "sync/atomic" "time" "tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" @@ -47,11 +48,9 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" - "tailscale.com/util/multierr" ) var stdDialer net.Dialer @@ -82,7 +81,7 @@ func (a *Dialer) getProxyFunc() func(*http.Request) (*url.URL, error) { if a.proxyFunc != nil { return a.proxyFunc } - return tshttpproxy.ProxyFromEnvironment + return feature.HookProxyFromEnvironment.GetOrNil() } // httpsFallbackDelay is how long we'll wait for a.HTTPPort to work before @@ -104,162 +103,71 @@ func (a *Dialer) dial(ctx context.Context) (*ClientConn, error) { // host we know about. 
useDialPlan := envknob.BoolDefaultTrue("TS_USE_CONTROL_DIAL_PLAN") if !useDialPlan || a.DialPlan == nil || len(a.DialPlan.Candidates) == 0 { - return a.dialHost(ctx, netip.Addr{}) + return a.dialHost(ctx) } candidates := a.DialPlan.Candidates - // Otherwise, we try dialing per the plan. Store the highest priority - // in the list, so that if we get a connection to one of those - // candidates we can return quickly. - var highestPriority int = math.MinInt - for _, c := range candidates { - if c.Priority > highestPriority { - highestPriority = c.Priority - } - } - - // This context allows us to cancel in-flight connections if we get a - // highest-priority connection before we're all done. + // Create a context to be canceled as we return, so once we get a good connection, + // we can drop all the other ones. ctx, cancel := context.WithCancel(ctx) defer cancel() // Now, for each candidate, kick off a dial in parallel. type dialResult struct { - conn *ClientConn - err error - addr netip.Addr - priority int - } - resultsCh := make(chan dialResult, len(candidates)) - - var pending atomic.Int32 - pending.Store(int32(len(candidates))) - for _, c := range candidates { - go func(ctx context.Context, c tailcfg.ControlIPCandidate) { - var ( - conn *ClientConn - err error - ) - - // Always send results back to our channel. - defer func() { - resultsCh <- dialResult{conn, err, c.IP, c.Priority} - if pending.Add(-1) == 0 { - close(resultsCh) - } - }() - - // If non-zero, wait the configured start timeout - // before we do anything. - if c.DialStartDelaySec > 0 { - a.logf("[v2] controlhttp: waiting %.2f seconds before dialing %q @ %v", c.DialStartDelaySec, a.Hostname, c.IP) - tmr, tmrChannel := a.clock().NewTimer(time.Duration(c.DialStartDelaySec * float64(time.Second))) - defer tmr.Stop() - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-tmrChannel: - } - } + conn *ClientConn + err error + } + resultsCh := make(chan dialResult) // unbuffered, never closed - // Now, create a sub-context with the given timeout and - // try dialing the provided host. - ctx, cancel := context.WithTimeout(ctx, time.Duration(c.DialTimeoutSec*float64(time.Second))) - defer cancel() + dialCand := func(cand tailcfg.ControlIPCandidate) (*ClientConn, error) { + if cand.ACEHost != "" { + a.logf("[v2] controlhttp: waited %.2f seconds, dialing %q via ACE %s (%s)", cand.DialStartDelaySec, a.Hostname, cand.ACEHost, cmp.Or(cand.IP.String(), "dns")) + } else { + a.logf("[v2] controlhttp: waited %.2f seconds, dialing %q @ %s", cand.DialStartDelaySec, a.Hostname, cand.IP.String()) + } - // This will dial, and the defer above sends it back to our parent. - a.logf("[v2] controlhttp: trying to dial %q @ %v", a.Hostname, c.IP) - conn, err = a.dialHost(ctx, c.IP) - }(ctx, c) + ctx, cancel := context.WithTimeout(ctx, time.Duration(cand.DialTimeoutSec*float64(time.Second))) + defer cancel() + return a.dialHostOpt(ctx, cand.IP, cand.ACEHost) } - var results []dialResult - for res := range resultsCh { - // If we get a response that has the highest priority, we don't - // need to wait for any of the other connections to finish; we - // can just return this connection. 
- // - // TODO(andrew): we could make this better by keeping track of - // the highest remaining priority dynamically, instead of just - // checking for the highest total - if res.priority == highestPriority && res.conn != nil { - a.logf("[v1] controlhttp: high-priority success dialing %q @ %v from dial plan", a.Hostname, res.addr) - - // Drain the channel and any existing connections in - // the background. + for _, cand := range candidates { + timer := time.AfterFunc(time.Duration(cand.DialStartDelaySec*float64(time.Second)), func() { go func() { - for _, res := range results { - if res.conn != nil { - res.conn.Close() + conn, err := dialCand(cand) + select { + case resultsCh <- dialResult{conn, err}: + if err == nil { + a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, cmp.Or(cand.ACEHost, cand.IP.String())) } - } - for res := range resultsCh { - if res.conn != nil { - res.conn.Close() + case <-ctx.Done(): + if conn != nil { + conn.Close() } } - if a.drainFinished != nil { - close(a.drainFinished) - } }() - return res.conn, nil - } - - // This isn't a highest-priority result, so just store it until - // we're done. - results = append(results, res) + }) + defer timer.Stop() } - // After we finish this function, close any remaining open connections. - defer func() { - for _, result := range results { - // Note: below, we nil out the returned connection (if - // any) in the slice so we don't close it. - if result.conn != nil { - result.conn.Close() + var errs []error + for { + select { + case res := <-resultsCh: + if res.err == nil { + return res.conn, nil } + errs = append(errs, res.err) + if len(errs) == len(candidates) { + // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. + a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", errors.Join(errs...)) + return a.dialHost(ctx) + } + case <-ctx.Done(): + a.logf("controlhttp: context aborted dialing") + return nil, ctx.Err() } - - // We don't drain asynchronously after this point, so notify our - // channel when we return. - if a.drainFinished != nil { - close(a.drainFinished) - } - }() - - // Sort by priority, then take the first non-error response. - sort.Slice(results, func(i, j int) bool { - // NOTE: intentionally inverted so that the highest priority - // item comes first - return results[i].priority > results[j].priority - }) - - var ( - conn *ClientConn - errs []error - ) - for i, result := range results { - if result.err != nil { - errs = append(errs, result.err) - continue - } - - a.logf("[v1] controlhttp: succeeded dialing %q @ %v from dial plan", a.Hostname, result.addr) - conn = result.conn - results[i].conn = nil // so we don't close it in the defer - return conn, nil - } - if ctx.Err() != nil { - a.logf("controlhttp: context aborted dialing") - return nil, ctx.Err() } - - merr := multierr.New(errs...) - - // If we get here, then we didn't get anywhere with our dial plan; fall back to just using DNS. - a.logf("controlhttp: failed dialing using DialPlan, falling back to DNS; errs=%s", merr.Error()) - return a.dialHost(ctx, netip.Addr{}) } // The TS_FORCE_NOISE_443 envknob forces the controlclient noise dialer to @@ -316,10 +224,19 @@ var debugNoiseDial = envknob.RegisterBool("TS_DEBUG_NOISE_DIAL") // dialHost connects to the configured Dialer.Hostname and upgrades the // connection into a controlbase.Conn. 
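The rewritten dial logic above replaces the old priority bookkeeping with a simpler race: each dial-plan candidate is scheduled with time.AfterFunc at its start delay, results come back on an unbuffered channel, the first success wins (the shared context then cancels the losers), and only once every candidate has failed does the code fall back to plain DNS dialing. The following is a condensed sketch of that pattern under simplified types (string "connections" and invented candidate/raceDial/dialOne/fallback names), not the actual controlhttp implementation.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type candidate struct {
	addr       string
	startDelay time.Duration
	timeout    time.Duration
}

// raceDial starts one delayed dial per candidate, returns the first success,
// and falls back only after every candidate has reported an error.
func raceDial(ctx context.Context, cands []candidate,
	dialOne func(ctx context.Context, addr string) (string, error),
	fallback func(ctx context.Context) (string, error)) (string, error) {

	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // once we return, any still-running dials are abandoned

	type result struct {
		conn string
		err  error
	}
	results := make(chan result) // unbuffered, never closed

	for _, c := range cands {
		timer := time.AfterFunc(c.startDelay, func() {
			dctx, dcancel := context.WithTimeout(ctx, c.timeout)
			defer dcancel()
			conn, err := dialOne(dctx, c.addr)
			select {
			case results <- result{conn, err}:
			case <-ctx.Done():
				// Nobody is listening anymore; a real implementation closes conn here.
			}
		})
		defer timer.Stop()
	}

	var errs []error
	for {
		select {
		case r := <-results:
			if r.err == nil {
				return r.conn, nil
			}
			errs = append(errs, r.err)
			if len(errs) == len(cands) {
				// Every candidate failed; fall back to DNS-based dialing.
				fmt.Println("dial plan failed:", errors.Join(errs...))
				return fallback(ctx)
			}
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
}

func main() {
	dial := func(ctx context.Context, addr string) (string, error) {
		if addr == "10.0.0.10:443" {
			return "", errors.New("unreachable")
		}
		return "conn to " + addr, nil
	}
	fallback := func(ctx context.Context) (string, error) { return "conn via DNS", nil }

	conn, err := raceDial(context.Background(), []candidate{
		{addr: "10.0.0.10:443", timeout: time.Second},
		{addr: "10.0.0.2:443", startDelay: 10 * time.Millisecond, timeout: time.Second},
	}, dial, fallback)
	fmt.Println(conn, err)
}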
+func (a *Dialer) dialHost(ctx context.Context) (*ClientConn, error) { + return a.dialHostOpt(ctx, + netip.Addr{}, // no pre-resolved IP + "", // don't use ACE + ) +} + +// dialHostOpt connects to the configured Dialer.Hostname and upgrades the +// connection into a controlbase.Conn. // // If optAddr is valid, then no DNS is used and the connection will be made to the // provided address. -func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, error) { +func (a *Dialer) dialHostOpt(ctx context.Context, optAddr netip.Addr, optACEHost string) (*ClientConn, error) { // Create one shared context used by both port 80 and port 443 dials. // If port 80 is still in flight when 443 returns, this deferred cancel // will stop the port 80 dial. @@ -341,7 +258,7 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, Host: net.JoinHostPort(a.Hostname, strDef(a.HTTPSPort, "443")), Path: serverUpgradePath, } - if a.HTTPSPort == NoPort { + if a.HTTPSPort == NoPort || optACEHost != "" { u443 = nil } @@ -353,11 +270,11 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, ch := make(chan tryURLRes) // must be unbuffered try := func(u *url.URL) { if debugNoiseDial() { - a.logf("trying noise dial (%v, %v) ...", u, optAddr) + a.logf("trying noise dial (%v, %v) ...", u, cmp.Or(optACEHost, optAddr.String())) } - cbConn, err := a.dialURL(ctx, u, optAddr) + cbConn, err := a.dialURL(ctx, u, optAddr, optACEHost) if debugNoiseDial() { - a.logf("noise dial (%v, %v) = (%v, %v)", u, optAddr, cbConn, err) + a.logf("noise dial (%v, %v) = (%v, %v)", u, cmp.Or(optACEHost, optAddr.String()), cbConn, err) } select { case ch <- tryURLRes{u, cbConn, err}: @@ -388,6 +305,9 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, } var err80, err443 error + if forceTLS { + err80 = errors.New("TLS forced: no port 80 dialed") + } for { select { case <-ctx.Done(): @@ -423,12 +343,12 @@ func (a *Dialer) dialHost(ctx context.Context, optAddr netip.Addr) (*ClientConn, // // If optAddr is valid, then no DNS is used and the connection will be made to the // provided address. -func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr) (*ClientConn, error) { +func (a *Dialer) dialURL(ctx context.Context, u *url.URL, optAddr netip.Addr, optACEHost string) (*ClientConn, error) { init, cont, err := controlbase.ClientDeferred(a.MachineKey, a.ControlKey, a.ProtocolVersion) if err != nil { return nil, err } - netConn, err := a.tryURLUpgrade(ctx, u, optAddr, init) + netConn, err := a.tryURLUpgrade(ctx, u, optAddr, optACEHost, init) if err != nil { return nil, err } @@ -474,13 +394,15 @@ var macOSScreenTime = health.Register(&health.Warnable{ ImpactsConnectivity: true, }) +var HookMakeACEDialer feature.Hook[func(dialer netx.DialFunc, aceHost string, optIP netip.Addr) netx.DialFunc] + // tryURLUpgrade connects to u, and tries to upgrade it to a net.Conn. // // If optAddr is valid, then no DNS is used and the connection will be made to // the provided address. // // Only the provided ctx is used, not a.ctx. 
-func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, init []byte) (_ net.Conn, retErr error) { +func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Addr, optACEHost string, init []byte) (_ net.Conn, retErr error) { var dns *dnscache.Resolver // If we were provided an address to dial, then create a resolver that just @@ -502,6 +424,17 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad dialer = stdDialer.DialContext } + if optACEHost != "" { + if !buildfeatures.HasACE { + return nil, feature.ErrUnavailable + } + f, ok := HookMakeACEDialer.GetOk() + if !ok { + return nil, feature.ErrUnavailable + } + dialer = f(dialer, optACEHost, optAddr) + } + // On macOS, see if Screen Time is blocking things. if runtime.GOOS == "darwin" { var proxydIntercepted atomic.Bool // intercepted by macOS webfilterproxyd @@ -528,9 +461,21 @@ func (a *Dialer) tryURLUpgrade(ctx context.Context, u *url.URL, optAddr netip.Ad tr := http.DefaultTransport.(*http.Transport).Clone() defer tr.CloseIdleConnections() - tr.Proxy = a.getProxyFunc() - tshttpproxy.SetTransportGetProxyConnectHeader(tr) - tr.DialContext = dnscache.Dialer(dialer, dns) + if optACEHost != "" { + // If using ACE, we don't want to use any HTTP proxy. + // ACE is already a tunnel+proxy. + // TODO(tailscale/corp#32483): use system proxy too? + tr.Proxy = nil + tr.DialContext = dialer + } else { + if buildfeatures.HasUseProxy { + tr.Proxy = a.getProxyFunc() + if set, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + set(tr) + } + } + tr.DialContext = dnscache.Dialer(dialer, dns) + } // Disable HTTP2, since h2 can't do protocol switching. tr.TLSClientConfig.NextProtos = []string{} tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{} diff --git a/control/controlhttp/constants.go b/control/controlhttp/constants.go index 12038fae45b1c..359410ae9d29c 100644 --- a/control/controlhttp/constants.go +++ b/control/controlhttp/constants.go @@ -78,8 +78,8 @@ type Dialer struct { // dropped. Logf logger.Logf - // NetMon is the [netmon.Monitor] to use for this Dialer. It must be - // non-nil. + // NetMon is the [netmon.Monitor] to use for this Dialer. + // It is optional. NetMon *netmon.Monitor // HealthTracker, if non-nil, is the health tracker to use. 
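The client.go changes above stop importing tshttpproxy directly and instead consult optional hooks (feature.HookProxyFromEnvironment.GetOrNil, HookMakeACEDialer.GetOk) so that minimal builds can omit proxy and ACE support entirely. Below is a minimal sketch of that optional-hook pattern using a simplified, invented Hook type; the real one lives in tailscale.com/feature and its API may differ.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// Hook holds an optional implementation that a feature package can register at
// init time. Call sites ask the hook instead of importing the feature package,
// so builds that omit the feature simply see an empty hook.
type Hook[T any] struct {
	f   T
	set bool
}

// Set registers the implementation; in this pattern it is called from the
// optional feature package's init function.
func (h *Hook[T]) Set(f T) { h.f, h.set = f, true }

// GetOk returns the registered implementation and whether one was set.
func (h *Hook[T]) GetOk() (T, bool) { return h.f, h.set }

// GetOrNil returns the implementation or the zero value (nil for func types),
// which is convenient when a nil func already means "not configured".
func (h *Hook[T]) GetOrNil() T { return h.f }

// hookProxyFromEnvironment mirrors the shape of the proxy hook used above: the
// value is a proxy-selection func compatible with http.Transport.Proxy.
var hookProxyFromEnvironment Hook[func(*http.Request) (*url.URL, error)]

func main() {
	tr := &http.Transport{}
	// With nothing registered, GetOrNil yields nil and the transport uses no proxy.
	tr.Proxy = hookProxyFromEnvironment.GetOrNil()

	// A build that links in proxy support would register an implementation:
	hookProxyFromEnvironment.Set(http.ProxyFromEnvironment)
	if f, ok := hookProxyFromEnvironment.GetOk(); ok {
		req, _ := http.NewRequest("GET", "https://controlplane.example/", nil)
		u, err := f(req)
		fmt.Println("proxy for request:", u, err)
	}
}

Registering via Set from the optional package's init keeps the dependency edge pointing from the feature into the hook rather than from the dialer into the feature, which is what lets the minimal depaware builds drop it.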
@@ -98,7 +98,6 @@ type Dialer struct { logPort80Failure atomic.Bool // For tests only - drainFinished chan struct{} omitCertErrorLogging bool testFallbackDelay time.Duration diff --git a/control/controlhttp/http_test.go b/control/controlhttp/http_test.go index daf262023da97..648b9e5ed88d5 100644 --- a/control/controlhttp/http_test.go +++ b/control/controlhttp/http_test.go @@ -15,19 +15,20 @@ import ( "net/http/httputil" "net/netip" "net/url" - "runtime" "slices" "strconv" + "strings" "sync" "testing" + "testing/synctest" "time" "tailscale.com/control/controlbase" "tailscale.com/control/controlhttp/controlhttpcommon" "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/health" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" - "tailscale.com/net/netx" "tailscale.com/net/socks5" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" @@ -35,6 +36,8 @@ import ( "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/eventbus/eventbustest" + "tailscale.com/util/must" ) type httpTestParam struct { @@ -146,6 +149,8 @@ func testControlHTTP(t *testing.T, param httpTestParam) { proxy := param.proxy client, server := key.NewMachine(), key.NewMachine() + bus := eventbustest.NewBus(t) + const testProtocolVersion = 1 const earlyWriteMsg = "Hello, world!" sch := make(chan serverResult, 1) @@ -215,6 +220,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { netMon := netmon.NewStatic() dialer := tsdial.NewDialer(netMon) + dialer.SetBus(bus) a := &Dialer{ Hostname: "localhost", HTTPPort: strconv.Itoa(httpLn.Addr().(*net.TCPAddr).Port), @@ -228,7 +234,7 @@ func testControlHTTP(t *testing.T, param httpTestParam) { omitCertErrorLogging: true, testFallbackDelay: fallbackDelay, Clock: clock, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(eventbustest.NewBus(t)), } if param.httpInDial { @@ -531,6 +537,28 @@ EKTcWGekdmdDPsHloRNtsiCa697B2O9IFA== } } +// slowListener wraps a memnet listener to delay accept operations +type slowListener struct { + net.Listener + delay time.Duration +} + +func (sl *slowListener) Accept() (net.Conn, error) { + // Add delay before accepting connections + timer := time.NewTimer(sl.delay) + defer timer.Stop() + <-timer.C + + return sl.Listener.Accept() +} + +func newSlowListener(inner net.Listener, delay time.Duration) net.Listener { + return &slowListener{ + Listener: inner, + delay: delay, + } +} + func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Upgrade", controlhttpcommon.UpgradeHeaderValue) @@ -544,33 +572,102 @@ func brokenMITMHandler(clock tstime.Clock) http.HandlerFunc { } func TestDialPlan(t *testing.T) { - if runtime.GOOS != "linux" { - t.Skip("only works on Linux due to multiple localhost addresses") + testCases := []struct { + name string + plan *tailcfg.ControlDialPlan + want []netip.Addr + allowFallback bool + maxDuration time.Duration + }{ + { + name: "single", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "broken-then-good", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10, DialStartDelaySec: 1}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + }, + { + name: 
"multiple-candidates-with-broken", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + // Multiple good IPs plus a broken one + // Should succeed with any of the good ones + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.4"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.3"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2"), netip.MustParseAddr("10.0.0.4"), netip.MustParseAddr("10.0.0.3")}, + }, + { + name: "multiple-candidates-race", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.3"), DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.3"), netip.MustParseAddr("10.0.0.2")}, + }, + { + name: "fallback", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.10"), DialTimeoutSec: 1}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.1")}, + allowFallback: true, + }, + { + // In tailscale/corp#32534 we discovered that a prior implementation + // of the dial race was waiting for all dials to complete when the + // top priority dial was failing. This delay was long enough that in + // real scenarios the server will close the connection due to + // inactivity, because the client does not send the first inside of + // noise request soon enough. This test is a regression guard + // against that behavior - proving that the dial returns promptly + // even if there is some cause of a slow race. + name: "slow-endpoint-doesnt-block", + plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ + {IP: netip.MustParseAddr("10.0.0.12"), Priority: 5, DialTimeoutSec: 10}, + {IP: netip.MustParseAddr("10.0.0.2"), Priority: 1, DialTimeoutSec: 10}, + }}, + want: []netip.Addr{netip.MustParseAddr("10.0.0.2")}, + maxDuration: 2 * time.Second, // Must complete quickly, not wait for slow endpoint + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + runDialPlanTest(t, tt.plan, tt.want, tt.allowFallback, tt.maxDuration) + }) + }) } +} +func runDialPlanTest(t *testing.T, plan *tailcfg.ControlDialPlan, want []netip.Addr, allowFallback bool, maxDuration time.Duration) { client, server := key.NewMachine(), key.NewMachine() const ( testProtocolVersion = 1 + httpPort = "80" + httpsPort = "443" ) - getRandomPort := func() string { - ln, err := net.Listen("tcp", ":0") - if err != nil { - t.Fatalf("net.Listen: %v", err) - } - defer ln.Close() - _, port, err := net.SplitHostPort(ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - return port - } + memNetwork := &memnet.Network{} - // We need consistent ports for each address; these are chosen - // randomly and we hope that they won't conflict during this test. 
- httpPort := getRandomPort() - httpsPort := getRandomPort() + fallbackAddr := netip.MustParseAddr("10.0.0.1") + goodAddr := netip.MustParseAddr("10.0.0.2") + otherAddr := netip.MustParseAddr("10.0.0.3") + other2Addr := netip.MustParseAddr("10.0.0.4") + brokenAddr := netip.MustParseAddr("10.0.0.10") + slowAddr := netip.MustParseAddr("10.0.0.12") makeHandler := func(t *testing.T, name string, host netip.Addr, wrap func(http.Handler) http.Handler) { done := make(chan struct{}) @@ -591,17 +688,66 @@ func TestDialPlan(t *testing.T) { handler = wrap(handler) } - httpLn, err := net.Listen("tcp", host.String()+":"+httpPort) + httpLn := must.Get(memNetwork.Listen("tcp", host.String()+":"+httpPort)) + httpsLn := must.Get(memNetwork.Listen("tcp", host.String()+":"+httpsPort)) + + httpServer := &http.Server{Handler: handler} + go httpServer.Serve(httpLn) + t.Cleanup(func() { + httpServer.Close() + }) + + httpsServer := &http.Server{ + Handler: handler, + TLSConfig: tlsConfig(t), + ErrorLog: logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")), + } + go httpsServer.ServeTLS(httpsLn, "", "") + t.Cleanup(func() { + httpsServer.Close() + }) + } + + // Use synctest's controlled time + clock := tstime.StdClock{} + makeHandler(t, "fallback", fallbackAddr, nil) + makeHandler(t, "good", goodAddr, nil) + makeHandler(t, "other", otherAddr, nil) + makeHandler(t, "other2", other2Addr, nil) + makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler { + return brokenMITMHandler(clock) + }) + // Create slow listener that delays accept by 5 seconds + makeSlowHandler := func(t *testing.T, name string, host netip.Addr, delay time.Duration) { + done := make(chan struct{}) + t.Cleanup(func() { + close(done) + }) + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := controlhttpserver.AcceptHTTP(context.Background(), w, r, server, nil) + if err != nil { + log.Print(err) + } else { + defer conn.Close() + } + w.Header().Set("X-Handler-Name", name) + <-done + }) + + httpLn, err := memNetwork.Listen("tcp", host.String()+":"+httpPort) if err != nil { t.Fatalf("HTTP listen: %v", err) } - httpsLn, err := net.Listen("tcp", host.String()+":"+httpsPort) + httpsLn, err := memNetwork.Listen("tcp", host.String()+":"+httpsPort) if err != nil { t.Fatalf("HTTPS listen: %v", err) } + slowHttpLn := newSlowListener(httpLn, delay) + slowHttpsLn := newSlowListener(httpsLn, delay) + httpServer := &http.Server{Handler: handler} - go httpServer.Serve(httpLn) + go httpServer.Serve(slowHttpLn) t.Cleanup(func() { httpServer.Close() }) @@ -611,213 +757,148 @@ func TestDialPlan(t *testing.T) { TLSConfig: tlsConfig(t), ErrorLog: logger.StdLogger(logger.WithPrefix(t.Logf, "http.Server.ErrorLog: ")), } - go httpsServer.ServeTLS(httpsLn, "", "") + go httpsServer.ServeTLS(slowHttpsLn, "", "") t.Cleanup(func() { httpsServer.Close() }) - return } + makeSlowHandler(t, "slow", slowAddr, 5*time.Second) - fallbackAddr := netip.MustParseAddr("127.0.0.1") - goodAddr := netip.MustParseAddr("127.0.0.2") - otherAddr := netip.MustParseAddr("127.0.0.3") - other2Addr := netip.MustParseAddr("127.0.0.4") - brokenAddr := netip.MustParseAddr("127.0.0.10") + // memnetDialer with connection tracking, so we can catch connection leaks. 
+ dialer := &memnetDialer{ + inner: memNetwork.Dial, + t: t, + } + defer dialer.waitForAllClosedSynctest() - testCases := []struct { - name string - plan *tailcfg.ControlDialPlan - wrap func(http.Handler) http.Handler - want netip.Addr + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() - allowFallback bool - }{ - { - name: "single", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - }}, - want: goodAddr, - }, - { - name: "broken-then-good", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // Dials the broken one, which fails, and then - // eventually dials the good one and succeeds - {IP: brokenAddr, Priority: 2, DialTimeoutSec: 10}, - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10, DialStartDelaySec: 1}, - }}, - want: goodAddr, - }, - // TODO(#8442): fix this test - // { - // name: "multiple-priority-fast-path", - // plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // // Dials some good IPs and our bad one (which - // // hangs forever), which then hits the fast - // // path where we bail without waiting. - // {IP: brokenAddr, Priority: 1, DialTimeoutSec: 10}, - // {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - // {IP: other2Addr, Priority: 1, DialTimeoutSec: 10}, - // {IP: otherAddr, Priority: 2, DialTimeoutSec: 10}, - // }}, - // want: otherAddr, - // }, - { - name: "multiple-priority-slow-path", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - // Our broken address is the highest priority, - // so we don't hit our fast path. - {IP: brokenAddr, Priority: 10, DialTimeoutSec: 10}, - {IP: otherAddr, Priority: 2, DialTimeoutSec: 10}, - {IP: goodAddr, Priority: 1, DialTimeoutSec: 10}, - }}, - want: otherAddr, - }, - { - name: "fallback", - plan: &tailcfg.ControlDialPlan{Candidates: []tailcfg.ControlIPCandidate{ - {IP: brokenAddr, Priority: 1, DialTimeoutSec: 1}, - }}, - want: fallbackAddr, - allowFallback: true, - }, + host := "example.com" + if allowFallback { + host = fallbackAddr.String() + } + bus := eventbustest.NewBus(t) + a := &Dialer{ + Hostname: host, + HTTPPort: httpPort, + HTTPSPort: httpsPort, + MachineKey: client, + ControlKey: server.Public(), + ProtocolVersion: testProtocolVersion, + Dialer: dialer.Dial, + Logf: t.Logf, + DialPlan: plan, + proxyFunc: func(*http.Request) (*url.URL, error) { return nil, nil }, + omitCertErrorLogging: true, + testFallbackDelay: 50 * time.Millisecond, + Clock: clock, + HealthTracker: health.NewTracker(bus), } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - // TODO(awly): replace this with tstest.NewClock and update the - // test to advance the clock correctly. 
- clock := tstime.StdClock{} - makeHandler(t, "fallback", fallbackAddr, nil) - makeHandler(t, "good", goodAddr, nil) - makeHandler(t, "other", otherAddr, nil) - makeHandler(t, "other2", other2Addr, nil) - makeHandler(t, "broken", brokenAddr, func(h http.Handler) http.Handler { - return brokenMITMHandler(clock) - }) - dialer := closeTrackDialer{ - t: t, - inner: tsdial.NewDialer(netmon.NewStatic()).SystemDial, - conns: make(map[*closeTrackConn]bool), - } - defer dialer.Done() + start := time.Now() + conn, err := a.dial(ctx) + duration := time.Since(start) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() + if err != nil { + t.Fatalf("dialing controlhttp: %v", err) + } + defer conn.Close() - // By default, we intentionally point to something that - // we know won't connect, since we want a fallback to - // DNS to be an error. - host := "example.com" - if tt.allowFallback { - host = "localhost" - } + if maxDuration > 0 && duration > maxDuration { + t.Errorf("dial took %v, expected < %v (should not wait for slow endpoints)", duration, maxDuration) + } - drained := make(chan struct{}) - a := &Dialer{ - Hostname: host, - HTTPPort: httpPort, - HTTPSPort: httpsPort, - MachineKey: client, - ControlKey: server.Public(), - ProtocolVersion: testProtocolVersion, - Dialer: dialer.Dial, - Logf: t.Logf, - DialPlan: tt.plan, - proxyFunc: func(*http.Request) (*url.URL, error) { return nil, nil }, - drainFinished: drained, - omitCertErrorLogging: true, - testFallbackDelay: 50 * time.Millisecond, - Clock: clock, - HealthTracker: new(health.Tracker), - } + raddr := conn.RemoteAddr() + raddrStr := raddr.String() - conn, err := a.dial(ctx) - if err != nil { - t.Fatalf("dialing controlhttp: %v", err) - } - defer conn.Close() - - raddr := conn.RemoteAddr().(*net.TCPAddr) + // split on "|" first to remove memnet pipe suffix + addrPart := raddrStr + if idx := strings.Index(raddrStr, "|"); idx >= 0 { + addrPart = raddrStr[:idx] + } - got, ok := netip.AddrFromSlice(raddr.IP) - if !ok { - t.Errorf("invalid remote IP: %v", raddr.IP) - } else if got != tt.want { - t.Errorf("got connection from %q; want %q", got, tt.want) - } else { - t.Logf("successfully connected to %q", raddr.String()) - } + host, _, err2 := net.SplitHostPort(addrPart) + if err2 != nil { + t.Fatalf("failed to parse remote address %q: %v", addrPart, err2) + } - // Wait until our dialer drains so we can verify that - // all connections are closed. 
- <-drained - }) + got, err3 := netip.ParseAddr(host) + if err3 != nil { + t.Errorf("invalid remote IP: %v", host) + } else { + found := slices.Contains(want, got) + if !found { + t.Errorf("got connection from %q; want one of %v", got, want) + } else { + t.Logf("successfully connected to %q", raddr.String()) + } } } -type closeTrackDialer struct { - t testing.TB - inner netx.DialFunc +// memnetDialer wraps memnet.Network.Dial to track connections for testing +type memnetDialer struct { + inner func(ctx context.Context, network, addr string) (net.Conn, error) + t *testing.T mu sync.Mutex - conns map[*closeTrackConn]bool + conns map[net.Conn]string // conn -> remote address for debugging } -func (d *closeTrackDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) { - c, err := d.inner(ctx, network, addr) +func (d *memnetDialer) Dial(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := d.inner(ctx, network, addr) if err != nil { return nil, err } - ct := &closeTrackConn{Conn: c, d: d} d.mu.Lock() - d.conns[ct] = true + if d.conns == nil { + d.conns = make(map[net.Conn]string) + } + d.conns[conn] = conn.RemoteAddr().String() + d.t.Logf("tracked connection opened to %s", conn.RemoteAddr()) d.mu.Unlock() - return ct, nil + + return &memnetTrackedConn{Conn: conn, dialer: d}, nil } -func (d *closeTrackDialer) Done() { - // Unfortunately, tsdial.Dialer.SystemDial closes connections - // asynchronously in a goroutine, so we can't assume that everything is - // closed by the time we get here. - // - // Sleep/wait a few times on the assumption that things will close - // "eventually". - const iters = 100 - for i := range iters { +func (d *memnetDialer) waitForAllClosedSynctest() { + const maxWait = 15 * time.Second + const checkInterval = 100 * time.Millisecond + + for range int(maxWait / checkInterval) { d.mu.Lock() - if len(d.conns) == 0 { + remaining := len(d.conns) + if remaining == 0 { d.mu.Unlock() return } + d.mu.Unlock() - // Only error on last iteration - if i != iters-1 { - d.mu.Unlock() - time.Sleep(100 * time.Millisecond) - continue - } + time.Sleep(checkInterval) + } - for conn := range d.conns { - d.t.Errorf("expected close of conn %p; RemoteAddr=%q", conn, conn.RemoteAddr().String()) - } - d.mu.Unlock() + d.mu.Lock() + defer d.mu.Unlock() + for _, addr := range d.conns { + d.t.Errorf("connection to %s was not closed after %v", addr, maxWait) } } -func (d *closeTrackDialer) noteClose(c *closeTrackConn) { +func (d *memnetDialer) noteClose(conn net.Conn) { d.mu.Lock() - delete(d.conns, c) // safe if already deleted + if addr, exists := d.conns[conn]; exists { + d.t.Logf("tracked connection closed to %s", addr) + delete(d.conns, conn) + } d.mu.Unlock() } -type closeTrackConn struct { +type memnetTrackedConn struct { net.Conn - d *closeTrackDialer + dialer *memnetDialer } -func (c *closeTrackConn) Close() error { - c.d.noteClose(c) +func (c *memnetTrackedConn) Close() error { + c.dialer.noteClose(c.Conn) return c.Conn.Close() } diff --git a/control/controlknobs/controlknobs.go b/control/controlknobs/controlknobs.go index 2578744cade65..09c16b8b12f1e 100644 --- a/control/controlknobs/controlknobs.go +++ b/control/controlknobs/controlknobs.go @@ -62,8 +62,9 @@ type Knobs struct { // netfiltering, unless overridden by the user. LinuxForceNfTables atomic.Bool - // SeamlessKeyRenewal is whether to enable the alpha functionality of - // renewing node keys without breaking connections. 
+ // SeamlessKeyRenewal is whether to renew node keys without breaking connections. + // This is enabled by default in 1.90 and later, but we but we can remotely disable + // it from the control plane if there's a problem. // http://go/seamless-key-renewal SeamlessKeyRenewal atomic.Bool @@ -128,6 +129,7 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { forceIPTables = has(tailcfg.NodeAttrLinuxMustUseIPTables) forceNfTables = has(tailcfg.NodeAttrLinuxMustUseNfTables) seamlessKeyRenewal = has(tailcfg.NodeAttrSeamlessKeyRenewal) + disableSeamlessKeyRenewal = has(tailcfg.NodeAttrDisableSeamlessKeyRenewal) probeUDPLifetime = has(tailcfg.NodeAttrProbeUDPLifetime) appCStoreRoutes = has(tailcfg.NodeAttrStoreAppCRoutes) userDialUseRoutes = has(tailcfg.NodeAttrUserDialUseRoutes) @@ -154,7 +156,6 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.SilentDisco.Store(silentDisco) k.LinuxForceIPTables.Store(forceIPTables) k.LinuxForceNfTables.Store(forceNfTables) - k.SeamlessKeyRenewal.Store(seamlessKeyRenewal) k.ProbeUDPLifetime.Store(probeUDPLifetime) k.AppCStoreRoutes.Store(appCStoreRoutes) k.UserDialUseRoutes.Store(userDialUseRoutes) @@ -162,6 +163,21 @@ func (k *Knobs) UpdateFromNodeAttributes(capMap tailcfg.NodeCapMap) { k.DisableLocalDNSOverrideViaNRPT.Store(disableLocalDNSOverrideViaNRPT) k.DisableCaptivePortalDetection.Store(disableCaptivePortalDetection) k.DisableSkipStatusQueue.Store(disableSkipStatusQueue) + + // If both attributes are present, then "enable" should win. This reflects + // the history of seamless key renewal. + // + // Before 1.90, seamless was a private alpha, opt-in feature. Devices would + // only seamless do if customers opted in using the seamless renewal attr. + // + // In 1.90 and later, seamless is the default behaviour, and devices will use + // seamless unless explicitly told not to by control (e.g. if we discover + // a bug and want clients to use the prior behaviour). + // + // If a customer has opted in to the pre-1.90 seamless implementation, we + // don't want to switch it off for them -- we only want to switch it off for + // devices that haven't opted in. + k.SeamlessKeyRenewal.Store(seamlessKeyRenewal || !disableSeamlessKeyRenewal) } // AsDebugJSON returns k as something that can be marshalled with json.Marshal diff --git a/control/ts2021/client.go b/control/ts2021/client.go new file mode 100644 index 0000000000000..ca10b1d1b5bc6 --- /dev/null +++ b/control/ts2021/client.go @@ -0,0 +1,312 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ts2021 + +import ( + "bytes" + "cmp" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net" + "net/http" + "net/netip" + "net/url" + "sync" + "time" + + "tailscale.com/control/controlhttp" + "tailscale.com/health" + "tailscale.com/net/dnscache" + "tailscale.com/net/netmon" + "tailscale.com/net/tsdial" + "tailscale.com/tailcfg" + "tailscale.com/tstime" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/util/mak" + "tailscale.com/util/set" +) + +// Client provides a http.Client to connect to tailcontrol over +// the ts2021 protocol. +type Client struct { + // Client is an HTTP client to talk to the coordination server. + // It automatically makes a new Noise connection as needed. 
+ *http.Client + + logf logger.Logf // non-nil + opts ClientOpts + host string // the host part of serverURL + httpPort string // the default port to dial + httpsPort string // the fallback Noise-over-https port or empty if none + + // mu protects the following + mu sync.Mutex + closed bool + connPool set.HandleSet[*Conn] // all live connections +} + +// ClientOpts contains options for the [NewClient] function. All fields are +// required unless otherwise specified. +type ClientOpts struct { + // ServerURL is the URL of the server to connect to. + ServerURL string + + // PrivKey is this node's private key. + PrivKey key.MachinePrivate + + // ServerPubKey is the public key of the server. + // It is of the form https://: (no trailing slash). + ServerPubKey key.MachinePublic + + // Dialer's SystemDial function is used to connect to the server. + Dialer *tsdial.Dialer + + // Optional fields follow + + // Logf is the log function to use. + // If nil, log.Printf is used. + Logf logger.Logf + + // NetMon is the network monitor that will be used to get the + // network interface state. This field can be nil; if so, the current + // state will be looked up dynamically. + NetMon *netmon.Monitor + + // DNSCache is the caching Resolver to use to connect to the server. + // + // This field can be nil. + DNSCache *dnscache.Resolver + + // HealthTracker, if non-nil, is the health tracker to use. + HealthTracker *health.Tracker + + // DialPlan, if set, is a function that should return an explicit plan + // on how to connect to the server. + DialPlan func() *tailcfg.ControlDialPlan + + // ProtocolVersion, if non-zero, specifies an alternate + // protocol version to use instead of the default, + // of [tailcfg.CurrentCapabilityVersion]. + ProtocolVersion uint16 +} + +// NewClient returns a new noiseClient for the provided server and machine key. +// +// netMon may be nil, if non-nil it's used to do faster interface lookups. +// dialPlan may be nil +func NewClient(opts ClientOpts) (*Client, error) { + logf := opts.Logf + if logf == nil { + logf = log.Printf + } + if opts.ServerURL == "" { + return nil, errors.New("ServerURL is required") + } + if opts.PrivKey.IsZero() { + return nil, errors.New("PrivKey is required") + } + if opts.ServerPubKey.IsZero() { + return nil, errors.New("ServerPubKey is required") + } + if opts.Dialer == nil { + return nil, errors.New("Dialer is required") + } + + u, err := url.Parse(opts.ServerURL) + if err != nil { + return nil, fmt.Errorf("invalid ClientOpts.ServerURL: %w", err) + } + if u.Scheme != "http" && u.Scheme != "https" { + return nil, errors.New("invalid ServerURL scheme, must be http or https") + } + + httpPort, httpsPort := "80", "443" + addr, _ := netip.ParseAddr(u.Hostname()) + isPrivateHost := addr.IsPrivate() || addr.IsLoopback() || u.Hostname() == "localhost" + if port := u.Port(); port != "" { + // If there is an explicit port specified, entirely rely on the scheme, + // unless it's http with a private host in which case we never try using HTTPS. + if u.Scheme == "https" { + httpPort = "" + httpsPort = port + } else if u.Scheme == "http" { + httpPort = port + httpsPort = "443" + if isPrivateHost { + logf("setting empty HTTPS port with http scheme and private host %s", u.Hostname()) + httpsPort = "" + } + } + } else if u.Scheme == "http" && isPrivateHost { + // Whenever the scheme is http and the hostname is an IP address, do not set the HTTPS port, + // as there cannot be a TLS certificate issued for an IP, unless it's a public IP. 
+ httpPort = "80" + httpsPort = "" + } + + np := &Client{ + opts: opts, + host: u.Hostname(), + httpPort: httpPort, + httpsPort: httpsPort, + logf: logf, + } + + tr := &http.Transport{ + Protocols: new(http.Protocols), + MaxConnsPerHost: 1, + } + // We force only HTTP/2 for this transport, which is what the control server + // speaks inside the ts2021 Noise encryption. But Go doesn't know about that, + // so we use "SetUnencryptedHTTP2" even though it's actually encrypted. + tr.Protocols.SetUnencryptedHTTP2(true) + tr.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + return np.dial(ctx) + } + + np.Client = &http.Client{Transport: tr} + return np, nil +} + +// Close closes all the underlying noise connections. +// It is a no-op and returns nil if the connection is already closed. +func (nc *Client) Close() error { + nc.mu.Lock() + live := nc.connPool + nc.closed = true + nc.connPool = nil // stop noteConnClosed from mutating it as we loop over it (in live) below + nc.mu.Unlock() + + for _, c := range live { + c.Close() + } + nc.Client.CloseIdleConnections() + + return nil +} + +// dial opens a new connection to tailcontrol, fetching the server noise key +// if not cached. +func (nc *Client) dial(ctx context.Context) (*Conn, error) { + if tailcfg.CurrentCapabilityVersion > math.MaxUint16 { + // Panic, because a test should have started failing several + // thousand version numbers before getting to this point. + panic("capability version is too high to fit in the wire protocol") + } + + var dialPlan *tailcfg.ControlDialPlan + if nc.opts.DialPlan != nil { + dialPlan = nc.opts.DialPlan() + } + + // If we have a dial plan, then set our timeout as slightly longer than + // the maximum amount of time contained therein; we assume that + // explicit instructions on timeouts are more useful than a single + // hard-coded timeout. + // + // The default value of 5 is chosen so that, when there's no dial plan, + // we retain the previous behaviour of 10 seconds end-to-end timeout. + timeoutSec := 5.0 + if dialPlan != nil { + for _, c := range dialPlan.Candidates { + if v := c.DialStartDelaySec + c.DialTimeoutSec; v > timeoutSec { + timeoutSec = v + } + } + } + + // After we establish a connection, we need some time to actually + // upgrade it into a Noise connection. With a ballpark worst-case RTT + // of 1000ms, give ourselves an extra 5 seconds to complete the + // handshake. + timeoutSec += 5 + + // Be extremely defensive and ensure that the timeout is in the range + // [5, 60] seconds (e.g. if we accidentally get a negative number). 
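+ // Illustrative numbers for the calculation above: with no dial plan,
+ // timeoutSec stays at 5 and the +5 handshake allowance yields the
+ // historical 10-second end-to-end timeout; with candidates of
+ // (DialStartDelaySec=1, DialTimeoutSec=10) and (0, 10), the largest
+ // sum is 11, so the total is 16 seconds, comfortably inside [5, 60].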
+ if timeoutSec > 60 { + timeoutSec = 60 + } else if timeoutSec < 5 { + timeoutSec = 5 + } + + timeout := time.Duration(timeoutSec * float64(time.Second)) + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + chd := &controlhttp.Dialer{ + Hostname: nc.host, + HTTPPort: nc.httpPort, + HTTPSPort: cmp.Or(nc.httpsPort, controlhttp.NoPort), + MachineKey: nc.opts.PrivKey, + ControlKey: nc.opts.ServerPubKey, + ProtocolVersion: cmp.Or(nc.opts.ProtocolVersion, uint16(tailcfg.CurrentCapabilityVersion)), + Dialer: nc.opts.Dialer.SystemDial, + DNSCache: nc.opts.DNSCache, + DialPlan: dialPlan, + Logf: nc.logf, + NetMon: nc.opts.NetMon, + HealthTracker: nc.opts.HealthTracker, + Clock: tstime.StdClock{}, + } + clientConn, err := chd.Dial(ctx) + if err != nil { + return nil, err + } + + nc.mu.Lock() + + handle := set.NewHandle() + ncc := NewConn(clientConn.Conn, func() { nc.noteConnClosed(handle) }) + mak.Set(&nc.connPool, handle, ncc) + + if nc.closed { + nc.mu.Unlock() + ncc.Close() // Needs to be called without holding the lock. + return nil, errors.New("noise client closed") + } + + defer nc.mu.Unlock() + return ncc, nil +} + +// noteConnClosed notes that the *Conn with the given handle has closed and +// should be removed from the live connPool (which is usually of size 0 or 1, +// except perhaps briefly 2 during a network failure and reconnect). +func (nc *Client) noteConnClosed(handle set.Handle) { + nc.mu.Lock() + defer nc.mu.Unlock() + nc.connPool.Delete(handle) +} + +// post does a POST to the control server at the given path, JSON-encoding body. +// The provided nodeKey is an optional load balancing hint. +func (nc *Client) Post(ctx context.Context, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + return nc.DoWithBody(ctx, "POST", path, nodeKey, body) +} + +func (nc *Client) DoWithBody(ctx context.Context, method, path string, nodeKey key.NodePublic, body any) (*http.Response, error) { + jbody, err := json.Marshal(body) + if err != nil { + return nil, err + } + req, err := http.NewRequestWithContext(ctx, method, "https://"+nc.host+path, bytes.NewReader(jbody)) + if err != nil { + return nil, err + } + AddLBHeader(req, nodeKey) + req.Header.Set("Content-Type", "application/json") + return nc.Do(req) +} + +// AddLBHeader adds the load balancer header to req if nodeKey is non-zero. 
+func AddLBHeader(req *http.Request, nodeKey key.NodePublic) { + if !nodeKey.IsZero() { + req.Header.Add(tailcfg.LBHeader, nodeKey.String()) + } +} diff --git a/control/controlclient/noise_test.go b/control/ts2021/client_test.go similarity index 80% rename from control/controlclient/noise_test.go rename to control/ts2021/client_test.go index 4904016f2f082..72fa1f44264c3 100644 --- a/control/controlclient/noise_test.go +++ b/control/ts2021/client_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package controlclient +package ts2021 import ( "context" @@ -10,18 +10,20 @@ import ( "io" "math" "net/http" + "net/http/httptrace" + "sync/atomic" "testing" "time" "golang.org/x/net/http2" "tailscale.com/control/controlhttp/controlhttpserver" - "tailscale.com/internal/noiseconn" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tailcfg" "tailscale.com/tstest/nettest" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/must" ) // maxAllowedNoiseVersion is the highest we expect the Tailscale @@ -54,14 +56,23 @@ func TestNoiseClientHTTP2Upgrade_earlyPayload(t *testing.T) { }.run(t) } -func makeClientWithURL(t *testing.T, url string) *NoiseClient { - nc, err := NewNoiseClient(NoiseOpts{ - Logf: t.Logf, - ServerURL: url, +var ( + testPrivKey = key.NewMachine() + testServerPub = key.NewMachine().Public() +) + +func makeClientWithURL(t *testing.T, url string) *Client { + nc, err := NewClient(ClientOpts{ + Logf: t.Logf, + PrivKey: testPrivKey, + ServerPubKey: testServerPub, + ServerURL: url, + Dialer: tsdial.NewDialer(netmon.NewStatic()), }) if err != nil { t.Fatal(err) } + t.Cleanup(func() { nc.Close() }) return nc } @@ -198,7 +209,7 @@ func (tt noiseClientTest) run(t *testing.T) { dialer.SetSystemDialerForTest(nw.Dial) } - nc, err := NewNoiseClient(NoiseOpts{ + nc, err := NewClient(ClientOpts{ PrivKey: clientPrivate, ServerPubKey: serverPrivate.Public(), ServerURL: hs.URL, @@ -209,28 +220,39 @@ func (tt noiseClientTest) run(t *testing.T) { t.Fatal(err) } - // Get a conn and verify it read its early payload before the http/2 - // handshake. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - c, err := nc.getConn(ctx) - if err != nil { - t.Fatal(err) - } - payload, err := c.GetEarlyPayload(ctx) - if err != nil { - t.Fatal("timed out waiting for didReadHeaderCh") - } + var sawConn atomic.Bool + trace := httptrace.WithClientTrace(t.Context(), &httptrace.ClientTrace{ + GotConn: func(ci httptrace.GotConnInfo) { + ncc, ok := ci.Conn.(*Conn) + if !ok { + // This trace hook sees two dials: the lower-level controlhttp upgrade's + // dial (a tsdial.sysConn), and then the *ts2021.Conn we want. + // Ignore the first one. 
+ return + } + sawConn.Store(true) - gotNonNil := payload != nil - if gotNonNil != tt.sendEarlyPayload { - t.Errorf("sendEarlyPayload = %v but got earlyPayload = %T", tt.sendEarlyPayload, payload) - } - if payload != nil { - if payload.NodeKeyChallenge != chalPrivate.Public() { - t.Errorf("earlyPayload.NodeKeyChallenge = %v; want %v", payload.NodeKeyChallenge, chalPrivate.Public()) - } - } + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + payload, err := ncc.GetEarlyPayload(ctx) + if err != nil { + t.Errorf("GetEarlyPayload: %v", err) + return + } + + gotNonNil := payload != nil + if gotNonNil != tt.sendEarlyPayload { + t.Errorf("sendEarlyPayload = %v but got earlyPayload = %T", tt.sendEarlyPayload, payload) + } + if payload != nil { + if payload.NodeKeyChallenge != chalPrivate.Public() { + t.Errorf("earlyPayload.NodeKeyChallenge = %v; want %v", payload.NodeKeyChallenge, chalPrivate.Public()) + } + } + }, + }) + req := must.Get(http.NewRequestWithContext(trace, "GET", "https://unused.example/", nil)) checkRes := func(t *testing.T, res *http.Response) { t.Helper() @@ -244,15 +266,19 @@ func (tt noiseClientTest) run(t *testing.T) { } } - // And verify we can do HTTP/2 against that conn. - res, err := (&http.Client{Transport: c}).Get("https://unused.example/") + // Verify we can do HTTP/2 against that conn. + res, err := nc.Do(req) if err != nil { t.Fatal(err) } checkRes(t, res) + if !sawConn.Load() { + t.Error("ClientTrace.GotConn never saw the *ts2021.Conn") + } + // And try using the high-level nc.post API as well. - res, err = nc.post(context.Background(), "/", key.NodePublic{}, nil) + res, err = nc.Post(context.Background(), "/", key.NodePublic{}, nil) if err != nil { t.Fatal(err) } @@ -307,7 +333,7 @@ func (up *Upgrader) ServeHTTP(w http.ResponseWriter, r *http.Request) { // https://httpwg.org/specs/rfc7540.html#rfc.section.4.1 (Especially not // an HTTP/2 settings frame, which isn't of type 'T') var notH2Frame [5]byte - copy(notH2Frame[:], noiseconn.EarlyPayloadMagic) + copy(notH2Frame[:], EarlyPayloadMagic) var lenBuf [4]byte binary.BigEndian.PutUint32(lenBuf[:], uint32(len(earlyJSON))) // These writes are all buffered by caller, so fine to do them diff --git a/internal/noiseconn/conn.go b/control/ts2021/conn.go similarity index 69% rename from internal/noiseconn/conn.go rename to control/ts2021/conn.go index 7476b7ecc5a6a..52d663272a8c6 100644 --- a/internal/noiseconn/conn.go +++ b/control/ts2021/conn.go @@ -1,12 +1,10 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package noiseconn contains an internal-only wrapper around controlbase.Conn -// that properly handles the early payload sent by the server before the HTTP/2 -// session begins. -// -// See the documentation on the Conn type for more details. -package noiseconn +// Package ts2021 handles the details of the Tailscale 2021 control protocol +// that are after (above) the Noise layer. In particular, the +// "tailcfg.EarlyNoise" message and the subsequent HTTP/2 connection. +package ts2021 import ( "bytes" @@ -15,10 +13,8 @@ import ( "encoding/json" "errors" "io" - "net/http" "sync" - "golang.org/x/net/http2" "tailscale.com/control/controlbase" "tailscale.com/tailcfg" ) @@ -29,12 +25,13 @@ import ( // the pool when the connection is closed, properly handles an optional "early // payload" that's sent prior to beginning the HTTP/2 session, and provides a // way to return a connection to a pool when the connection is closed. 
+//
+// Use [NewConn] to build a new Conn if you want [Conn.GetEarlyPayload] to work.
+// Otherwise, constructing a Conn directly with only the embedded Conn field set is fine.
 type Conn struct {
 	*controlbase.Conn
 
-	id                int
-	onClose           func(int)
-	h2cc              *http2.ClientConn
+	onClose           func() // or nil
 	readHeaderOnce    sync.Once     // guards init of reader field
 	reader            io.Reader     // (effectively Conn.Reader after header)
 	earlyPayloadReady chan struct{} // closed after earlyPayload is set (including set to nil)
@@ -42,31 +39,19 @@ type Conn struct {
 	earlyPayloadErr   error
 }
 
-// New creates a new Conn that wraps the given controlbase.Conn.
+// NewConn creates a new Conn that wraps the given controlbase.Conn.
 //
-// h2t is the HTTP/2 transport to use for the connection; a new
-// http2.ClientConn will be created that reads from the returned Conn.
-//
-// connID should be a unique ID for this connection. When the Conn is closed,
-// the onClose function will be called with the connID if it is non-nil.
-func New(conn *controlbase.Conn, h2t *http2.Transport, connID int, onClose func(int)) (*Conn, error) {
-	ncc := &Conn{
+// NewConn does not set up HTTP/2 itself; callers are responsible for
+// layering an http2.ClientConn (or anything else) on top of the
+// returned Conn.
+//
+// If non-nil, the onClose function is called at most once, when the Conn is closed.
+func NewConn(conn *controlbase.Conn, onClose func()) *Conn {
+	return &Conn{
 		Conn:              conn,
-		id:                connID,
-		onClose:           onClose,
 		earlyPayloadReady: make(chan struct{}),
+		onClose:           sync.OnceFunc(onClose),
 	}
-	h2cc, err := h2t.NewClientConn(ncc)
-	if err != nil {
-		return nil, err
-	}
-	ncc.h2cc = h2cc
-	return ncc, nil
-}
-
-// RoundTrip implements the http.RoundTripper interface.
-func (c *Conn) RoundTrip(r *http.Request) (*http.Response, error) {
-	return c.h2cc.RoundTrip(r)
 }
 
 // GetEarlyPayload waits for the early Noise payload to arrive.
@@ -76,6 +61,15 @@ func (c *Conn) RoundTrip(r *http.Request) (*http.Response, error) {
 // early Noise payload is ready (if any) and will return the same result for
 // the lifetime of the Conn.
 func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error) {
+	if c.earlyPayloadReady == nil {
+		return nil, errors.New("Conn was not created with NewConn; early payload not supported")
+	}
+	select {
+	case <-c.earlyPayloadReady:
+		return c.earlyPayload, c.earlyPayloadErr
+	default:
+		go c.readHeaderOnce.Do(c.readHeader)
+	}
 	select {
 	case <-c.earlyPayloadReady:
 		return c.earlyPayload, c.earlyPayloadErr
@@ -84,28 +78,6 @@ func (c *Conn) GetEarlyPayload(ctx context.Context) (*tailcfg.EarlyNoise, error)
 	}
 }
 
-// ReserveNewRequest will reserve a new concurrent request on the connection.
-//
-// It returns whether the reservation was successful, and any early Noise
-// payload if present. If a reservation was not successful, it will return
-// false and nil for the early payload.
-func (c *Conn) ReserveNewRequest(ctx context.Context) (bool, *tailcfg.EarlyNoise, error) {
-	earlyPayloadMaybeNil, err := c.GetEarlyPayload(ctx)
-	if err != nil {
-		return false, nil, err
-	}
-	if c.h2cc.ReserveNewRequest() {
-		return true, earlyPayloadMaybeNil, nil
-	}
-	return false, nil, nil
-}
-
-// CanTakeNewRequest reports whether the underlying HTTP/2 connection can take
-// a new request, meaning it has not been closed or received or sent a GOAWAY.
-func (c *Conn) CanTakeNewRequest() bool { - return c.h2cc.CanTakeNewRequest() -} - // The first 9 bytes from the server to client over Noise are either an HTTP/2 // settings frame (a normal HTTP/2 setup) or, as we added later, an "early payload" // header that's also 9 bytes long: 5 bytes (EarlyPayloadMagic) followed by 4 bytes @@ -133,6 +105,14 @@ func (c *Conn) Read(p []byte) (n int, err error) { return c.reader.Read(p) } +// Close closes the connection. +func (c *Conn) Close() error { + if c.onClose != nil { + defer c.onClose() + } + return c.Conn.Close() +} + // readHeader reads the optional "early payload" from the server that arrives // after the Noise handshake but before the HTTP/2 session begins. // @@ -140,7 +120,9 @@ func (c *Conn) Read(p []byte) (n int, err error) { // c.earlyPayload, closing c.earlyPayloadReady, and initializing c.reader for // future reads. func (c *Conn) readHeader() { - defer close(c.earlyPayloadReady) + if c.earlyPayloadReady != nil { + defer close(c.earlyPayloadReady) + } setErr := func(err error) { c.reader = returnErrReader{err} @@ -174,14 +156,3 @@ func (c *Conn) readHeader() { } c.reader = c.Conn } - -// Close closes the connection. -func (c *Conn) Close() error { - if err := c.Conn.Close(); err != nil { - return err - } - if c.onClose != nil { - c.onClose(c.id) - } - return nil -} diff --git a/derp/client_test.go b/derp/client_test.go new file mode 100644 index 0000000000000..a731ad197f1e7 --- /dev/null +++ b/derp/client_test.go @@ -0,0 +1,235 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derp + +import ( + "bufio" + "bytes" + "io" + "net" + "reflect" + "sync" + "testing" + "time" + + "tailscale.com/tstest" + "tailscale.com/types/key" +) + +type dummyNetConn struct { + net.Conn +} + +func (dummyNetConn) SetReadDeadline(time.Time) error { return nil } + +func TestClientRecv(t *testing.T) { + tests := []struct { + name string + input []byte + want any + }{ + { + name: "ping", + input: []byte{ + byte(FramePing), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + }, + want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "pong", + input: []byte{ + byte(FramePong), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + }, + want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8}, + }, + { + name: "health_bad", + input: []byte{ + byte(FrameHealth), 0, 0, 0, 3, + byte('B'), byte('A'), byte('D'), + }, + want: HealthMessage{Problem: "BAD"}, + }, + { + name: "health_ok", + input: []byte{ + byte(FrameHealth), 0, 0, 0, 0, + }, + want: HealthMessage{}, + }, + { + name: "server_restarting", + input: []byte{ + byte(FrameRestarting), 0, 0, 0, 8, + 0, 0, 0, 1, + 0, 0, 0, 2, + }, + want: ServerRestartingMessage{ + ReconnectIn: 1 * time.Millisecond, + TryFor: 2 * time.Millisecond, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + nc: dummyNetConn{}, + br: bufio.NewReader(bytes.NewReader(tt.input)), + logf: t.Logf, + clock: &tstest.Clock{}, + } + got, err := c.Recv() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %#v; want %#v", got, tt.want) + } + }) + } +} + +func TestClientSendPing(t *testing.T) { + var buf bytes.Buffer + c := &Client{ + bw: bufio.NewWriter(&buf), + } + if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + want := []byte{ + byte(FramePing), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + } + if !bytes.Equal(buf.Bytes(), want) { + t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) + } +} 
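
Aside (sketch, not part of the patch): the byte literals in TestClientRecv and TestClientSendPing spell out the DERP wire format by hand: every frame is a five-byte header, one FrameType byte plus a big-endian uint32 payload length, followed by the payload. The same round trip written against the helpers this change exports might look roughly like the code below; the example package and the pingRoundTrip function are hypothetical, added here only for illustration.

// Sketch only: writing and reading one DERP ping frame with the newly
// exported framing helpers. Each frame is one FrameType byte, a big-endian
// uint32 payload length, then the payload itself.
package example

import (
	"bufio"
	"bytes"
	"fmt"
	"io"

	"tailscale.com/derp"
)

func pingRoundTrip() error {
	var buf bytes.Buffer

	// Write a ping frame: 5-byte header (FramePing, length 8) + 8-byte payload.
	// WriteFrame flushes the bufio.Writer for us.
	bw := bufio.NewWriter(&buf)
	payload := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	if err := derp.WriteFrame(bw, derp.FramePing, payload); err != nil {
		return err
	}

	// Read it back: ReadFrameTypeHeader verifies the frame type and returns
	// the payload length, which we then read in full.
	br := bufio.NewReader(&buf)
	n, err := derp.ReadFrameTypeHeader(br, derp.FramePing)
	if err != nil {
		return err
	}
	got := make([]byte, n)
	if _, err := io.ReadFull(br, got); err != nil {
		return err
	}
	fmt.Printf("ping payload: % 02x\n", got)
	return nil
}

derp.Client wraps exactly this framing in SendPing and Recv; exporting the helpers presumably lets the derpserver package (split out elsewhere in this change) and external tests speak the same format.
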
+ +func TestClientSendPong(t *testing.T) { + var buf bytes.Buffer + c := &Client{ + bw: bufio.NewWriter(&buf), + } + if err := c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { + t.Fatal(err) + } + want := []byte{ + byte(FramePong), 0, 0, 0, 8, + 1, 2, 3, 4, 5, 6, 7, 8, + } + if !bytes.Equal(buf.Bytes(), want) { + t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) + } +} + +func BenchmarkWriteUint32(b *testing.B) { + w := bufio.NewWriter(io.Discard) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + writeUint32(w, 0x0ba3a) + } +} + +type nopRead struct{} + +func (r nopRead) Read(p []byte) (int, error) { + return len(p), nil +} + +var sinkU32 uint32 + +func BenchmarkReadUint32(b *testing.B) { + r := bufio.NewReader(nopRead{}) + var err error + b.ReportAllocs() + b.ResetTimer() + for range b.N { + sinkU32, err = readUint32(r) + if err != nil { + b.Fatal(err) + } + } +} + +type countWriter struct { + mu sync.Mutex + writes int + bytes int64 +} + +func (w *countWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + w.writes++ + w.bytes += int64(len(p)) + return len(p), nil +} + +func (w *countWriter) Stats() (writes int, bytes int64) { + w.mu.Lock() + defer w.mu.Unlock() + return w.writes, w.bytes +} + +func (w *countWriter) ResetStats() { + w.mu.Lock() + defer w.mu.Unlock() + w.writes, w.bytes = 0, 0 +} + +func TestClientSendRateLimiting(t *testing.T) { + cw := new(countWriter) + c := &Client{ + bw: bufio.NewWriter(cw), + clock: &tstest.Clock{}, + } + c.setSendRateLimiter(ServerInfoMessage{}) + + pkt := make([]byte, 1000) + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + writes1, bytes1 := cw.Stats() + if writes1 != 1 { + t.Errorf("writes = %v, want 1", writes1) + } + + // Flood should all succeed. + cw.ResetStats() + for range 1000 { + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + } + writes1K, bytes1K := cw.Stats() + if writes1K != 1000 { + t.Logf("writes = %v; want 1000", writes1K) + } + if got, want := bytes1K, bytes1*1000; got != want { + t.Logf("bytes = %v; want %v", got, want) + } + + // Set a rate limiter + cw.ResetStats() + c.setSendRateLimiter(ServerInfoMessage{ + TokenBucketBytesPerSecond: 1, + TokenBucketBytesBurst: int(bytes1 * 2), + }) + for range 1000 { + if err := c.send(key.NodePublic{}, pkt); err != nil { + t.Fatal(err) + } + } + writesLimited, bytesLimited := cw.Stats() + if writesLimited == 0 || writesLimited == writes1K { + t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited) + } + if bytesLimited < bytes1*2 || bytesLimited >= bytes1K { + t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K) + } +} diff --git a/derp/derp.go b/derp/derp.go index 24c1ca65cfb3c..e19a99b0025ce 100644 --- a/derp/derp.go +++ b/derp/derp.go @@ -27,15 +27,15 @@ import ( // including its on-wire framing overhead) const MaxPacketSize = 64 << 10 -// magic is the DERP magic number, sent in the frameServerKey frame +// Magic is the DERP Magic number, sent in the frameServerKey frame // upon initial connection. 
-const magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91 +const Magic = "DERP🔑" // 8 bytes: 0x44 45 52 50 f0 9f 94 91 const ( - nonceLen = 24 - frameHeaderLen = 1 + 4 // frameType byte + 4 byte length - keyLen = 32 - maxInfoLen = 1 << 20 + NonceLen = 24 + FrameHeaderLen = 1 + 4 // frameType byte + 4 byte length + KeyLen = 32 + MaxInfoLen = 1 << 20 ) // KeepAlive is the minimum frequency at which the DERP server sends @@ -48,10 +48,10 @@ const KeepAlive = 60 * time.Second // - version 2: received packets have src addrs in frameRecvPacket at beginning const ProtocolVersion = 2 -// frameType is the one byte frame type at the beginning of the frame +// FrameType is the one byte frame type at the beginning of the frame // header. The second field is a big-endian uint32 describing the // length of the remaining frame (not including the initial 5 bytes). -type frameType byte +type FrameType byte /* Protocol flow: @@ -69,14 +69,14 @@ Steady state: * server then sends frameRecvPacket to recipient */ const ( - frameServerKey = frameType(0x01) // 8B magic + 32B public key + (0+ bytes future use) - frameClientInfo = frameType(0x02) // 32B pub key + 24B nonce + naclbox(json) - frameServerInfo = frameType(0x03) // 24B nonce + naclbox(json) - frameSendPacket = frameType(0x04) // 32B dest pub key + packet bytes - frameForwardPacket = frameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes - frameRecvPacket = frameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes - frameKeepAlive = frameType(0x06) // no payload, no-op (to be replaced with ping/pong) - frameNotePreferred = frameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node + FrameServerKey = FrameType(0x01) // 8B magic + 32B public key + (0+ bytes future use) + FrameClientInfo = FrameType(0x02) // 32B pub key + 24B nonce + naclbox(json) + FrameServerInfo = FrameType(0x03) // 24B nonce + naclbox(json) + FrameSendPacket = FrameType(0x04) // 32B dest pub key + packet bytes + FrameForwardPacket = FrameType(0x0a) // 32B src pub key + 32B dst pub key + packet bytes + FrameRecvPacket = FrameType(0x05) // v0/1: packet bytes, v2: 32B src pub key + packet bytes + FrameKeepAlive = FrameType(0x06) // no payload, no-op (to be replaced with ping/pong) + FrameNotePreferred = FrameType(0x07) // 1 byte payload: 0x01 or 0x00 for whether this is client's home node // framePeerGone is sent from server to client to signal that // a previous sender is no longer connected. That is, if A @@ -85,7 +85,7 @@ const ( // exists on that connection to get back to A. It is also sent // if A tries to send a CallMeMaybe to B and the server has no // record of B - framePeerGone = frameType(0x08) // 32B pub key of peer that's gone + 1 byte reason + FramePeerGone = FrameType(0x08) // 32B pub key of peer that's gone + 1 byte reason // framePeerPresent is like framePeerGone, but for other members of the DERP // region when they're meshed up together. @@ -96,7 +96,7 @@ const ( // remaining after that, it's a PeerPresentFlags byte. // While current servers send 41 bytes, old servers will send fewer, and newer // servers might send more. - framePeerPresent = frameType(0x09) + FramePeerPresent = FrameType(0x09) // frameWatchConns is how one DERP node in a regional mesh // subscribes to the others in the region. @@ -104,30 +104,30 @@ const ( // is closed. 
Otherwise, the client is initially flooded with // framePeerPresent for all connected nodes, and then a stream of // framePeerPresent & framePeerGone has peers connect and disconnect. - frameWatchConns = frameType(0x10) + FrameWatchConns = FrameType(0x10) // frameClosePeer is a privileged frame type (requires the // mesh key for now) that closes the provided peer's // connection. (To be used for cluster load balancing // purposes, when clients end up on a non-ideal node) - frameClosePeer = frameType(0x11) // 32B pub key of peer to close. + FrameClosePeer = FrameType(0x11) // 32B pub key of peer to close. - framePing = frameType(0x12) // 8 byte ping payload, to be echoed back in framePong - framePong = frameType(0x13) // 8 byte payload, the contents of the ping being replied to + FramePing = FrameType(0x12) // 8 byte ping payload, to be echoed back in framePong + FramePong = FrameType(0x13) // 8 byte payload, the contents of the ping being replied to // frameHealth is sent from server to client to tell the client // if their connection is unhealthy somehow. Currently the only unhealthy state // is whether the connection is detected as a duplicate. // The entire frame body is the text of the error message. An empty message // clears the error state. - frameHealth = frameType(0x14) + FrameHealth = FrameType(0x14) // frameRestarting is sent from server to client for the // server to declare that it's restarting. Payload is two big // endian uint32 durations in milliseconds: when to reconnect, // and how long to try total. See ServerRestartingMessage docs for // more details on how the client should interpret them. - frameRestarting = frameType(0x15) + FrameRestarting = FrameType(0x15) ) // PeerGoneReasonType is a one byte reason code explaining why a @@ -154,6 +154,18 @@ const ( PeerPresentNotIdeal = 1 << 3 // client said derp server is not its Region.Nodes[0] ideal node ) +// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests +// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. +// The HTTP header value is the name of the node they wish they were connected +// to. This is an optional header. +const IdealNodeHeader = "Ideal-Node" + +// FastStartHeader is the header (with value "1") that signals to the HTTP +// server that the DERP HTTP client does not want the HTTP 101 response +// headers and it will begin writing & reading the DERP protocol immediately +// following its HTTP request. +const FastStartHeader = "Derp-Fast-Start" + var bin = binary.BigEndian func writeUint32(bw *bufio.Writer, v uint32) error { @@ -186,15 +198,24 @@ func readUint32(br *bufio.Reader) (uint32, error) { return bin.Uint32(b[:]), nil } -func readFrameTypeHeader(br *bufio.Reader, wantType frameType) (frameLen uint32, err error) { - gotType, frameLen, err := readFrameHeader(br) +// ReadFrameTypeHeader reads a frame header from br and +// verifies that the frame type matches wantType. +// +// If it does, it returns the frame length (not including +// the 5 byte header) and a nil error. +// +// If it doesn't, it returns an error and a zero length. 
+func ReadFrameTypeHeader(br *bufio.Reader, wantType FrameType) (frameLen uint32, err error) { + gotType, frameLen, err := ReadFrameHeader(br) if err == nil && wantType != gotType { err = fmt.Errorf("bad frame type 0x%X, want 0x%X", gotType, wantType) } return frameLen, err } -func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) { +// ReadFrameHeader reads the header of a DERP frame, +// reading 5 bytes from br. +func ReadFrameHeader(br *bufio.Reader) (t FrameType, frameLen uint32, err error) { tb, err := br.ReadByte() if err != nil { return 0, 0, err @@ -203,7 +224,7 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) if err != nil { return 0, 0, err } - return frameType(tb), frameLen, nil + return FrameType(tb), frameLen, nil } // readFrame reads a frame header and then reads its payload into @@ -216,8 +237,8 @@ func readFrameHeader(br *bufio.Reader) (t frameType, frameLen uint32, err error) // bytes are read, err will be io.ErrShortBuffer, and frameLen and t // will both be set. That is, callers need to explicitly handle when // they get more data than expected. -func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLen uint32, err error) { - t, frameLen, err = readFrameHeader(br) +func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t FrameType, frameLen uint32, err error) { + t, frameLen, err = ReadFrameHeader(br) if err != nil { return 0, 0, err } @@ -239,19 +260,26 @@ func readFrame(br *bufio.Reader, maxSize uint32, b []byte) (t frameType, frameLe return t, frameLen, err } -func writeFrameHeader(bw *bufio.Writer, t frameType, frameLen uint32) error { +// WriteFrameHeader writes a frame header to bw. +// +// The frame header is 5 bytes: a one byte frame type +// followed by a big-endian uint32 length of the +// remaining frame (not including the 5 byte header). +// +// It does not flush bw. +func WriteFrameHeader(bw *bufio.Writer, t FrameType, frameLen uint32) error { if err := bw.WriteByte(byte(t)); err != nil { return err } return writeUint32(bw, frameLen) } -// writeFrame writes a complete frame & flushes it. -func writeFrame(bw *bufio.Writer, t frameType, b []byte) error { +// WriteFrame writes a complete frame & flushes it. +func WriteFrame(bw *bufio.Writer, t FrameType, b []byte) error { if len(b) > 10<<20 { return errors.New("unreasonably large frame write") } - if err := writeFrameHeader(bw, t, uint32(len(b))); err != nil { + if err := WriteFrameHeader(bw, t, uint32(len(b))); err != nil { return err } if _, err := bw.Write(b); err != nil { @@ -270,3 +298,12 @@ type Conn interface { SetReadDeadline(time.Time) error SetWriteDeadline(time.Time) error } + +// ServerInfo is the message sent from the server to clients during +// the connection setup. 
+type ServerInfo struct { + Version int `json:"version,omitempty"` + + TokenBucketBytesPerSecond int `json:",omitempty"` + TokenBucketBytesBurst int `json:",omitempty"` +} diff --git a/derp/derp_client.go b/derp/derp_client.go index 69f35db1e2791..d28905cd2c8b2 100644 --- a/derp/derp_client.go +++ b/derp/derp_client.go @@ -133,17 +133,17 @@ func (c *Client) recvServerKey() error { if err != nil { return err } - if flen < uint32(len(buf)) || t != frameServerKey || string(buf[:len(magic)]) != magic { + if flen < uint32(len(buf)) || t != FrameServerKey || string(buf[:len(Magic)]) != Magic { return errors.New("invalid server greeting") } - c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(magic):])) + c.serverKey = key.NodePublicFromRaw32(mem.B(buf[len(Magic):])) return nil } -func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) { - const maxLength = nonceLen + maxInfoLen +func (c *Client) parseServerInfo(b []byte) (*ServerInfo, error) { + const maxLength = NonceLen + MaxInfoLen fl := len(b) - if fl < nonceLen { + if fl < NonceLen { return nil, fmt.Errorf("short serverInfo frame") } if fl > maxLength { @@ -153,14 +153,16 @@ func (c *Client) parseServerInfo(b []byte) (*serverInfo, error) { if !ok { return nil, fmt.Errorf("failed to open naclbox from server key %s", c.serverKey) } - info := new(serverInfo) + info := new(ServerInfo) if err := json.Unmarshal(msg, info); err != nil { return nil, fmt.Errorf("invalid JSON: %v", err) } return info, nil } -type clientInfo struct { +// ClientInfo is the information a DERP client sends to the server +// about itself when it connects. +type ClientInfo struct { // MeshKey optionally specifies a pre-shared key used by // trusted clients. It's required to subscribe to the // connection list & forward packets. It's empty for regular @@ -180,7 +182,7 @@ type clientInfo struct { } // Equal reports if two clientInfo values are equal. -func (c *clientInfo) Equal(other *clientInfo) bool { +func (c *ClientInfo) Equal(other *ClientInfo) bool { if c == nil || other == nil { return c == other } @@ -191,7 +193,7 @@ func (c *clientInfo) Equal(other *clientInfo) bool { } func (c *Client) sendClientKey() error { - msg, err := json.Marshal(clientInfo{ + msg, err := json.Marshal(ClientInfo{ Version: ProtocolVersion, MeshKey: c.meshKey, CanAckPings: c.canAckPings, @@ -202,10 +204,10 @@ func (c *Client) sendClientKey() error { } msgbox := c.privateKey.SealTo(c.serverKey, msg) - buf := make([]byte, 0, keyLen+len(msgbox)) + buf := make([]byte, 0, KeyLen+len(msgbox)) buf = c.publicKey.AppendTo(buf) buf = append(buf, msgbox...) - return writeFrame(c.bw, frameClientInfo, buf) + return WriteFrame(c.bw, FrameClientInfo, buf) } // ServerPublicKey returns the server's public key. 
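
Aside (sketch, not part of the patch): ServerInfo's token-bucket fields pair with the send path in the next hunk, which charges FrameHeaderLen plus the 32-byte destination key plus the payload length against c.rate before writing. A plausible way to build such a limiter with golang.org/x/time/rate is sketched below; the example package and both helper functions are hypothetical, and derp.Client wires this up internally (via its setSendRateLimiter, which the tests above exercise).

// Sketch only: turning ServerInfo's token-bucket fields into a send-side
// rate limiter like the one the client consults before writing a packet.
package example

import (
	"time"

	"golang.org/x/time/rate"
	"tailscale.com/derp"
)

// sendLimiter returns nil when the server did not advertise a byte budget,
// meaning sends are not locally rate limited.
func sendLimiter(si derp.ServerInfo) *rate.Limiter {
	if si.TokenBucketBytesPerSecond <= 0 {
		return nil
	}
	return rate.NewLimiter(rate.Limit(si.TokenBucketBytesPerSecond), si.TokenBucketBytesBurst)
}

// allowSend reports whether a packet of len(pkt) bytes to one destination
// may be sent now, charging the same per-frame overhead the client's send
// path uses: the 5-byte frame header plus the 32-byte destination key.
func allowSend(lim *rate.Limiter, pkt []byte) bool {
	if lim == nil {
		return true
	}
	cost := derp.FrameHeaderLen + derp.KeyLen + len(pkt)
	return lim.AllowN(time.Now(), cost)
}
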
@@ -230,12 +232,12 @@ func (c *Client) send(dstKey key.NodePublic, pkt []byte) (ret error) { c.wmu.Lock() defer c.wmu.Unlock() if c.rate != nil { - pktLen := frameHeaderLen + key.NodePublicRawLen + len(pkt) + pktLen := FrameHeaderLen + key.NodePublicRawLen + len(pkt) if !c.rate.AllowN(c.clock.Now(), pktLen) { return nil // drop } } - if err := writeFrameHeader(c.bw, frameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil { + if err := WriteFrameHeader(c.bw, FrameSendPacket, uint32(key.NodePublicRawLen+len(pkt))); err != nil { return err } if _, err := c.bw.Write(dstKey.AppendTo(nil)); err != nil { @@ -264,7 +266,7 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e timer := c.clock.AfterFunc(5*time.Second, c.writeTimeoutFired) defer timer.Stop() - if err := writeFrameHeader(c.bw, frameForwardPacket, uint32(keyLen*2+len(pkt))); err != nil { + if err := WriteFrameHeader(c.bw, FrameForwardPacket, uint32(KeyLen*2+len(pkt))); err != nil { return err } if _, err := c.bw.Write(srcKey.AppendTo(nil)); err != nil { @@ -282,17 +284,17 @@ func (c *Client) ForwardPacket(srcKey, dstKey key.NodePublic, pkt []byte) (err e func (c *Client) writeTimeoutFired() { c.nc.Close() } func (c *Client) SendPing(data [8]byte) error { - return c.sendPingOrPong(framePing, data) + return c.sendPingOrPong(FramePing, data) } func (c *Client) SendPong(data [8]byte) error { - return c.sendPingOrPong(framePong, data) + return c.sendPingOrPong(FramePong, data) } -func (c *Client) sendPingOrPong(typ frameType, data [8]byte) error { +func (c *Client) sendPingOrPong(typ FrameType, data [8]byte) error { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, typ, 8); err != nil { + if err := WriteFrameHeader(c.bw, typ, 8); err != nil { return err } if _, err := c.bw.Write(data[:]); err != nil { @@ -314,7 +316,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, frameNotePreferred, 1); err != nil { + if err := WriteFrameHeader(c.bw, FrameNotePreferred, 1); err != nil { return err } var b byte = 0x00 @@ -332,7 +334,7 @@ func (c *Client) NotePreferred(preferred bool) (err error) { func (c *Client) WatchConnectionChanges() error { c.wmu.Lock() defer c.wmu.Unlock() - if err := writeFrameHeader(c.bw, frameWatchConns, 0); err != nil { + if err := WriteFrameHeader(c.bw, FrameWatchConns, 0); err != nil { return err } return c.bw.Flush() @@ -343,7 +345,7 @@ func (c *Client) WatchConnectionChanges() error { func (c *Client) ClosePeer(target key.NodePublic) error { c.wmu.Lock() defer c.wmu.Unlock() - return writeFrame(c.bw, frameClosePeer, target.AppendTo(nil)) + return WriteFrame(c.bw, FrameClosePeer, target.AppendTo(nil)) } // ReceivedMessage represents a type returned by Client.Recv. Unless @@ -502,7 +504,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro c.peeked = 0 } - t, n, err := readFrameHeader(c.br) + t, n, err := ReadFrameHeader(c.br) if err != nil { return nil, err } @@ -533,7 +535,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro switch t { default: continue - case frameServerInfo: + case FrameServerInfo: // Server sends this at start-up. Currently unused. 
// Just has a JSON message saying "version: 2", // but the protocol seems extensible enough as-is without @@ -550,29 +552,29 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro } c.setSendRateLimiter(sm) return sm, nil - case frameKeepAlive: + case FrameKeepAlive: // A one-way keep-alive message that doesn't require an acknowledgement. // This predated framePing/framePong. return KeepAliveMessage{}, nil - case framePeerGone: - if n < keyLen { + case FramePeerGone: + if n < KeyLen { c.logf("[unexpected] dropping short peerGone frame from DERP server") continue } // Backward compatibility for the older peerGone without reason byte reason := PeerGoneReasonDisconnected - if n > keyLen { - reason = PeerGoneReasonType(b[keyLen]) + if n > KeyLen { + reason = PeerGoneReasonType(b[KeyLen]) } pg := PeerGoneMessage{ - Peer: key.NodePublicFromRaw32(mem.B(b[:keyLen])), + Peer: key.NodePublicFromRaw32(mem.B(b[:KeyLen])), Reason: reason, } return pg, nil - case framePeerPresent: + case FramePeerPresent: remain := b - chunk, remain, ok := cutLeadingN(remain, keyLen) + chunk, remain, ok := cutLeadingN(remain, KeyLen) if !ok { c.logf("[unexpected] dropping short peerPresent frame from DERP server") continue @@ -600,17 +602,17 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro msg.Flags = PeerPresentFlags(chunk[0]) return msg, nil - case frameRecvPacket: + case FrameRecvPacket: var rp ReceivedPacket - if n < keyLen { + if n < KeyLen { c.logf("[unexpected] dropping short packet from DERP server") continue } - rp.Source = key.NodePublicFromRaw32(mem.B(b[:keyLen])) - rp.Data = b[keyLen:n] + rp.Source = key.NodePublicFromRaw32(mem.B(b[:KeyLen])) + rp.Data = b[KeyLen:n] return rp, nil - case framePing: + case FramePing: var pm PingMessage if n < 8 { c.logf("[unexpected] dropping short ping frame") @@ -619,7 +621,7 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro copy(pm[:], b[:]) return pm, nil - case framePong: + case FramePong: var pm PongMessage if n < 8 { c.logf("[unexpected] dropping short ping frame") @@ -628,10 +630,10 @@ func (c *Client) recvTimeout(timeout time.Duration) (m ReceivedMessage, err erro copy(pm[:], b[:]) return pm, nil - case frameHealth: + case FrameHealth: return HealthMessage{Problem: string(b[:])}, nil - case frameRestarting: + case FrameRestarting: var m ServerRestartingMessage if n < 8 { c.logf("[unexpected] dropping short server restarting frame") diff --git a/derp/derp_test.go b/derp/derp_test.go index 9d07e159b4584..52793f90fa9f5 100644 --- a/derp/derp_test.go +++ b/derp/derp_test.go @@ -1,59 +1,56 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derp +package derp_test import ( "bufio" "bytes" - "cmp" "context" - "crypto/x509" - "encoding/asn1" "encoding/json" "errors" "expvar" "fmt" "io" - "log" "net" - "os" - "reflect" - "strconv" "strings" "sync" "testing" "time" - qt "github.com/frankban/quicktest" - "go4.org/mem" - "golang.org/x/time/rate" - "tailscale.com/derp/derpconst" + "tailscale.com/derp" + "tailscale.com/derp/derpserver" "tailscale.com/disco" + "tailscale.com/metrics" "tailscale.com/net/memnet" - "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/util/must" ) +type ( + ClientInfo = derp.ClientInfo + Conn = derp.Conn + Client = derp.Client +) + func TestClientInfoUnmarshal(t *testing.T) { for i, in := range map[string]struct { json string - want *clientInfo + want *ClientInfo wantErr 
string }{ "empty": { json: `{}`, - want: &clientInfo{}, + want: &ClientInfo{}, }, "valid": { json: `{"Version":5,"MeshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, - want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + want: &ClientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, }, "validLowerMeshKey": { json: `{"version":5,"meshKey":"6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8"}`, - want: &clientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, + want: &ClientInfo{MeshKey: must.Get(key.ParseDERPMesh("6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8")), Version: 5}, }, "invalidMeshKeyToShort": { json: `{"version":5,"meshKey":"abcdefg"}`, @@ -66,7 +63,7 @@ func TestClientInfoUnmarshal(t *testing.T) { } { t.Run(i, func(t *testing.T) { t.Parallel() - var got clientInfo + var got ClientInfo err := json.Unmarshal([]byte(in.json), &got) if in.wantErr != "" { if err == nil || !strings.Contains(err.Error(), in.wantErr) { @@ -86,7 +83,7 @@ func TestClientInfoUnmarshal(t *testing.T) { func TestSendRecv(t *testing.T) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() const numClients = 3 @@ -132,7 +129,7 @@ func TestSendRecv(t *testing.T) { key := clientPrivateKeys[i] brw := bufio.NewReadWriter(bufio.NewReader(cout), bufio.NewWriter(cout)) - c, err := NewClient(key, cout, brw, t.Logf) + c, err := derp.NewClient(key, cout, brw, t.Logf) if err != nil { t.Fatalf("client %d: %v", i, err) } @@ -159,16 +156,16 @@ func TestSendRecv(t *testing.T) { default: t.Errorf("unexpected message type %T", m) continue - case PeerGoneMessage: + case derp.PeerGoneMessage: switch m.Reason { - case PeerGoneReasonDisconnected: + case derp.PeerGoneReasonDisconnected: peerGoneCountDisconnected.Add(1) - case PeerGoneReasonNotHere: + case derp.PeerGoneReasonNotHere: peerGoneCountNotHere.Add(1) default: t.Errorf("unexpected PeerGone reason %v", m.Reason) } - case ReceivedPacket: + case derp.ReceivedPacket: if m.Source.IsZero() { t.Errorf("zero Source address in ReceivedPacket") } @@ -198,12 +195,15 @@ func TestSendRecv(t *testing.T) { } } + serverMetrics := s.ExpVar().(*metrics.Set) + wantActive := func(total, home int64) { t.Helper() dl := time.Now().Add(5 * time.Second) var gotTotal, gotHome int64 for time.Now().Before(dl) { - gotTotal, gotHome = s.curClients.Value(), s.curHomeClients.Value() + gotTotal = serverMetrics.Get("gauge_current_connections").(*expvar.Int).Value() + gotHome = serverMetrics.Get("gauge_current_home_connections").(*expvar.Int).Value() if gotTotal == total && gotHome == home { return } @@ -305,7 +305,7 @@ func TestSendRecv(t *testing.T) { func TestSendFreeze(t *testing.T) { serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() s.WriteTimeout = 100 * time.Millisecond @@ -323,7 +323,7 @@ func TestSendFreeze(t *testing.T) { go s.Accept(ctx, c1, bufio.NewReadWriter(bufio.NewReader(c1), bufio.NewWriter(c1)), name) brw := bufio.NewReadWriter(bufio.NewReader(c2), bufio.NewWriter(c2)) - c, err := NewClient(k, c2, brw, t.Logf) + c, err := derp.NewClient(k, c2, brw, t.Logf) if err != nil { t.Fatal(err) } @@ -374,7 +374,7 @@ func 
TestSendFreeze(t *testing.T) { default: errCh <- fmt.Errorf("%s: unexpected message type %T", name, m) return - case ReceivedPacket: + case derp.ReceivedPacket: if m.Source.IsZero() { errCh <- fmt.Errorf("%s: zero Source address in ReceivedPacket", name) return @@ -504,7 +504,7 @@ func TestSendFreeze(t *testing.T) { } type testServer struct { - s *Server + s *derpserver.Server ln net.Listener logf logger.Logf @@ -549,7 +549,7 @@ const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789a func newTestServer(t *testing.T, ctx context.Context) *testServer { t.Helper() logf := logger.WithPrefix(t.Logf, "derp-server: ") - s := NewServer(key.NewNode(), logf) + s := derpserver.New(key.NewNode(), logf) s.SetMeshKey(testMeshKey) ln, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -614,7 +614,7 @@ func newTestClient(t *testing.T, ts *testServer, name string, newClient func(net func newRegularClient(t *testing.T, ts *testServer, name string) *testClient { return newTestClient(t, ts, name, func(nc net.Conn, priv key.NodePrivate, logf logger.Logf) (*Client, error) { brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf) + c, err := derp.NewClient(priv, nc, brw, logf) if err != nil { return nil, err } @@ -631,7 +631,7 @@ func newTestWatcher(t *testing.T, ts *testServer, name string) *testClient { return nil, err } brw := bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)) - c, err := NewClient(priv, nc, brw, logf, MeshKey(mk)) + c, err := derp.NewClient(priv, nc, brw, logf, derp.MeshKey(mk)) if err != nil { return nil, err } @@ -651,12 +651,12 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { } for { - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PeerPresentMessage: + case derp.PeerPresentMessage: got := m.Key if !want[got] { t.Fatalf("got peer present for %v; want present for %v", tc.ts.keyName(got), logger.ArgWriter(func(bw *bufio.Writer) { @@ -667,7 +667,7 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { } t.Logf("got present with IP %v, flags=%v", m.IPPort, m.Flags) switch m.Flags { - case PeerPresentIsMeshPeer, PeerPresentIsRegular: + case derp.PeerPresentIsMeshPeer, derp.PeerPresentIsRegular: // Okay default: t.Errorf("unexpected PeerPresentIsMeshPeer flags %v", m.Flags) @@ -684,19 +684,19 @@ func (tc *testClient) wantPresent(t *testing.T, peers ...key.NodePublic) { func (tc *testClient) wantGone(t *testing.T, peer key.NodePublic) { t.Helper() - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PeerGoneMessage: + case derp.PeerGoneMessage: got := key.NodePublic(m.Peer) if peer != got { t.Errorf("got gone message for %v; want gone for %v", tc.ts.keyName(got), tc.ts.keyName(peer)) } reason := m.Reason - if reason != PeerGoneReasonDisconnected { - t.Errorf("got gone message for reason %v; wanted %v", reason, PeerGoneReasonDisconnected) + if reason != derp.PeerGoneReasonDisconnected { + t.Errorf("got gone message for reason %v; wanted %v", reason, derp.PeerGoneReasonDisconnected) } default: t.Fatalf("unexpected message type %T", m) @@ -754,863 +754,15 @@ func TestWatch(t *testing.T) { w3.wantGone(t, c1.pub) } -type testFwd int - -func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error { - panic("not called in tests") -} -func 
(testFwd) String() string { - panic("not called in tests") -} - -func pubAll(b byte) (ret key.NodePublic) { - var bs [32]byte - for i := range bs { - bs[i] = b - } - return key.NodePublicFromRaw32(mem.B(bs[:])) -} - -func TestForwarderRegistration(t *testing.T) { - s := &Server{ - clients: make(map[key.NodePublic]*clientSet), - clientsMesh: map[key.NodePublic]PacketForwarder{}, - } - want := func(want map[key.NodePublic]PacketForwarder) { - t.Helper() - if got := s.clientsMesh; !reflect.DeepEqual(got, want) { - t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want) - } - } - wantCounter := func(c *expvar.Int, want int) { - t.Helper() - if got := c.Value(); got != int64(want) { - t.Errorf("counter = %v; want %v", got, want) - } - } - singleClient := func(c *sclient) *clientSet { - cs := &clientSet{} - cs.activeClient.Store(c) - return cs - } - - u1 := pubAll(1) - u2 := pubAll(2) - u3 := pubAll(3) - - s.AddPacketForwarder(u1, testFwd(1)) - s.AddPacketForwarder(u2, testFwd(2)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Verify a remove of non-registered forwarder is no-op. - s.RemovePacketForwarder(u2, testFwd(999)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Verify a remove of non-registered user is no-op. - s.RemovePacketForwarder(u3, testFwd(1)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - u2: testFwd(2), - }) - - // Actual removal. - s.RemovePacketForwarder(u2, testFwd(2)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(1), - }) - - // Adding a dup for a user. - wantCounter(&s.multiForwarderCreated, 0) - s.AddPacketForwarder(u1, testFwd(100)) - s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path - want(map[key.NodePublic]PacketForwarder{ - u1: newMultiForwarder(testFwd(1), testFwd(100)), - }) - wantCounter(&s.multiForwarderCreated, 1) - - // Removing a forwarder in a multi set that doesn't exist; does nothing. - s.RemovePacketForwarder(u1, testFwd(55)) - want(map[key.NodePublic]PacketForwarder{ - u1: newMultiForwarder(testFwd(1), testFwd(100)), - }) - - // Removing a forwarder in a multi set that does exist should collapse it away - // from being a multiForwarder. - wantCounter(&s.multiForwarderDeleted, 0) - s.RemovePacketForwarder(u1, testFwd(1)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(100), - }) - wantCounter(&s.multiForwarderDeleted, 1) - - // Removing an entry for a client that's still connected locally should result - // in a nil forwarder. - u1c := &sclient{ - key: u1, - logf: logger.Discard, - } - s.clients[u1] = singleClient(u1c) - s.RemovePacketForwarder(u1, testFwd(100)) - want(map[key.NodePublic]PacketForwarder{ - u1: nil, - }) - - // But once that client disconnects, it should go away. - s.unregisterClient(u1c) - want(map[key.NodePublic]PacketForwarder{}) - - // But if it already has a forwarder, it's not removed. - s.AddPacketForwarder(u1, testFwd(2)) - s.unregisterClient(u1c) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(2), - }) - - // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard - // that they're also connected to a peer of ours. That shouldn't transition the forwarder - // from nil to the new one, not a multiForwarder. 
- s.clients[u1] = singleClient(u1c) - s.clientsMesh[u1] = nil - want(map[key.NodePublic]PacketForwarder{ - u1: nil, - }) - s.AddPacketForwarder(u1, testFwd(3)) - want(map[key.NodePublic]PacketForwarder{ - u1: testFwd(3), - }) -} - -type channelFwd struct { - // id is to ensure that different instances that reference the - // same channel are not equal, as they are used as keys in the - // multiForwarder map. - id int - c chan []byte -} - -func (f channelFwd) String() string { return "" } -func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error { - f.c <- packet - return nil -} - -func TestMultiForwarder(t *testing.T) { - received := 0 - var wg sync.WaitGroup - ch := make(chan []byte) - ctx, cancel := context.WithCancel(context.Background()) - - s := &Server{ - clients: make(map[key.NodePublic]*clientSet), - clientsMesh: map[key.NodePublic]PacketForwarder{}, - } - u := pubAll(1) - s.AddPacketForwarder(u, channelFwd{1, ch}) - - wg.Add(2) - go func() { - defer wg.Done() - for { - select { - case <-ch: - received += 1 - case <-ctx.Done(): - return - } - } - }() - go func() { - defer wg.Done() - for { - s.AddPacketForwarder(u, channelFwd{2, ch}) - s.AddPacketForwarder(u, channelFwd{3, ch}) - s.RemovePacketForwarder(u, channelFwd{2, ch}) - s.RemovePacketForwarder(u, channelFwd{1, ch}) - s.AddPacketForwarder(u, channelFwd{1, ch}) - s.RemovePacketForwarder(u, channelFwd{3, ch}) - if ctx.Err() != nil { - return - } - } - }() - - // Number of messages is chosen arbitrarily, just for this loop to - // run long enough concurrently with {Add,Remove}PacketForwarder loop above. - numMsgs := 5000 - var fwd PacketForwarder - for i := range numMsgs { - s.mu.Lock() - fwd = s.clientsMesh[u] - s.mu.Unlock() - fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i))) - } - - cancel() - wg.Wait() - if received != numMsgs { - t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received) - } -} -func TestMetaCert(t *testing.T) { - priv := key.NewNode() - pub := priv.Public() - s := NewServer(priv, t.Logf) - - certBytes := s.MetaCert() - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - log.Fatal(err) - } - if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(ProtocolVersion) { - t.Errorf("serial = %v; want %v", cert.SerialNumber, ProtocolVersion) - } - if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { - t.Errorf("CommonName = %q; want %q", g, w) - } - if n := len(cert.Extensions); n != 1 { - t.Fatalf("got %d extensions; want 1", n) - } - - // oidExtensionBasicConstraints is the Basic Constraints ID copied - // from the x509 package. 
- oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19} - - if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) { - t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints) - } -} - -type dummyNetConn struct { - net.Conn -} - -func (dummyNetConn) SetReadDeadline(time.Time) error { return nil } - -func TestClientRecv(t *testing.T) { - tests := []struct { - name string - input []byte - want any - }{ - { - name: "ping", - input: []byte{ - byte(framePing), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - }, - want: PingMessage{1, 2, 3, 4, 5, 6, 7, 8}, - }, - { - name: "pong", - input: []byte{ - byte(framePong), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - }, - want: PongMessage{1, 2, 3, 4, 5, 6, 7, 8}, - }, - { - name: "health_bad", - input: []byte{ - byte(frameHealth), 0, 0, 0, 3, - byte('B'), byte('A'), byte('D'), - }, - want: HealthMessage{Problem: "BAD"}, - }, - { - name: "health_ok", - input: []byte{ - byte(frameHealth), 0, 0, 0, 0, - }, - want: HealthMessage{}, - }, - { - name: "server_restarting", - input: []byte{ - byte(frameRestarting), 0, 0, 0, 8, - 0, 0, 0, 1, - 0, 0, 0, 2, - }, - want: ServerRestartingMessage{ - ReconnectIn: 1 * time.Millisecond, - TryFor: 2 * time.Millisecond, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - c := &Client{ - nc: dummyNetConn{}, - br: bufio.NewReader(bytes.NewReader(tt.input)), - logf: t.Logf, - clock: &tstest.Clock{}, - } - got, err := c.Recv() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("got %#v; want %#v", got, tt.want) - } - }) - } -} - -func TestClientSendPing(t *testing.T) { - var buf bytes.Buffer - c := &Client{ - bw: bufio.NewWriter(&buf), - } - if err := c.SendPing([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - want := []byte{ - byte(framePing), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - } - if !bytes.Equal(buf.Bytes(), want) { - t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) - } -} - -func TestClientSendPong(t *testing.T) { - var buf bytes.Buffer - c := &Client{ - bw: bufio.NewWriter(&buf), - } - if err := c.SendPong([8]byte{1, 2, 3, 4, 5, 6, 7, 8}); err != nil { - t.Fatal(err) - } - want := []byte{ - byte(framePong), 0, 0, 0, 8, - 1, 2, 3, 4, 5, 6, 7, 8, - } - if !bytes.Equal(buf.Bytes(), want) { - t.Errorf("unexpected output\nwrote: % 02x\n want: % 02x", buf.Bytes(), want) - } -} - -func TestServerDupClients(t *testing.T) { - serverPriv := key.NewNode() - var s *Server - - clientPriv := key.NewNode() - clientPub := clientPriv.Public() - - var c1, c2, c3 *sclient - var clientName map[*sclient]string - - // run starts a new test case and resets clients back to their zero values. 
- run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { - s = NewServer(serverPriv, t.Logf) - s.dupPolicy = dupPolicy - c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} - c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} - c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")} - clientName = map[*sclient]string{ - c1: "c1", - c2: "c2", - c3: "c3", - } - t.Run(name, f) - } - runBothWays := func(name string, f func(t *testing.T)) { - run(name+"_disablefighters", disableFighters, f) - run(name+"_lastwriteractive", lastWriterIsActive, f) - } - wantSingleClient := func(t *testing.T, want *sclient) { - t.Helper() - got, ok := s.clients[want.key] - if !ok { - t.Error("no clients for key") - return - } - if got.dup != nil { - t.Errorf("unexpected dup set for single client") - } - cur := got.activeClient.Load() - if cur != want { - t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) - } - if cur != nil { - if cur.isDup.Load() { - t.Errorf("unexpected isDup on singleClient") - } - if cur.isDisabled.Load() { - t.Errorf("unexpected isDisabled on singleClient") - } - } - } - wantNoClient := func(t *testing.T) { - t.Helper() - _, ok := s.clients[clientPub] - if !ok { - // Good - return - } - t.Errorf("got client; want empty") - } - wantDupSet := func(t *testing.T) *dupClientSet { - t.Helper() - cs, ok := s.clients[clientPub] - if !ok { - t.Fatal("no set for key; want dup set") - return nil - } - if cs.dup != nil { - return cs.dup - } - t.Fatalf("no dup set for key; want dup set") - return nil - } - wantActive := func(t *testing.T, want *sclient) { - t.Helper() - set, ok := s.clients[clientPub] - if !ok { - t.Error("no set for key") - return - } - got := set.activeClient.Load() - if got != want { - t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) - } - } - checkDup := func(t *testing.T, c *sclient, want bool) { - t.Helper() - if got := c.isDup.Load(); got != want { - t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want) - } - } - checkDisabled := func(t *testing.T, c *sclient, want bool) { - t.Helper() - if got := c.isDisabled.Load(); got != want { - t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want) - } - } - wantDupConns := func(t *testing.T, want int) { - t.Helper() - if got := s.dupClientConns.Value(); got != int64(want) { - t.Errorf("dupClientConns = %v; want %v", got, want) - } - } - wantDupKeys := func(t *testing.T, want int) { - t.Helper() - if got := s.dupClientKeys.Value(); got != int64(want) { - t.Errorf("dupClientKeys = %v; want %v", got, want) - } - } - - // Common case: a single client comes and goes, with no dups. - runBothWays("one_comes_and_goes", func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - s.unregisterClient(c1) - wantNoClient(t) - }) - - // A still somewhat common case: a single client was - // connected and then their wifi dies or laptop closes - // or they switch networks and connect from a - // different network. They have two connections but - // it's not very bad. Only their new one is - // active. The last one, being dead, doesn't send and - // thus the new one doesn't get disabled. 
- runBothWays("small_overlap_replacement", func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - wantActive(t, c1) - wantDupKeys(t, 0) - wantDupKeys(t, 0) - - s.registerClient(c2) // wifi dies; c2 replacement connects - wantDupSet(t) - wantDupConns(t, 2) - wantDupKeys(t, 1) - checkDup(t, c1, true) - checkDup(t, c2, true) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c2) // sends go to the replacement - - s.unregisterClient(c1) // c1 finally times out - wantSingleClient(t, c2) - checkDup(t, c2, false) // c2 is longer a dup - wantActive(t, c2) - wantDupConns(t, 0) - wantDupKeys(t, 0) - }) - - // Key cloning situation with concurrent clients, both trying - // to write. - run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - wantSingleClient(t, c1) - wantActive(t, c1) - s.registerClient(c2) - wantDupSet(t) - wantDupKeys(t, 1) - wantDupConns(t, 2) - wantActive(t, c2) - checkDup(t, c1, true) - checkDup(t, c2, true) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - - s.noteClientActivity(c2) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - s.noteClientActivity(c1) - checkDisabled(t, c1, true) - checkDisabled(t, c2, true) - wantActive(t, nil) - - s.registerClient(c3) - wantActive(t, c3) - checkDisabled(t, c3, false) - wantDupKeys(t, 1) - wantDupConns(t, 3) - - s.unregisterClient(c3) - wantActive(t, nil) - wantDupKeys(t, 1) - wantDupConns(t, 2) - - s.unregisterClient(c2) - wantSingleClient(t, c1) - wantDupKeys(t, 0) - wantDupConns(t, 0) - }) - - // Key cloning with an A->B->C->A series instead. - run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) { - wantNoClient(t) - s.registerClient(c1) - s.registerClient(c2) - s.registerClient(c3) - s.noteClientActivity(c1) - checkDisabled(t, c1, true) - checkDisabled(t, c2, true) - checkDisabled(t, c3, true) - wantActive(t, nil) - }) - - run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) { - wantNoClient(t) - - // Last registered client is the active one... - s.registerClient(c1) - wantActive(t, c1) - s.registerClient(c2) - wantActive(t, c2) - s.registerClient(c3) - s.noteClientActivity(c2) - wantActive(t, c3) - - // But if the last one goes away, the one with the - // most recent activity wins. - s.unregisterClient(c3) - wantActive(t, c2) - }) - - run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) { - wantNoClient(t) - - s.registerClient(c1) - wantActive(t, c1) - s.registerClient(c2) - wantActive(t, c2) - - s.noteClientActivity(c1) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c1) - - s.noteClientActivity(c2) - checkDisabled(t, c1, false) - checkDisabled(t, c2, false) - wantActive(t, c2) - - s.unregisterClient(c2) - checkDisabled(t, c1, false) - wantActive(t, c1) - }) -} - -func TestLimiter(t *testing.T) { - rl := rate.NewLimiter(rate.Every(time.Minute), 100) - for i := range 200 { - r := rl.Reserve() - d := r.Delay() - t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d) - } -} - -// BenchmarkConcurrentStreams exercises mutex contention on a -// single Server instance with multiple concurrent client flows. 
-func BenchmarkConcurrentStreams(b *testing.B) { - serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) - defer s.Close() - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - b.Fatal(err) - } - defer ln.Close() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - go func() { - for ctx.Err() == nil { - connIn, err := ln.Accept() - if err != nil { - if ctx.Err() != nil { - return - } - b.Error(err) - return - } - - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) - go s.Accept(ctx, connIn, brwServer, "test-client") - } - }() - - newClient := func(t testing.TB) *Client { - t.Helper() - connOut, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - b.Fatal(err) - } - t.Cleanup(func() { connOut.Close() }) - - k := key.NewNode() - - brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) - client, err := NewClient(k, connOut, brw, logger.Discard) - if err != nil { - b.Fatalf("client: %v", err) - } - return client - } - - b.RunParallel(func(pb *testing.PB) { - c1, c2 := newClient(b), newClient(b) - const packetSize = 100 - msg := make([]byte, packetSize) - for pb.Next() { - if err := c1.Send(c2.PublicKey(), msg); err != nil { - b.Fatal(err) - } - _, err := c2.Recv() - if err != nil { - return - } - } - }) -} - -func BenchmarkSendRecv(b *testing.B) { - for _, size := range []int{10, 100, 1000, 10000} { - b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) - } -} - -func benchmarkSendRecvSize(b *testing.B, packetSize int) { - serverPrivateKey := key.NewNode() - s := NewServer(serverPrivateKey, logger.Discard) - defer s.Close() - - k := key.NewNode() - clientKey := k.Public() - - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - b.Fatal(err) - } - defer ln.Close() - - connOut, err := net.Dial("tcp", ln.Addr().String()) - if err != nil { - b.Fatal(err) - } - defer connOut.Close() - - connIn, err := ln.Accept() - if err != nil { - b.Fatal(err) - } - defer connIn.Close() - - brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go s.Accept(ctx, connIn, brwServer, "test-client") - - brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) - client, err := NewClient(k, connOut, brw, logger.Discard) - if err != nil { - b.Fatalf("client: %v", err) - } - - go func() { - for { - _, err := client.Recv() - if err != nil { - return - } - } - }() - - msg := make([]byte, packetSize) - b.SetBytes(int64(len(msg))) - b.ReportAllocs() - b.ResetTimer() - for range b.N { - if err := client.Send(clientKey, msg); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkWriteUint32(b *testing.B) { - w := bufio.NewWriter(io.Discard) - b.ReportAllocs() - b.ResetTimer() - for range b.N { - writeUint32(w, 0x0ba3a) - } -} - -type nopRead struct{} - -func (r nopRead) Read(p []byte) (int, error) { - return len(p), nil -} - -var sinkU32 uint32 - -func BenchmarkReadUint32(b *testing.B) { - r := bufio.NewReader(nopRead{}) - var err error - b.ReportAllocs() - b.ResetTimer() - for range b.N { - sinkU32, err = readUint32(r) - if err != nil { - b.Fatal(err) - } - } -} - func waitConnect(t testing.TB, c *Client) { t.Helper() if m, err := c.Recv(); err != nil { t.Fatalf("client first Recv: %v", err) - } else if v, ok := m.(ServerInfoMessage); !ok { + } else if v, ok := m.(derp.ServerInfoMessage); !ok { 
t.Fatalf("client first Recv was unexpected type %T", v) } } -func TestParseSSOutput(t *testing.T) { - contents, err := os.ReadFile("testdata/example_ss.txt") - if err != nil { - t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err) - } - seen := parseSSOutput(string(contents)) - if len(seen) == 0 { - t.Errorf("parseSSOutput expected non-empty map") - } -} - -type countWriter struct { - mu sync.Mutex - writes int - bytes int64 -} - -func (w *countWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - w.writes++ - w.bytes += int64(len(p)) - return len(p), nil -} - -func (w *countWriter) Stats() (writes int, bytes int64) { - w.mu.Lock() - defer w.mu.Unlock() - return w.writes, w.bytes -} - -func (w *countWriter) ResetStats() { - w.mu.Lock() - defer w.mu.Unlock() - w.writes, w.bytes = 0, 0 -} - -func TestClientSendRateLimiting(t *testing.T) { - cw := new(countWriter) - c := &Client{ - bw: bufio.NewWriter(cw), - clock: &tstest.Clock{}, - } - c.setSendRateLimiter(ServerInfoMessage{}) - - pkt := make([]byte, 1000) - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - writes1, bytes1 := cw.Stats() - if writes1 != 1 { - t.Errorf("writes = %v, want 1", writes1) - } - - // Flood should all succeed. - cw.ResetStats() - for range 1000 { - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - } - writes1K, bytes1K := cw.Stats() - if writes1K != 1000 { - t.Logf("writes = %v; want 1000", writes1K) - } - if got, want := bytes1K, bytes1*1000; got != want { - t.Logf("bytes = %v; want %v", got, want) - } - - // Set a rate limiter - cw.ResetStats() - c.setSendRateLimiter(ServerInfoMessage{ - TokenBucketBytesPerSecond: 1, - TokenBucketBytesBurst: int(bytes1 * 2), - }) - for range 1000 { - if err := c.send(key.NodePublic{}, pkt); err != nil { - t.Fatal(err) - } - } - writesLimited, bytesLimited := cw.Stats() - if writesLimited == 0 || writesLimited == writes1K { - t.Errorf("limited conn's write count = %v; want non-zero, less than 1k", writesLimited) - } - if bytesLimited < bytes1*2 || bytesLimited >= bytes1K { - t.Errorf("limited conn's bytes count = %v; want >=%v, <%v", bytesLimited, bytes1K*2, bytes1K) - } -} - func TestServerRepliesToPing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1627,12 +779,12 @@ func TestServerRepliesToPing(t *testing.T) { } for { - m, err := tc.c.recvTimeout(time.Second) + m, err := tc.c.RecvTimeoutForTest(time.Second) if err != nil { t.Fatal(err) } switch m := m.(type) { - case PongMessage: + case derp.PongMessage: if ([8]byte(m)) != data { t.Fatalf("got pong %2x; want %2x", [8]byte(m), data) } @@ -1640,122 +792,3 @@ func TestServerRepliesToPing(t *testing.T) { } } } - -func TestGetPerClientSendQueueDepth(t *testing.T) { - c := qt.New(t) - envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" - - testCases := []struct { - envVal string - want int - }{ - // Empty case, envknob treats empty as missing also. 
- { - "", defaultPerClientSendQueueDepth, - }, - { - "64", 64, - }, - } - - for _, tc := range testCases { - t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { - t.Setenv(envKey, tc.envVal) - val := getPerClientSendQueueDepth() - c.Assert(val, qt.Equals, tc.want) - }) - } -} - -func TestSetMeshKey(t *testing.T) { - for name, tt := range map[string]struct { - key string - want key.DERPMesh - wantErr bool - }{ - "clobber": { - key: testMeshKey, - wantErr: false, - }, - "invalid": { - key: "badf00d", - wantErr: true, - }, - } { - t.Run(name, func(t *testing.T) { - s := &Server{} - - err := s.SetMeshKey(tt.key) - if tt.wantErr { - if err == nil { - t.Fatalf("expected err") - } - return - } - if err != nil { - t.Fatalf("unexpected err: %v", err) - } - - want, err := key.ParseDERPMesh(tt.key) - if err != nil { - t.Fatal(err) - } - if !s.meshKey.Equal(want) { - t.Fatalf("got %v, want %v", s.meshKey, want) - } - }) - } -} - -func TestIsMeshPeer(t *testing.T) { - s := &Server{} - err := s.SetMeshKey(testMeshKey) - if err != nil { - t.Fatal(err) - } - for name, tt := range map[string]struct { - want bool - meshKey string - wantAllocs float64 - }{ - "nil": { - want: false, - wantAllocs: 0, - }, - "mismatch": { - meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", - want: false, - wantAllocs: 1, - }, - "match": { - meshKey: testMeshKey, - want: true, - wantAllocs: 0, - }, - } { - t.Run(name, func(t *testing.T) { - var got bool - var mKey key.DERPMesh - if tt.meshKey != "" { - mKey, err = key.ParseDERPMesh(tt.meshKey) - if err != nil { - t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) - } - } - - info := clientInfo{ - MeshKey: mKey, - } - allocs := testing.AllocsPerRun(1, func() { - got = s.isMeshPeer(&info) - }) - if got != tt.want { - t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) - } - - if allocs != tt.wantAllocs && tt.want { - t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) - } - }) - } -} diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 704b8175d07c6..db56c4a44c682 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -32,6 +32,8 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derpconst" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dnscache" "tailscale.com/net/netmon" @@ -39,7 +41,6 @@ import ( "tailscale.com/net/netx" "tailscale.com/net/sockstats" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" @@ -522,7 +523,7 @@ func (c *Client) connect(ctx context.Context, caller string) (client *derp.Clien // just to get routed into the server's HTTP Handler so it // can Hijack the request, but we signal with a special header // that we don't want to deal with its HTTP response. 
- req.Header.Set(fastStartHeader, "1") // suppresses the server's HTTP response + req.Header.Set(derp.FastStartHeader, "1") // suppresses the server's HTTP response if err := req.Write(brw); err != nil { return nil, 0, err } @@ -734,8 +735,12 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e Path: "/", // unused }, } - if proxyURL, err := tshttpproxy.ProxyFromEnvironment(proxyReq); err == nil && proxyURL != nil { - return c.dialNodeUsingProxy(ctx, n, proxyURL) + if buildfeatures.HasUseProxy { + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + if proxyURL, err := proxyFromEnv(proxyReq); err == nil && proxyURL != nil { + return c.dialNodeUsingProxy(ctx, n, proxyURL) + } + } } type res struct { @@ -865,10 +870,14 @@ func (c *Client) dialNodeUsingProxy(ctx context.Context, n *tailcfg.DERPNode, pr target := net.JoinHostPort(n.HostName, "443") var authHeader string - if v, err := tshttpproxy.GetAuthHeader(pu); err != nil { - c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err) - } else if v != "" { - authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v) + if buildfeatures.HasUseProxy { + if getAuthHeader, ok := feature.HookProxyGetAuthHeader.GetOk(); ok { + if v, err := getAuthHeader(pu); err != nil { + c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err) + } else if v != "" { + authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v) + } + } } if _, err := fmt.Fprintf(proxyConn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n%s\r\n", target, target, authHeader); err != nil { diff --git a/derp/derphttp/derphttp_test.go b/derp/derphttp/derphttp_test.go index 6e8e0bd21c9e9..76681d4984252 100644 --- a/derp/derphttp/derphttp_test.go +++ b/derp/derphttp/derphttp_test.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derphttp +package derphttp_test import ( "bytes" "context" "crypto/tls" "encoding/json" + "errors" "flag" "fmt" "maps" @@ -18,12 +19,17 @@ import ( "strings" "sync" "testing" + "testing/synctest" "time" "tailscale.com/derp" + "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" + "tailscale.com/net/memnet" "tailscale.com/net/netmon" "tailscale.com/net/netx" "tailscale.com/tailcfg" + "tailscale.com/tstest" "tailscale.com/types/key" ) @@ -41,12 +47,12 @@ func TestSendRecv(t *testing.T) { clientKeys = append(clientKeys, priv.Public()) } - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -65,7 +71,7 @@ func TestSendRecv(t *testing.T) { } }() - var clients []*Client + var clients []*derphttp.Client var recvChs []chan []byte done := make(chan struct{}) var wg sync.WaitGroup @@ -78,7 +84,7 @@ func TestSendRecv(t *testing.T) { }() for i := range numClients { key := clientPrivateKeys[i] - c, err := NewClient(key, serverURL, t.Logf, netMon) + c, err := derphttp.NewClient(key, serverURL, t.Logf, netMon) if err != nil { t.Fatalf("client %d: %v", i, err) } @@ -158,7 +164,7 @@ func TestSendRecv(t *testing.T) { recvNothing(1) } -func waitConnect(t testing.TB, c *Client) { +func waitConnect(t testing.TB, c *derphttp.Client) { t.Helper() if m, err := c.Recv(); err != nil { t.Fatalf("client first Recv: %v", err) @@ -169,12 +175,12 @@ func waitConnect(t testing.TB, c 
*Client) { func TestPing(t *testing.T) { serverPrivateKey := key.NewNode() - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") @@ -193,7 +199,7 @@ func TestPing(t *testing.T) { } }() - c, err := NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic()) + c, err := derphttp.NewClient(key.NewNode(), serverURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatalf("NewClient: %v", err) } @@ -221,24 +227,21 @@ func TestPing(t *testing.T) { const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" -func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.Server) { - s = derp.NewServer(k, t.Logf) +func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derpserver.Server, ln *memnet.Listener) { + s = derpserver.New(k, t.Logf) httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: Handler(s), + Handler: derpserver.Handler(s), } - ln, err := net.Listen("tcp4", "localhost:0") - if err != nil { - t.Fatal(err) - } + ln = memnet.Listen("localhost:0") + serverURL = "http://" + ln.Addr().String() s.SetMeshKey(testMeshKey) go func() { if err := httpsrv.Serve(ln); err != nil { - if err == http.ErrServerClosed { - t.Logf("server closed") + if errors.Is(err, net.ErrClosed) { return } panic(err) @@ -247,8 +250,8 @@ func newTestServer(t *testing.T, k key.NodePrivate) (serverURL string, s *derp.S return } -func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string) (c *Client) { - c, err := NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) +func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToWatchURL string, ln *memnet.Listener) (c *derphttp.Client) { + c, err := derphttp.NewClient(watcherPrivateKey, serverToWatchURL, t.Logf, netmon.NewStatic()) if err != nil { t.Fatal(err) } @@ -257,188 +260,179 @@ func newWatcherClient(t *testing.T, watcherPrivateKey key.NodePrivate, serverToW t.Fatal(err) } c.MeshKey = k + c.SetURLDialer(ln.Dial) return } -// breakConnection breaks the connection, which should trigger a reconnect. -func (c *Client) breakConnection(brokenClient *derp.Client) { - c.mu.Lock() - defer c.mu.Unlock() - if c.client != brokenClient { - return - } - if c.netConn != nil { - c.netConn.Close() - c.netConn = nil - } - c.client = nil -} - // Test that a watcher connection successfully reconnects and processes peer // updates after a different thread breaks and reconnects the connection, while // the watcher is waiting on recv(). func TestBreakWatcherConnRecv(t *testing.T) { - // Set the wait time before a retry after connection failure to be much lower. - // This needs to be early in the test, for defer to run right at the end after - // the DERP client has finished. 
- origRetryInterval := retryInterval - retryInterval = 50 * time.Millisecond - defer func() { retryInterval = origRetryInterval }() - - var wg sync.WaitGroup - // Make the watcher server - serverPrivateKey1 := key.NewNode() - _, s1 := newTestServer(t, serverPrivateKey1) - defer s1.Close() - - // Make the watched server - serverPrivateKey2 := key.NewNode() - serverURL2, s2 := newTestServer(t, serverPrivateKey2) - defer s2.Close() - - // Make the watcher (but it is not connected yet) - watcher := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher.Close() - - ctx, cancel := context.WithCancel(context.Background()) + synctest.Test(t, func(t *testing.T) { + // Set the wait time before a retry after connection failure to be much lower. + // This needs to be early in the test, for defer to run right at the end after + // the DERP client has finished. + tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) + + var wg sync.WaitGroup + // Make the watcher server + serverPrivateKey1 := key.NewNode() + _, s1, ln1 := newTestServer(t, serverPrivateKey1) + defer s1.Close() + defer ln1.Close() + + // Make the watched server + serverPrivateKey2 := key.NewNode() + serverURL2, s2, ln2 := newTestServer(t, serverPrivateKey2) + defer s2.Close() + defer ln2.Close() + + // Make the watcher (but it is not connected yet) + watcher := newWatcherClient(t, serverPrivateKey1, serverURL2, ln2) + defer watcher.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + watcherChan := make(chan int, 1) + defer close(watcherChan) + errChan := make(chan error, 1) + + // Start the watcher thread (which connects to the watched server) + wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 + go func() { + defer wg.Done() + var peers int + add := func(m derp.PeerPresentMessage) { + t.Logf("add: %v", m.Key.ShortString()) + peers++ + // Signal that the watcher has run + watcherChan <- peers + } + remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyErr := func(err error) { + select { + case errChan <- err: + case <-ctx.Done(): + } + } - watcherChan := make(chan int, 1) - defer close(watcherChan) - errChan := make(chan error, 1) - defer close(errChan) + watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) + }() - // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 - go func() { - defer wg.Done() - var peers int - add := func(m derp.PeerPresentMessage) { - t.Logf("add: %v", m.Key.ShortString()) - peers++ - // Signal that the watcher has run - watcherChan <- peers - } - remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } - notifyErr := func(err error) { - errChan <- err - } + synctest.Wait() - watcher.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyErr) - }() + // Wait for the watcher to run, then break the connection and check if it + // reconnected and received peer updates. 
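+		// Wait for the watcher to run, then break the connection and check
+		// that it reconnects and keeps receiving peer updates. synctest.Wait
+		// returns once every other goroutine in the bubble is durably
+		// blocked, so no real-time timers are needed here.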
+ for range 10 { + select { + case peers := <-watcherChan: + if peers != 1 { + t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) + } + case err := <-errChan: + if err.Error() != "derp.Recv: EOF" { + t.Fatalf("expected notifyError connection error to be EOF, got %v", err) + } + } - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() + synctest.Wait() - // Wait for the watcher to run, then break the connection and check if it - // reconnected and received peer updates. - for range 10 { - select { - case peers := <-watcherChan: - if peers != 1 { - t.Fatalf("wrong number of peers added during watcher connection: have %d, want 1", peers) - } - case err := <-errChan: - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) - } - case <-timer.C: - t.Fatalf("watcher did not process the peer update") + watcher.BreakConnection(watcher) + // re-establish connection by sending a packet + watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) } - timer.Reset(5 * time.Second) - watcher.breakConnection(watcher.client) - // re-establish connection by sending a packet - watcher.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - } - cancel() // Cancel the context to stop the watcher loop. - wg.Wait() + cancel() // Cancel the context to stop the watcher loop. + wg.Wait() + }) } // Test that a watcher connection successfully reconnects and processes peer // updates after a different thread breaks and reconnects the connection, while // the watcher is not waiting on recv(). func TestBreakWatcherConn(t *testing.T) { - // Set the wait time before a retry after connection failure to be much lower. - // This needs to be early in the test, for defer to run right at the end after - // the DERP client has finished. - origRetryInterval := retryInterval - retryInterval = 50 * time.Millisecond - defer func() { retryInterval = origRetryInterval }() - - var wg sync.WaitGroup - // Make the watcher server - serverPrivateKey1 := key.NewNode() - _, s1 := newTestServer(t, serverPrivateKey1) - defer s1.Close() - - // Make the watched server - serverPrivateKey2 := key.NewNode() - serverURL2, s2 := newTestServer(t, serverPrivateKey2) - defer s2.Close() - - // Make the watcher (but it is not connected yet) - watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2) - defer watcher1.Close() + synctest.Test(t, func(t *testing.T) { + // Set the wait time before a retry after connection failure to be much lower. + // This needs to be early in the test, for defer to run right at the end after + // the DERP client has finished. 
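+		// tstest.Replace swaps in the new value and restores the original
+		// when the test finishes.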
+ tstest.Replace(t, derphttp.RetryInterval, 50*time.Millisecond) + + var wg sync.WaitGroup + // Make the watcher server + serverPrivateKey1 := key.NewNode() + _, s1, ln1 := newTestServer(t, serverPrivateKey1) + defer s1.Close() + defer ln1.Close() + + // Make the watched server + serverPrivateKey2 := key.NewNode() + serverURL2, s2, ln2 := newTestServer(t, serverPrivateKey2) + defer s2.Close() + defer ln2.Close() + + // Make the watcher (but it is not connected yet) + watcher1 := newWatcherClient(t, serverPrivateKey1, serverURL2, ln2) + defer watcher1.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + watcherChan := make(chan int, 1) + breakerChan := make(chan bool, 1) + errorChan := make(chan error, 1) + + // Start the watcher thread (which connects to the watched server) + wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 + go func() { + defer wg.Done() + var peers int + add := func(m derp.PeerPresentMessage) { + t.Logf("add: %v", m.Key.ShortString()) + peers++ + // Signal that the watcher has run + watcherChan <- peers + select { + case <-ctx.Done(): + return + // Wait for breaker to run + case <-breakerChan: + } + } + remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } + notifyError := func(err error) { + errorChan <- err + } - ctx, cancel := context.WithCancel(context.Background()) + watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) + }() - watcherChan := make(chan int, 1) - breakerChan := make(chan bool, 1) - errorChan := make(chan error, 1) + synctest.Wait() - // Start the watcher thread (which connects to the watched server) - wg.Add(1) // To avoid using t.Logf after the test ends. See https://golang.org/issue/40343 - go func() { - defer wg.Done() - var peers int - add := func(m derp.PeerPresentMessage) { - t.Logf("add: %v", m.Key.ShortString()) - peers++ - // Signal that the watcher has run - watcherChan <- peers + // Wait for the watcher to run, then break the connection and check if it + // reconnected and received peer updates. + for range 10 { select { - case <-ctx.Done(): - return - // Wait for breaker to run - case <-breakerChan: + case peers := <-watcherChan: + if peers != 1 { + t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) + } + case err := <-errorChan: + if !errors.Is(err, net.ErrClosed) { + t.Fatalf("expected notifyError connection error to fail with ErrClosed, got %v", err) + } } - } - remove := func(m derp.PeerGoneMessage) { t.Logf("remove: %v", m.Peer.ShortString()); peers-- } - notifyError := func(err error) { - errorChan <- err - } - - watcher1.RunWatchConnectionLoop(ctx, serverPrivateKey1.Public(), t.Logf, add, remove, notifyError) - }() - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() + synctest.Wait() - // Wait for the watcher to run, then break the connection and check if it - // reconnected and received peer updates. 
- for range 10 { - select { - case peers := <-watcherChan: - if peers != 1 { - t.Fatalf("wrong number of peers added during watcher connection have %d, want 1", peers) - } - case err := <-errorChan: - if !strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("expected notifyError connection error to contain 'use of closed network connection', got %v", err) - } - case <-timer.C: - t.Fatalf("watcher did not process the peer update") + watcher1.BreakConnection(watcher1) + // re-establish connection by sending a packet + watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) + // signal that the breaker is done + breakerChan <- true } - watcher1.breakConnection(watcher1.client) - // re-establish connection by sending a packet - watcher1.ForwardPacket(key.NodePublic{}, key.NodePublic{}, []byte("bogus")) - // signal that the breaker is done - breakerChan <- true - - timer.Reset(5 * time.Second) - } - watcher1.Close() - cancel() - wg.Wait() + watcher1.Close() + cancel() + wg.Wait() + }) } func noopAdd(derp.PeerPresentMessage) {} @@ -446,22 +440,23 @@ func noopRemove(derp.PeerGoneMessage) {} func noopNotifyError(error) {} func TestRunWatchConnectionLoopServeConnect(t *testing.T) { - defer func() { testHookWatchLookConnectResult = nil }() + defer derphttp.SetTestHookWatchLookConnectResult(nil) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() priv := key.NewNode() - serverURL, s := newTestServer(t, priv) + serverURL, s, ln := newTestServer(t, priv) defer s.Close() + defer ln.Close() pub := priv.Public() - watcher := newWatcherClient(t, priv, serverURL) + watcher := newWatcherClient(t, priv, serverURL, ln) defer watcher.Close() // Test connecting to ourselves, and that we get hung up on. - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err != nil { t.Fatalf("error connecting to server: %v", err) @@ -470,12 +465,12 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { t.Error("wanted self-connect; wasn't") } return false - } + }) watcher.RunWatchConnectionLoop(ctx, pub, t.Logf, noopAdd, noopRemove, noopNotifyError) // Test connecting to the server with a zero value for ignoreServerKey, // so we should always connect. - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err != nil { t.Fatalf("error connecting to server: %v", err) @@ -484,16 +479,14 @@ func TestRunWatchConnectionLoopServeConnect(t *testing.T) { t.Error("wanted normal connect; got self connect") } return false - } + }) watcher.RunWatchConnectionLoop(ctx, key.NodePublic{}, t.Logf, noopAdd, noopRemove, noopNotifyError) } // verify that the LocalAddr method doesn't acquire the mutex. 
// See https://github.com/tailscale/tailscale/issues/11519 func TestLocalAddrNoMutex(t *testing.T) { - var c Client - c.mu.Lock() - defer c.mu.Unlock() // not needed in test but for symmetry + var c derphttp.Client _, err := c.LocalAddr() if got, want := fmt.Sprint(err), "client not connected"; got != want { @@ -502,7 +495,7 @@ func TestLocalAddrNoMutex(t *testing.T) { } func TestProbe(t *testing.T) { - h := Handler(nil) + h := derpserver.Handler(nil) tests := []struct { path string @@ -523,25 +516,26 @@ func TestProbe(t *testing.T) { } func TestNotifyError(t *testing.T) { - defer func() { testHookWatchLookConnectResult = nil }() + defer derphttp.SetTestHookWatchLookConnectResult(nil) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() priv := key.NewNode() - serverURL, s := newTestServer(t, priv) + serverURL, s, ln := newTestServer(t, priv) defer s.Close() + defer ln.Close() pub := priv.Public() // Test early error notification when c.connect fails. - watcher := newWatcherClient(t, priv, serverURL) + watcher := newWatcherClient(t, priv, serverURL, ln) watcher.SetURLDialer(netx.DialFunc(func(ctx context.Context, network, addr string) (net.Conn, error) { t.Helper() return nil, fmt.Errorf("test error: %s", addr) })) defer watcher.Close() - testHookWatchLookConnectResult = func(err error, wasSelfConnect bool) bool { + derphttp.SetTestHookWatchLookConnectResult(func(err error, wasSelfConnect bool) bool { t.Helper() if err == nil { t.Fatal("expected error connecting to server, got nil") @@ -550,7 +544,7 @@ func TestNotifyError(t *testing.T) { t.Error("wanted normal connect; got self connect") } return false - } + }) errChan := make(chan error, 1) notifyError := func(err error) { @@ -587,7 +581,7 @@ func TestManualDial(t *testing.T) { region := slices.Sorted(maps.Keys(dm.Regions))[0] netMon := netmon.NewStatic() - rc := NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { + rc := derphttp.NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { return dm.Regions[region] }) defer rc.Close() @@ -625,7 +619,7 @@ func TestURLDial(t *testing.T) { } } netMon := netmon.NewStatic() - c, err := NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) + c, err := derphttp.NewClient(key.NewNode(), "https://"+hostname+"/", t.Logf, netMon) defer c.Close() if err := c.Connect(context.Background()); err != nil { diff --git a/derp/derphttp/export_test.go b/derp/derphttp/export_test.go new file mode 100644 index 0000000000000..59d8324dcba3e --- /dev/null +++ b/derp/derphttp/export_test.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derphttp + +func SetTestHookWatchLookConnectResult(f func(connectError error, wasSelfConnect bool) (keepRunning bool)) { + testHookWatchLookConnectResult = f +} + +// breakConnection breaks the connection, which should trigger a reconnect. 
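+// It is exported from this export_test.go file so that the external
+// derphttp_test package can force a reconnect: it closes the underlying
+// net.Conn and drops the cached derp.Client.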
+func (c *Client) BreakConnection(brokenClient *Client) { + c.mu.Lock() + defer c.mu.Unlock() + if c.client != brokenClient.client { + return + } + if c.netConn != nil { + c.netConn.Close() + c.netConn = nil + } + c.client = nil +} + +var RetryInterval = &retryInterval diff --git a/derp/derp_server.go b/derp/derpserver/derpserver.go similarity index 94% rename from derp/derp_server.go rename to derp/derpserver/derpserver.go index f0c635a5aef50..31cf9363a43bf 100644 --- a/derp/derp_server.go +++ b/derp/derpserver/derpserver.go @@ -1,7 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derp +// Package derpserver implements a DERP server. +package derpserver // TODO(crawshaw): with predefined serverKey in clients and HMAC on packets we could skip TLS @@ -38,6 +39,7 @@ import ( "go4.org/mem" "golang.org/x/sync/errgroup" "tailscale.com/client/local" + "tailscale.com/derp" "tailscale.com/derp/derpconst" "tailscale.com/disco" "tailscale.com/envknob" @@ -59,15 +61,9 @@ import ( // verbosely log whenever DERP drops a packet. var verboseDropKeys = map[key.NodePublic]bool{} -// IdealNodeHeader is the HTTP request header sent on DERP HTTP client requests -// to indicate that they're connecting to their ideal (Region.Nodes[0]) node. -// The HTTP header value is the name of the node they wish they were connected -// to. This is an optional header. -const IdealNodeHeader = "Ideal-Node" - // IdealNodeContextKey is the context key used to pass the IdealNodeHeader value // from the HTTP handler to the DERP server's Accept method. -var IdealNodeContextKey = ctxkey.New[string]("ideal-node", "") +var IdealNodeContextKey = ctxkey.New("ideal-node", "") func init() { keys := envknob.String("TS_DEBUG_VERBOSE_DROPS") @@ -183,7 +179,7 @@ type Server struct { mu sync.Mutex closed bool - netConns map[Conn]chan struct{} // chan is closed when conn closes + netConns map[derp.Conn]chan struct{} // chan is closed when conn closes clients map[key.NodePublic]*clientSet watchers set.Set[*sclient] // mesh peers // clientsMesh tracks all clients in the cluster, both locally @@ -356,9 +352,9 @@ var bytesDropped = metrics.NewMultiLabelMap[dropReasonKindLabels]( "DERP bytes dropped by reason and by kind", ) -// NewServer returns a new DERP server. It doesn't listen on its own. +// New returns a new DERP server. It doesn't listen on its own. // Connections are given to it via Server.Accept. -func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { +func New(privateKey key.NodePrivate, logf logger.Logf) *Server { var ms runtime.MemStats runtime.ReadMemStats(&ms) @@ -371,7 +367,7 @@ func NewServer(privateKey key.NodePrivate, logf logger.Logf) *Server { packetsRecvByKind: metrics.LabelMap{Label: "kind"}, clients: map[key.NodePublic]*clientSet{}, clientsMesh: map[key.NodePublic]PacketForwarder{}, - netConns: map[Conn]chan struct{}{}, + netConns: map[derp.Conn]chan struct{}{}, memSys0: ms.Sys, watchers: set.Set[*sclient]{}, peerGoneWatchers: map[key.NodePublic]set.HandleSet[func(key.NodePublic)]{}, @@ -572,7 +568,7 @@ func (s *Server) IsClientConnectedForTest(k key.NodePublic) bool { // on its own. // // Accept closes nc. 
-func (s *Server) Accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string) { +func (s *Server) Accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string) { closed := make(chan struct{}) s.mu.Lock() @@ -620,7 +616,7 @@ func (s *Server) initMetacert() { log.Fatal(err) } tmpl := &x509.Certificate{ - SerialNumber: big.NewInt(ProtocolVersion), + SerialNumber: big.NewInt(derp.ProtocolVersion), Subject: pkix.Name{ CommonName: derpconst.MetaCertCommonNamePrefix + s.publicKey.UntypedHexString(), }, @@ -724,7 +720,7 @@ func (s *Server) registerClient(c *sclient) { // presence changed. // // s.mu must be held. -func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags, present bool) { +func (s *Server) broadcastPeerStateChangeLocked(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags, present bool) { for w := range s.watchers { w.peerStateChange = append(w.peerStateChange, peerConnState{ peer: peer, @@ -868,7 +864,7 @@ func (s *Server) notePeerGoneFromRegionLocked(key key.NodePublic) { // requestPeerGoneWriteLimited sends a request to write a "peer gone" // frame, but only in reply to a disco packet, and only if we haven't // sent one recently. -func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason PeerGoneReasonType) { +func (c *sclient) requestPeerGoneWriteLimited(peer key.NodePublic, contents []byte, reason derp.PeerGoneReasonType) { if disco.LooksLikeDiscoWrapper(contents) != true { return } @@ -912,7 +908,7 @@ func (s *Server) addWatcher(c *sclient) { go c.requestMeshUpdate() } -func (s *Server) accept(ctx context.Context, nc Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error { +func (s *Server) accept(ctx context.Context, nc derp.Conn, brw *bufio.ReadWriter, remoteAddr string, connNum int64) error { br := brw.Reader nc.SetDeadline(time.Now().Add(10 * time.Second)) bw := &lazyBufioWriter{w: nc, lbw: brw.Writer} @@ -1010,7 +1006,7 @@ func (c *sclient) run(ctx context.Context) error { c.startStatsLoop(sendCtx) for { - ft, fl, err := readFrameHeader(c.br) + ft, fl, err := derp.ReadFrameHeader(c.br) c.debugLogf("read frame type %d len %d err %v", ft, fl, err) if err != nil { if errors.Is(err, io.EOF) { @@ -1025,17 +1021,17 @@ func (c *sclient) run(ctx context.Context) error { } c.s.noteClientActivity(c) switch ft { - case frameNotePreferred: + case derp.FrameNotePreferred: err = c.handleFrameNotePreferred(ft, fl) - case frameSendPacket: + case derp.FrameSendPacket: err = c.handleFrameSendPacket(ft, fl) - case frameForwardPacket: + case derp.FrameForwardPacket: err = c.handleFrameForwardPacket(ft, fl) - case frameWatchConns: + case derp.FrameWatchConns: err = c.handleFrameWatchConns(ft, fl) - case frameClosePeer: + case derp.FrameClosePeer: err = c.handleFrameClosePeer(ft, fl) - case framePing: + case derp.FramePing: err = c.handleFramePing(ft, fl) default: err = c.handleUnknownFrame(ft, fl) @@ -1046,12 +1042,12 @@ func (c *sclient) run(ctx context.Context) error { } } -func (c *sclient) handleUnknownFrame(ft frameType, fl uint32) error { +func (c *sclient) handleUnknownFrame(ft derp.FrameType, fl uint32) error { _, err := io.CopyN(io.Discard, c.br, int64(fl)) return err } -func (c *sclient) handleFrameNotePreferred(ft frameType, fl uint32) error { +func (c *sclient) handleFrameNotePreferred(ft derp.FrameType, fl uint32) error { if fl != 1 { return fmt.Errorf("frameNotePreferred wrong size") } @@ -1063,7 +1059,7 @@ func (c 
*sclient) handleFrameNotePreferred(ft frameType, fl uint32) error { return nil } -func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error { +func (c *sclient) handleFrameWatchConns(ft derp.FrameType, fl uint32) error { if fl != 0 { return fmt.Errorf("handleFrameWatchConns wrong size") } @@ -1074,9 +1070,9 @@ func (c *sclient) handleFrameWatchConns(ft frameType, fl uint32) error { return nil } -func (c *sclient) handleFramePing(ft frameType, fl uint32) error { +func (c *sclient) handleFramePing(ft derp.FrameType, fl uint32) error { c.s.gotPing.Add(1) - var m PingMessage + var m derp.PingMessage if fl < uint32(len(m)) { return fmt.Errorf("short ping: %v", fl) } @@ -1101,8 +1097,8 @@ func (c *sclient) handleFramePing(ft frameType, fl uint32) error { return err } -func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error { - if fl != keyLen { +func (c *sclient) handleFrameClosePeer(ft derp.FrameType, fl uint32) error { + if fl != derp.KeyLen { return fmt.Errorf("handleFrameClosePeer wrong size") } if !c.canMesh { @@ -1135,7 +1131,7 @@ func (c *sclient) handleFrameClosePeer(ft frameType, fl uint32) error { // handleFrameForwardPacket reads a "forward packet" frame from the client // (which must be a trusted client, a peer in our mesh). -func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { +func (c *sclient) handleFrameForwardPacket(ft derp.FrameType, fl uint32) error { if !c.canMesh { return fmt.Errorf("insufficient permissions") } @@ -1162,7 +1158,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { if dstLen > 1 { reason = dropReasonDupClient } else { - c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere) + c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere) } s.recordDrop(contents, srcKey, dstKey, reason) return nil @@ -1178,7 +1174,7 @@ func (c *sclient) handleFrameForwardPacket(ft frameType, fl uint32) error { } // handleFrameSendPacket reads a "send packet" frame from the client. -func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { +func (c *sclient) handleFrameSendPacket(ft derp.FrameType, fl uint32) error { s := c.s dstKey, contents, err := s.recvPacket(c.br, fl) @@ -1215,7 +1211,7 @@ func (c *sclient) handleFrameSendPacket(ft frameType, fl uint32) error { if dstLen > 1 { reason = dropReasonDupClient } else { - c.requestPeerGoneWriteLimited(dstKey, contents, PeerGoneReasonNotHere) + c.requestPeerGoneWriteLimited(dstKey, contents, derp.PeerGoneReasonNotHere) } s.recordDrop(contents, c.key, dstKey, reason) c.debugLogf("SendPacket for %s, dropping with reason=%s", dstKey.ShortString(), reason) @@ -1325,13 +1321,13 @@ func (c *sclient) sendPkt(dst *sclient, p pkt) error { // notified (in a new goroutine) whenever a peer has disconnected from all DERP // nodes in the current region. func (c *sclient) onPeerGoneFromRegion(peer key.NodePublic) { - c.requestPeerGoneWrite(peer, PeerGoneReasonDisconnected) + c.requestPeerGoneWrite(peer, derp.PeerGoneReasonDisconnected) } // requestPeerGoneWrite sends a request to write a "peer gone" frame // with an explanation of why it is gone. It blocks until either the // write request is scheduled, or the client has closed. 
-func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason PeerGoneReasonType) { +func (c *sclient) requestPeerGoneWrite(peer key.NodePublic, reason derp.PeerGoneReasonType) { select { case c.peerGone <- peerGoneMsg{ peer: peer, @@ -1358,7 +1354,7 @@ func (c *sclient) requestMeshUpdate() { // isMeshPeer reports whether the client is a trusted mesh peer // node in the DERP region. -func (s *Server) isMeshPeer(info *clientInfo) bool { +func (s *Server) isMeshPeer(info *derp.ClientInfo) bool { // Compare mesh keys in constant time to prevent timing attacks. // Since mesh keys are a fixed length, we don’t need to be concerned // about timing attacks on client mesh keys that are the wrong length. @@ -1372,7 +1368,7 @@ func (s *Server) isMeshPeer(info *clientInfo) bool { // verifyClient checks whether the client is allowed to connect to the derper, // depending on how & whether the server's been configured to verify. -func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *clientInfo, clientIP netip.Addr) error { +func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, info *derp.ClientInfo, clientIP netip.Addr) error { if s.isMeshPeer(info) { // Trusted mesh peer. No need to verify further. In fact, verifying // further wouldn't work: it's not part of the tailnet so tailscaled and @@ -1436,10 +1432,10 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf } func (s *Server) sendServerKey(lw *lazyBufioWriter) error { - buf := make([]byte, 0, len(magic)+key.NodePublicRawLen) - buf = append(buf, magic...) + buf := make([]byte, 0, len(derp.Magic)+key.NodePublicRawLen) + buf = append(buf, derp.Magic...) buf = s.publicKey.AppendTo(buf) - err := writeFrame(lw.bw(), frameServerKey, buf) + err := derp.WriteFrame(lw.bw(), derp.FrameServerKey, buf) lw.Flush() // redundant (no-op) flush to release bufio.Writer return err } @@ -1504,21 +1500,16 @@ func (s *Server) noteClientActivity(c *sclient) { dup.sendHistory = append(dup.sendHistory, c) } -type serverInfo struct { - Version int `json:"version,omitempty"` - - TokenBucketBytesPerSecond int `json:",omitempty"` - TokenBucketBytesBurst int `json:",omitempty"` -} +type ServerInfo = derp.ServerInfo func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) error { - msg, err := json.Marshal(serverInfo{Version: ProtocolVersion}) + msg, err := json.Marshal(ServerInfo{Version: derp.ProtocolVersion}) if err != nil { return err } msgbox := s.privateKey.SealTo(clientKey, msg) - if err := writeFrameHeader(bw.bw(), frameServerInfo, uint32(len(msgbox))); err != nil { + if err := derp.WriteFrameHeader(bw.bw(), derp.FrameServerInfo, uint32(len(msgbox))); err != nil { return err } if _, err := bw.Write(msgbox); err != nil { @@ -1530,12 +1521,12 @@ func (s *Server) sendServerInfo(bw *lazyBufioWriter, clientKey key.NodePublic) e // recvClientKey reads the frameClientInfo frame from the client (its // proof of identity) upon its initial connection. It should be // considered especially untrusted at this point. 
-func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *clientInfo, err error) { - fl, err := readFrameTypeHeader(br, frameClientInfo) +func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info *derp.ClientInfo, err error) { + fl, err := derp.ReadFrameTypeHeader(br, derp.FrameClientInfo) if err != nil { return zpub, nil, err } - const minLen = keyLen + nonceLen + const minLen = derp.KeyLen + derp.NonceLen if fl < minLen { return zpub, nil, errors.New("short client info") } @@ -1547,7 +1538,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info if err := clientKey.ReadRawWithoutAllocating(br); err != nil { return zpub, nil, err } - msgLen := int(fl - keyLen) + msgLen := int(fl - derp.KeyLen) msgbox := make([]byte, msgLen) if _, err := io.ReadFull(br, msgbox); err != nil { return zpub, nil, fmt.Errorf("msgbox: %v", err) @@ -1556,7 +1547,7 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info if !ok { return zpub, nil, fmt.Errorf("msgbox: cannot open len=%d with client key %s", msgLen, clientKey) } - info = new(clientInfo) + info = new(derp.ClientInfo) if err := json.Unmarshal(msg, info); err != nil { return zpub, nil, fmt.Errorf("msg: %v", err) } @@ -1564,15 +1555,15 @@ func (s *Server) recvClientKey(br *bufio.Reader) (clientKey key.NodePublic, info } func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodePublic, contents []byte, err error) { - if frameLen < keyLen { + if frameLen < derp.KeyLen { return zpub, nil, errors.New("short send packet frame") } if err := dstKey.ReadRawWithoutAllocating(br); err != nil { return zpub, nil, err } - packetLen := frameLen - keyLen - if packetLen > MaxPacketSize { - return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize) + packetLen := frameLen - derp.KeyLen + if packetLen > derp.MaxPacketSize { + return zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, derp.MaxPacketSize) } contents = make([]byte, packetLen) if _, err := io.ReadFull(br, contents); err != nil { @@ -1592,7 +1583,7 @@ func (s *Server) recvPacket(br *bufio.Reader, frameLen uint32) (dstKey key.NodeP var zpub key.NodePublic func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, dstKey key.NodePublic, contents []byte, err error) { - if frameLen < keyLen*2 { + if frameLen < derp.KeyLen*2 { return zpub, zpub, nil, errors.New("short send packet frame") } if err := srcKey.ReadRawWithoutAllocating(br); err != nil { @@ -1601,9 +1592,9 @@ func (s *Server) recvForwardPacket(br *bufio.Reader, frameLen uint32) (srcKey, d if err := dstKey.ReadRawWithoutAllocating(br); err != nil { return zpub, zpub, nil, err } - packetLen := frameLen - keyLen*2 - if packetLen > MaxPacketSize { - return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, MaxPacketSize) + packetLen := frameLen - derp.KeyLen*2 + if packetLen > derp.MaxPacketSize { + return zpub, zpub, nil, fmt.Errorf("data packet longer (%d) than max of %v", packetLen, derp.MaxPacketSize) } contents = make([]byte, packetLen) if _, err := io.ReadFull(br, contents); err != nil { @@ -1626,9 +1617,9 @@ type sclient struct { // Static after construction. 
connNum int64 // process-wide unique counter, incremented each Accept s *Server - nc Conn + nc derp.Conn key key.NodePublic - info clientInfo + info derp.ClientInfo logf logger.Logf done <-chan struct{} // closed when connection closes remoteIPPort netip.AddrPort // zero if remoteAddr is not ip:port. @@ -1666,19 +1657,19 @@ type sclient struct { peerGoneLim *rate.Limiter } -func (c *sclient) presentFlags() PeerPresentFlags { - var f PeerPresentFlags +func (c *sclient) presentFlags() derp.PeerPresentFlags { + var f derp.PeerPresentFlags if c.info.IsProber { - f |= PeerPresentIsProber + f |= derp.PeerPresentIsProber } if c.canMesh { - f |= PeerPresentIsMeshPeer + f |= derp.PeerPresentIsMeshPeer } if c.isNotIdealConn { - f |= PeerPresentNotIdeal + f |= derp.PeerPresentNotIdeal } if f == 0 { - return PeerPresentIsRegular + return derp.PeerPresentIsRegular } return f } @@ -1688,7 +1679,7 @@ func (c *sclient) presentFlags() PeerPresentFlags { type peerConnState struct { ipPort netip.AddrPort // if present, the peer's IP:port peer key.NodePublic - flags PeerPresentFlags + flags derp.PeerPresentFlags present bool } @@ -1709,7 +1700,7 @@ type pkt struct { // peerGoneMsg is a request to write a peerGone frame to an sclient type peerGoneMsg struct { peer key.NodePublic - reason PeerGoneReasonType + reason derp.PeerGoneReasonType } func (c *sclient) setPreferred(v bool) { @@ -1788,7 +1779,7 @@ func (c *sclient) sendLoop(ctx context.Context) error { defer c.onSendLoopDone() jitter := rand.N(5 * time.Second) - keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(KeepAlive + jitter) + keepAliveTick, keepAliveTickChannel := c.s.clock.NewTicker(derp.KeepAlive + jitter) defer keepAliveTick.Stop() var werr error // last write error @@ -1887,14 +1878,14 @@ func (c *sclient) setWriteDeadline() { // sendKeepAlive sends a keep-alive frame, without flushing. func (c *sclient) sendKeepAlive() error { c.setWriteDeadline() - return writeFrameHeader(c.bw.bw(), frameKeepAlive, 0) + return derp.WriteFrameHeader(c.bw.bw(), derp.FrameKeepAlive, 0) } // sendPong sends a pong reply, without flushing. func (c *sclient) sendPong(data [8]byte) error { c.s.sentPong.Add(1) c.setWriteDeadline() - if err := writeFrameHeader(c.bw.bw(), framePong, uint32(len(data))); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePong, uint32(len(data))); err != nil { return err } _, err := c.bw.Write(data[:]) @@ -1902,23 +1893,23 @@ func (c *sclient) sendPong(data [8]byte) error { } const ( - peerGoneFrameLen = keyLen + 1 - peerPresentFrameLen = keyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags + peerGoneFrameLen = derp.KeyLen + 1 + peerPresentFrameLen = derp.KeyLen + 16 + 2 + 1 // 16 byte IP + 2 byte port + 1 byte flags ) // sendPeerGone sends a peerGone frame, without flushing. 
-func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) error { +func (c *sclient) sendPeerGone(peer key.NodePublic, reason derp.PeerGoneReasonType) error { switch reason { - case PeerGoneReasonDisconnected: + case derp.PeerGoneReasonDisconnected: c.s.peerGoneDisconnectedFrames.Add(1) - case PeerGoneReasonNotHere: + case derp.PeerGoneReasonNotHere: c.s.peerGoneNotHereFrames.Add(1) } c.setWriteDeadline() data := make([]byte, 0, peerGoneFrameLen) data = peer.AppendTo(data) data = append(data, byte(reason)) - if err := writeFrameHeader(c.bw.bw(), framePeerGone, uint32(len(data))); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerGone, uint32(len(data))); err != nil { return err } @@ -1927,17 +1918,17 @@ func (c *sclient) sendPeerGone(peer key.NodePublic, reason PeerGoneReasonType) e } // sendPeerPresent sends a peerPresent frame, without flushing. -func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags PeerPresentFlags) error { +func (c *sclient) sendPeerPresent(peer key.NodePublic, ipPort netip.AddrPort, flags derp.PeerPresentFlags) error { c.setWriteDeadline() - if err := writeFrameHeader(c.bw.bw(), framePeerPresent, peerPresentFrameLen); err != nil { + if err := derp.WriteFrameHeader(c.bw.bw(), derp.FramePeerPresent, peerPresentFrameLen); err != nil { return err } payload := make([]byte, peerPresentFrameLen) _ = peer.AppendTo(payload[:0]) a16 := ipPort.Addr().As16() - copy(payload[keyLen:], a16[:]) - binary.BigEndian.PutUint16(payload[keyLen+16:], ipPort.Port()) - payload[keyLen+18] = byte(flags) + copy(payload[derp.KeyLen:], a16[:]) + binary.BigEndian.PutUint16(payload[derp.KeyLen+16:], ipPort.Port()) + payload[derp.KeyLen+18] = byte(flags) _, err := c.bw.Write(payload) return err } @@ -1975,7 +1966,7 @@ func (c *sclient) sendMeshUpdates() error { if pcs.present { err = c.sendPeerPresent(pcs.peer, pcs.ipPort, pcs.flags) } else { - err = c.sendPeerGone(pcs.peer, PeerGoneReasonDisconnected) + err = c.sendPeerGone(pcs.peer, derp.PeerGoneReasonDisconnected) } if err != nil { return err @@ -2010,7 +2001,7 @@ func (c *sclient) sendPacket(srcKey key.NodePublic, contents []byte) (err error) pktLen += key.NodePublicRawLen c.noteSendFromSrc(srcKey) } - if err = writeFrameHeader(c.bw.bw(), frameRecvPacket, uint32(pktLen)); err != nil { + if err = derp.WriteFrameHeader(c.bw.bw(), derp.FrameRecvPacket, uint32(pktLen)); err != nil { return err } if withKey { @@ -2286,7 +2277,7 @@ func (s *Server) checkVerifyClientsLocalTailscaled() error { if err != nil { return fmt.Errorf("localClient.Status: %w", err) } - info := &clientInfo{ + info := &derp.ClientInfo{ IsProber: true, } clientIP := netip.IPv6Loopback() diff --git a/derp/derp_server_default.go b/derp/derpserver/derpserver_default.go similarity index 91% rename from derp/derp_server_default.go rename to derp/derpserver/derpserver_default.go index 014cfffd642c2..874e590d3c812 100644 --- a/derp/derp_server_default.go +++ b/derp/derpserver/derpserver_default.go @@ -3,7 +3,7 @@ //go:build !linux || android -package derp +package derpserver import "context" diff --git a/derp/derp_server_linux.go b/derp/derpserver/derpserver_linux.go similarity index 99% rename from derp/derp_server_linux.go rename to derp/derpserver/derpserver_linux.go index 5a40e114eecd2..768e6a2ab6ab7 100644 --- a/derp/derp_server_linux.go +++ b/derp/derpserver/derpserver_linux.go @@ -3,7 +3,7 @@ //go:build linux && !android -package derp +package derpserver import ( "context" diff --git 
a/derp/derpserver/derpserver_test.go b/derp/derpserver/derpserver_test.go new file mode 100644 index 0000000000000..2db5f25bc00b7 --- /dev/null +++ b/derp/derpserver/derpserver_test.go @@ -0,0 +1,782 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derpserver + +import ( + "bufio" + "cmp" + "context" + "crypto/x509" + "encoding/asn1" + "expvar" + "fmt" + "log" + "net" + "os" + "reflect" + "strconv" + "sync" + "testing" + "time" + + qt "github.com/frankban/quicktest" + "go4.org/mem" + "golang.org/x/time/rate" + "tailscale.com/derp" + "tailscale.com/derp/derpconst" + "tailscale.com/types/key" + "tailscale.com/types/logger" +) + +const testMeshKey = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + +func TestSetMeshKey(t *testing.T) { + for name, tt := range map[string]struct { + key string + want key.DERPMesh + wantErr bool + }{ + "clobber": { + key: testMeshKey, + wantErr: false, + }, + "invalid": { + key: "badf00d", + wantErr: true, + }, + } { + t.Run(name, func(t *testing.T) { + s := &Server{} + + err := s.SetMeshKey(tt.key) + if tt.wantErr { + if err == nil { + t.Fatalf("expected err") + } + return + } + if err != nil { + t.Fatalf("unexpected err: %v", err) + } + + want, err := key.ParseDERPMesh(tt.key) + if err != nil { + t.Fatal(err) + } + if !s.meshKey.Equal(want) { + t.Fatalf("got %v, want %v", s.meshKey, want) + } + }) + } +} + +func TestIsMeshPeer(t *testing.T) { + s := &Server{} + err := s.SetMeshKey(testMeshKey) + if err != nil { + t.Fatal(err) + } + for name, tt := range map[string]struct { + want bool + meshKey string + wantAllocs float64 + }{ + "nil": { + want: false, + wantAllocs: 0, + }, + "mismatch": { + meshKey: "6d529e9d4ef632d22d4a4214cb49da8f1ba1b72697061fb24e312984c35ec8d8", + want: false, + wantAllocs: 1, + }, + "match": { + meshKey: testMeshKey, + want: true, + wantAllocs: 0, + }, + } { + t.Run(name, func(t *testing.T) { + var got bool + var mKey key.DERPMesh + if tt.meshKey != "" { + mKey, err = key.ParseDERPMesh(tt.meshKey) + if err != nil { + t.Fatalf("ParseDERPMesh(%q) failed: %v", tt.meshKey, err) + } + } + + info := derp.ClientInfo{ + MeshKey: mKey, + } + allocs := testing.AllocsPerRun(1, func() { + got = s.isMeshPeer(&info) + }) + if got != tt.want { + t.Fatalf("got %t, want %t: info = %#v", got, tt.want, info) + } + + if allocs != tt.wantAllocs && tt.want { + t.Errorf("%f allocations, want %f", allocs, tt.wantAllocs) + } + }) + } +} + +type testFwd int + +func (testFwd) ForwardPacket(key.NodePublic, key.NodePublic, []byte) error { + panic("not called in tests") +} +func (testFwd) String() string { + panic("not called in tests") +} + +func pubAll(b byte) (ret key.NodePublic) { + var bs [32]byte + for i := range bs { + bs[i] = b + } + return key.NodePublicFromRaw32(mem.B(bs[:])) +} + +func TestForwarderRegistration(t *testing.T) { + s := &Server{ + clients: make(map[key.NodePublic]*clientSet), + clientsMesh: map[key.NodePublic]PacketForwarder{}, + } + want := func(want map[key.NodePublic]PacketForwarder) { + t.Helper() + if got := s.clientsMesh; !reflect.DeepEqual(got, want) { + t.Fatalf("mismatch\n got: %v\nwant: %v\n", got, want) + } + } + wantCounter := func(c *expvar.Int, want int) { + t.Helper() + if got := c.Value(); got != int64(want) { + t.Errorf("counter = %v; want %v", got, want) + } + } + singleClient := func(c *sclient) *clientSet { + cs := &clientSet{} + cs.activeClient.Store(c) + return cs + } + + u1 := pubAll(1) + u2 := pubAll(2) + u3 := pubAll(3) + + s.AddPacketForwarder(u1, 
testFwd(1)) + s.AddPacketForwarder(u2, testFwd(2)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Verify a remove of non-registered forwarder is no-op. + s.RemovePacketForwarder(u2, testFwd(999)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Verify a remove of non-registered user is no-op. + s.RemovePacketForwarder(u3, testFwd(1)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + u2: testFwd(2), + }) + + // Actual removal. + s.RemovePacketForwarder(u2, testFwd(2)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(1), + }) + + // Adding a dup for a user. + wantCounter(&s.multiForwarderCreated, 0) + s.AddPacketForwarder(u1, testFwd(100)) + s.AddPacketForwarder(u1, testFwd(100)) // dup to trigger dup path + want(map[key.NodePublic]PacketForwarder{ + u1: newMultiForwarder(testFwd(1), testFwd(100)), + }) + wantCounter(&s.multiForwarderCreated, 1) + + // Removing a forwarder in a multi set that doesn't exist; does nothing. + s.RemovePacketForwarder(u1, testFwd(55)) + want(map[key.NodePublic]PacketForwarder{ + u1: newMultiForwarder(testFwd(1), testFwd(100)), + }) + + // Removing a forwarder in a multi set that does exist should collapse it away + // from being a multiForwarder. + wantCounter(&s.multiForwarderDeleted, 0) + s.RemovePacketForwarder(u1, testFwd(1)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(100), + }) + wantCounter(&s.multiForwarderDeleted, 1) + + // Removing an entry for a client that's still connected locally should result + // in a nil forwarder. + u1c := &sclient{ + key: u1, + logf: logger.Discard, + } + s.clients[u1] = singleClient(u1c) + s.RemovePacketForwarder(u1, testFwd(100)) + want(map[key.NodePublic]PacketForwarder{ + u1: nil, + }) + + // But once that client disconnects, it should go away. + s.unregisterClient(u1c) + want(map[key.NodePublic]PacketForwarder{}) + + // But if it already has a forwarder, it's not removed. + s.AddPacketForwarder(u1, testFwd(2)) + s.unregisterClient(u1c) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(2), + }) + + // Now pretend u1 was already connected locally (so clientsMesh[u1] is nil), and then we heard + // that they're also connected to a peer of ours. That shouldn't transition the forwarder + // from nil to the new one, not a multiForwarder. + s.clients[u1] = singleClient(u1c) + s.clientsMesh[u1] = nil + want(map[key.NodePublic]PacketForwarder{ + u1: nil, + }) + s.AddPacketForwarder(u1, testFwd(3)) + want(map[key.NodePublic]PacketForwarder{ + u1: testFwd(3), + }) +} + +type channelFwd struct { + // id is to ensure that different instances that reference the + // same channel are not equal, as they are used as keys in the + // multiForwarder map. 
+ id int + c chan []byte +} + +func (f channelFwd) String() string { return "" } +func (f channelFwd) ForwardPacket(_ key.NodePublic, _ key.NodePublic, packet []byte) error { + f.c <- packet + return nil +} + +func TestMultiForwarder(t *testing.T) { + received := 0 + var wg sync.WaitGroup + ch := make(chan []byte) + ctx, cancel := context.WithCancel(context.Background()) + + s := &Server{ + clients: make(map[key.NodePublic]*clientSet), + clientsMesh: map[key.NodePublic]PacketForwarder{}, + } + u := pubAll(1) + s.AddPacketForwarder(u, channelFwd{1, ch}) + + wg.Add(2) + go func() { + defer wg.Done() + for { + select { + case <-ch: + received += 1 + case <-ctx.Done(): + return + } + } + }() + go func() { + defer wg.Done() + for { + s.AddPacketForwarder(u, channelFwd{2, ch}) + s.AddPacketForwarder(u, channelFwd{3, ch}) + s.RemovePacketForwarder(u, channelFwd{2, ch}) + s.RemovePacketForwarder(u, channelFwd{1, ch}) + s.AddPacketForwarder(u, channelFwd{1, ch}) + s.RemovePacketForwarder(u, channelFwd{3, ch}) + if ctx.Err() != nil { + return + } + } + }() + + // Number of messages is chosen arbitrarily, just for this loop to + // run long enough concurrently with {Add,Remove}PacketForwarder loop above. + numMsgs := 5000 + var fwd PacketForwarder + for i := range numMsgs { + s.mu.Lock() + fwd = s.clientsMesh[u] + s.mu.Unlock() + fwd.ForwardPacket(u, u, []byte(strconv.Itoa(i))) + } + + cancel() + wg.Wait() + if received != numMsgs { + t.Errorf("expected %d messages to be forwarded; got %d", numMsgs, received) + } +} +func TestMetaCert(t *testing.T) { + priv := key.NewNode() + pub := priv.Public() + s := New(priv, t.Logf) + + certBytes := s.MetaCert() + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + log.Fatal(err) + } + if fmt.Sprint(cert.SerialNumber) != fmt.Sprint(derp.ProtocolVersion) { + t.Errorf("serial = %v; want %v", cert.SerialNumber, derp.ProtocolVersion) + } + if g, w := cert.Subject.CommonName, derpconst.MetaCertCommonNamePrefix+pub.UntypedHexString(); g != w { + t.Errorf("CommonName = %q; want %q", g, w) + } + if n := len(cert.Extensions); n != 1 { + t.Fatalf("got %d extensions; want 1", n) + } + + // oidExtensionBasicConstraints is the Basic Constraints ID copied + // from the x509 package. + oidExtensionBasicConstraints := asn1.ObjectIdentifier{2, 5, 29, 19} + + if id := cert.Extensions[0].Id; !id.Equal(oidExtensionBasicConstraints) { + t.Errorf("extension ID = %v; want %v", id, oidExtensionBasicConstraints) + } +} + +func TestServerDupClients(t *testing.T) { + serverPriv := key.NewNode() + var s *Server + + clientPriv := key.NewNode() + clientPub := clientPriv.Public() + + var c1, c2, c3 *sclient + var clientName map[*sclient]string + + // run starts a new test case and resets clients back to their zero values. 
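+	// Each call rebuilds the Server so no dup-tracking state leaks between
+	// subtests; runBothWays repeats a case under both dupPolicy values.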
+ run := func(name string, dupPolicy dupPolicy, f func(t *testing.T)) { + s = New(serverPriv, t.Logf) + s.dupPolicy = dupPolicy + c1 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c1: ")} + c2 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c2: ")} + c3 = &sclient{key: clientPub, logf: logger.WithPrefix(t.Logf, "c3: ")} + clientName = map[*sclient]string{ + c1: "c1", + c2: "c2", + c3: "c3", + } + t.Run(name, f) + } + runBothWays := func(name string, f func(t *testing.T)) { + run(name+"_disablefighters", disableFighters, f) + run(name+"_lastwriteractive", lastWriterIsActive, f) + } + wantSingleClient := func(t *testing.T, want *sclient) { + t.Helper() + got, ok := s.clients[want.key] + if !ok { + t.Error("no clients for key") + return + } + if got.dup != nil { + t.Errorf("unexpected dup set for single client") + } + cur := got.activeClient.Load() + if cur != want { + t.Errorf("active client = %q; want %q", clientName[cur], clientName[want]) + } + if cur != nil { + if cur.isDup.Load() { + t.Errorf("unexpected isDup on singleClient") + } + if cur.isDisabled.Load() { + t.Errorf("unexpected isDisabled on singleClient") + } + } + } + wantNoClient := func(t *testing.T) { + t.Helper() + _, ok := s.clients[clientPub] + if !ok { + // Good + return + } + t.Errorf("got client; want empty") + } + wantDupSet := func(t *testing.T) *dupClientSet { + t.Helper() + cs, ok := s.clients[clientPub] + if !ok { + t.Fatal("no set for key; want dup set") + return nil + } + if cs.dup != nil { + return cs.dup + } + t.Fatalf("no dup set for key; want dup set") + return nil + } + wantActive := func(t *testing.T, want *sclient) { + t.Helper() + set, ok := s.clients[clientPub] + if !ok { + t.Error("no set for key") + return + } + got := set.activeClient.Load() + if got != want { + t.Errorf("active client = %q; want %q", clientName[got], clientName[want]) + } + } + checkDup := func(t *testing.T, c *sclient, want bool) { + t.Helper() + if got := c.isDup.Load(); got != want { + t.Errorf("client %q isDup = %v; want %v", clientName[c], got, want) + } + } + checkDisabled := func(t *testing.T, c *sclient, want bool) { + t.Helper() + if got := c.isDisabled.Load(); got != want { + t.Errorf("client %q isDisabled = %v; want %v", clientName[c], got, want) + } + } + wantDupConns := func(t *testing.T, want int) { + t.Helper() + if got := s.dupClientConns.Value(); got != int64(want) { + t.Errorf("dupClientConns = %v; want %v", got, want) + } + } + wantDupKeys := func(t *testing.T, want int) { + t.Helper() + if got := s.dupClientKeys.Value(); got != int64(want) { + t.Errorf("dupClientKeys = %v; want %v", got, want) + } + } + + // Common case: a single client comes and goes, with no dups. + runBothWays("one_comes_and_goes", func(t *testing.T) { + wantNoClient(t) + s.registerClient(c1) + wantSingleClient(t, c1) + s.unregisterClient(c1) + wantNoClient(t) + }) + + // A still somewhat common case: a single client was + // connected and then their wifi dies or laptop closes + // or they switch networks and connect from a + // different network. They have two connections but + // it's not very bad. Only their new one is + // active. The last one, being dead, doesn't send and + // thus the new one doesn't get disabled. 
+ runBothWays("small_overlap_replacement", func(t *testing.T) {
+ wantNoClient(t)
+ s.registerClient(c1)
+ wantSingleClient(t, c1)
+ wantActive(t, c1)
+ wantDupConns(t, 0)
+ wantDupKeys(t, 0)
+
+ s.registerClient(c2) // wifi dies; c2 replacement connects
+ wantDupSet(t)
+ wantDupConns(t, 2)
+ wantDupKeys(t, 1)
+ checkDup(t, c1, true)
+ checkDup(t, c2, true)
+ checkDisabled(t, c1, false)
+ checkDisabled(t, c2, false)
+ wantActive(t, c2) // sends go to the replacement
+
+ s.unregisterClient(c1) // c1 finally times out
+ wantSingleClient(t, c2)
+ checkDup(t, c2, false) // c2 is no longer a dup
+ wantActive(t, c2)
+ wantDupConns(t, 0)
+ wantDupKeys(t, 0)
+ })
+
+ // Key cloning situation with concurrent clients, both trying
+ // to write.
+ run("concurrent_dups_get_disabled", disableFighters, func(t *testing.T) {
+ wantNoClient(t)
+ s.registerClient(c1)
+ wantSingleClient(t, c1)
+ wantActive(t, c1)
+ s.registerClient(c2)
+ wantDupSet(t)
+ wantDupKeys(t, 1)
+ wantDupConns(t, 2)
+ wantActive(t, c2)
+ checkDup(t, c1, true)
+ checkDup(t, c2, true)
+ checkDisabled(t, c1, false)
+ checkDisabled(t, c2, false)
+
+ s.noteClientActivity(c2)
+ checkDisabled(t, c1, false)
+ checkDisabled(t, c2, false)
+ s.noteClientActivity(c1)
+ checkDisabled(t, c1, true)
+ checkDisabled(t, c2, true)
+ wantActive(t, nil)
+
+ s.registerClient(c3)
+ wantActive(t, c3)
+ checkDisabled(t, c3, false)
+ wantDupKeys(t, 1)
+ wantDupConns(t, 3)
+
+ s.unregisterClient(c3)
+ wantActive(t, nil)
+ wantDupKeys(t, 1)
+ wantDupConns(t, 2)
+
+ s.unregisterClient(c2)
+ wantSingleClient(t, c1)
+ wantDupKeys(t, 0)
+ wantDupConns(t, 0)
+ })
+
+ // Key cloning with an A->B->C->A series instead.
+ run("concurrent_dups_three_parties", disableFighters, func(t *testing.T) {
+ wantNoClient(t)
+ s.registerClient(c1)
+ s.registerClient(c2)
+ s.registerClient(c3)
+ s.noteClientActivity(c1)
+ checkDisabled(t, c1, true)
+ checkDisabled(t, c2, true)
+ checkDisabled(t, c3, true)
+ wantActive(t, nil)
+ })
+
+ run("activity_promotes_primary_when_nil", disableFighters, func(t *testing.T) {
+ wantNoClient(t)
+
+ // Last registered client is the active one...
+ s.registerClient(c1)
+ wantActive(t, c1)
+ s.registerClient(c2)
+ wantActive(t, c2)
+ s.registerClient(c3)
+ s.noteClientActivity(c2)
+ wantActive(t, c3)
+
+ // But if the last one goes away, the one with the
+ // most recent activity wins.
+ s.unregisterClient(c3)
+ wantActive(t, c2)
+ })
+
+ run("concurrent_dups_three_parties_last_writer", lastWriterIsActive, func(t *testing.T) {
+ wantNoClient(t)
+
+ s.registerClient(c1)
+ wantActive(t, c1)
+ s.registerClient(c2)
+ wantActive(t, c2)
+
+ s.noteClientActivity(c1)
+ checkDisabled(t, c1, false)
+ checkDisabled(t, c2, false)
+ wantActive(t, c1)
+
+ s.noteClientActivity(c2)
+ checkDisabled(t, c1, false)
+ checkDisabled(t, c2, false)
+ wantActive(t, c2)
+
+ s.unregisterClient(c2)
+ checkDisabled(t, c1, false)
+ wantActive(t, c1)
+ })
+}
+
+func TestLimiter(t *testing.T) {
+ rl := rate.NewLimiter(rate.Every(time.Minute), 100)
+ for i := range 200 {
+ r := rl.Reserve()
+ d := r.Delay()
+ t.Logf("i=%d, allow=%v, d=%v", i, r.OK(), d)
+ }
+}
+
+// BenchmarkConcurrentStreams exercises mutex contention on a
+// single Server instance with multiple concurrent client flows.
+func BenchmarkConcurrentStreams(b *testing.B) { + serverPrivateKey := key.NewNode() + s := New(serverPrivateKey, logger.Discard) + defer s.Close() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + for ctx.Err() == nil { + connIn, err := ln.Accept() + if err != nil { + if ctx.Err() != nil { + return + } + b.Error(err) + return + } + + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + go s.Accept(ctx, connIn, brwServer, "test-client") + } + }() + + newClient := func(t testing.TB) *derp.Client { + t.Helper() + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + t.Cleanup(func() { connOut.Close() }) + + k := key.NewNode() + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := derp.NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + return client + } + + b.RunParallel(func(pb *testing.PB) { + c1, c2 := newClient(b), newClient(b) + const packetSize = 100 + msg := make([]byte, packetSize) + for pb.Next() { + if err := c1.Send(c2.PublicKey(), msg); err != nil { + b.Fatal(err) + } + _, err := c2.Recv() + if err != nil { + return + } + } + }) +} + +func BenchmarkSendRecv(b *testing.B) { + for _, size := range []int{10, 100, 1000, 10000} { + b.Run(fmt.Sprintf("msgsize=%d", size), func(b *testing.B) { benchmarkSendRecvSize(b, size) }) + } +} + +func benchmarkSendRecvSize(b *testing.B, packetSize int) { + serverPrivateKey := key.NewNode() + s := New(serverPrivateKey, logger.Discard) + defer s.Close() + + k := key.NewNode() + clientKey := k.Public() + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + + connOut, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + b.Fatal(err) + } + defer connOut.Close() + + connIn, err := ln.Accept() + if err != nil { + b.Fatal(err) + } + defer connIn.Close() + + brwServer := bufio.NewReadWriter(bufio.NewReader(connIn), bufio.NewWriter(connIn)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go s.Accept(ctx, connIn, brwServer, "test-client") + + brw := bufio.NewReadWriter(bufio.NewReader(connOut), bufio.NewWriter(connOut)) + client, err := derp.NewClient(k, connOut, brw, logger.Discard) + if err != nil { + b.Fatalf("client: %v", err) + } + + go func() { + for { + _, err := client.Recv() + if err != nil { + return + } + } + }() + + msg := make([]byte, packetSize) + b.SetBytes(int64(len(msg))) + b.ReportAllocs() + b.ResetTimer() + for range b.N { + if err := client.Send(clientKey, msg); err != nil { + b.Fatal(err) + } + } +} + +func TestParseSSOutput(t *testing.T) { + contents, err := os.ReadFile("testdata/example_ss.txt") + if err != nil { + t.Errorf("os.ReadFile(example_ss.txt) failed: %v", err) + } + seen := parseSSOutput(string(contents)) + if len(seen) == 0 { + t.Errorf("parseSSOutput expected non-empty map") + } +} + +func TestGetPerClientSendQueueDepth(t *testing.T) { + c := qt.New(t) + envKey := "TS_DEBUG_DERP_PER_CLIENT_SEND_QUEUE_DEPTH" + + testCases := []struct { + envVal string + want int + }{ + // Empty case, envknob treats empty as missing also. 
+ { + "", defaultPerClientSendQueueDepth, + }, + { + "64", 64, + }, + } + + for _, tc := range testCases { + t.Run(cmp.Or(tc.envVal, "empty"), func(t *testing.T) { + t.Setenv(envKey, tc.envVal) + val := getPerClientSendQueueDepth() + c.Assert(val, qt.Equals, tc.want) + }) + } +} diff --git a/derp/derphttp/derphttp_server.go b/derp/derpserver/handler.go similarity index 86% rename from derp/derphttp/derphttp_server.go rename to derp/derpserver/handler.go index 50aba774a9f1c..7cd6aa2fd5b95 100644 --- a/derp/derphttp/derphttp_server.go +++ b/derp/derpserver/handler.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package derphttp +package derpserver import ( "fmt" @@ -12,14 +12,8 @@ import ( "tailscale.com/derp" ) -// fastStartHeader is the header (with value "1") that signals to the HTTP -// server that the DERP HTTP client does not want the HTTP 101 response -// headers and it will begin writing & reading the DERP protocol immediately -// following its HTTP request. -const fastStartHeader = "Derp-Fast-Start" - // Handler returns an http.Handler to be mounted at /derp, serving s. -func Handler(s *derp.Server) http.Handler { +func Handler(s *Server) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -42,7 +36,7 @@ func Handler(s *derp.Server) http.Handler { return } - fastStart := r.Header.Get(fastStartHeader) == "1" + fastStart := r.Header.Get(derp.FastStartHeader) == "1" h, ok := w.(http.Hijacker) if !ok { @@ -69,7 +63,7 @@ func Handler(s *derp.Server) http.Handler { } if v := r.Header.Get(derp.IdealNodeHeader); v != "" { - ctx = derp.IdealNodeContextKey.WithValue(ctx, v) + ctx = IdealNodeContextKey.WithValue(ctx, v) } s.Accept(ctx, netConn, conn, netConn.RemoteAddr().String()) diff --git a/derp/testdata/example_ss.txt b/derp/derpserver/testdata/example_ss.txt similarity index 100% rename from derp/testdata/example_ss.txt rename to derp/derpserver/testdata/example_ss.txt diff --git a/derp/export_test.go b/derp/export_test.go new file mode 100644 index 0000000000000..677a4932d2657 --- /dev/null +++ b/derp/export_test.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package derp + +import "time" + +func (c *Client) RecvTimeoutForTest(timeout time.Duration) (m ReceivedMessage, err error) { + return c.recvTimeout(timeout) +} diff --git a/derp/xdp/xdp_linux.go b/derp/xdp/xdp_linux.go index 3ebe0a0520efc..309d9ee9a92b4 100644 --- a/derp/xdp/xdp_linux.go +++ b/derp/xdp/xdp_linux.go @@ -14,7 +14,6 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/link" "github.com/prometheus/client_golang/prometheus" - "tailscale.com/util/multierr" ) //go:generate go run github.com/cilium/ebpf/cmd/bpf2go -type config -type counters_key -type counter_key_af -type counter_key_packets_bytes_action -type counter_key_prog_end bpf xdp.c -- -I headers @@ -110,7 +109,7 @@ func (s *STUNServer) Close() error { errs = append(errs, s.link.Close()) } errs = append(errs, s.objs.Close()) - return multierr.New(errs...) + return errors.Join(errs...) 
} type stunServerMetrics struct { diff --git a/docs/windows/policy/en-US/tailscale.adml b/docs/windows/policy/en-US/tailscale.adml index 2e143d49c9c6c..a0be5e8314a2b 100644 --- a/docs/windows/policy/en-US/tailscale.adml +++ b/docs/windows/policy/en-US/tailscale.adml @@ -20,6 +20,7 @@ Tailscale version 1.82.0 and later Tailscale version 1.84.0 and later Tailscale version 1.86.0 and later + Tailscale version 1.90.0 and later Tailscale UI customization Settings @@ -60,7 +61,7 @@ Managing authentication keys via Group Policy and MDM solutions poses significan While MDM solutions tend to offer better control over who can access the policy setting values, they can still be compromised. Additionally, with both Group Policy and MDM solutions, the auth key is always readable by all users who have access to the device where this policy setting applies, as well as by all applications running on the device. A compromised auth key can potentially be used by a malicious actor to gain or elevate access to the target network. -Only consider this option after carefully reviewing the organization's security posture. For example, ensure you configure the auth keys specifically for the tag of the device and that access control policies only grant necessary access between the tailnet and the tagged device. Additionally, consider using short-lived auth keys, one-time auth keys (with one GPO/MDM configuration per device), Device Approval, and/or Tailnet lock to minimize risk. If you suspect an auth key has been compromised, revoke the auth key immediately. +Only consider this option after carefully reviewing the organization's security posture. For example, ensure you configure the auth keys specifically for the tag of the device and that access control policies only grant necessary access between the tailnet and the tagged device. Additionally, consider using short-lived auth keys, one-time auth keys (with one GPO/MDM configuration per device), Device Approval, and/or Tailnet Lock to minimize risk. If you suspect an auth key has been compromised, revoke the auth key immediately. If you enable this policy setting and specify an auth key, it will be used to authenticate the device unless the device is already logged in or an auth key is explicitly specified via the CLI. @@ -121,6 +122,14 @@ If you enable this policy setting, you can specify how long Tailscale will wait If you disable or don't configure this policy setting, Tailscale will only reconnect if a user chooses to or if required by a different policy setting. Refer to https://pkg.go.dev/time#ParseDuration for information about the supported duration strings.]]> + Allow users to restart tailscaled + Allow Local Network Access when an Exit Node is in use + + + @@ -187,6 +191,16 @@ + + + + + + + + + + diff --git a/drive/remote.go b/drive/remote.go index 9aeead710ad01..2c6fba894dbff 100644 --- a/drive/remote.go +++ b/drive/remote.go @@ -9,7 +9,6 @@ import ( "bytes" "errors" "net/http" - "regexp" "strings" ) @@ -21,10 +20,6 @@ var ( ErrInvalidShareName = errors.New("Share names may only contain the letters a-z, underscore _, parentheses (), or spaces") ) -var ( - shareNameRegex = regexp.MustCompile(`^[a-z0-9_\(\) ]+$`) -) - // AllowShareAs reports whether sharing files as a specific user is allowed. 
func AllowShareAs() bool { return !DisallowShareAs && doAllowShareAs() @@ -125,9 +120,26 @@ func NormalizeShareName(name string) (string, error) { // Trim whitespace name = strings.TrimSpace(name) - if !shareNameRegex.MatchString(name) { + if !validShareName(name) { return "", ErrInvalidShareName } return name, nil } + +func validShareName(name string) bool { + if name == "" { + return false + } + for _, r := range name { + if 'a' <= r && r <= 'z' || '0' <= r && r <= '9' { + continue + } + switch r { + case '_', ' ', '(', ')': + continue + } + return false + } + return true +} diff --git a/envknob/envknob.go b/envknob/envknob.go index e581eb27e11cb..9dea8f74d15df 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -32,6 +32,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/kube/kubetypes" "tailscale.com/types/opt" "tailscale.com/version" @@ -463,7 +464,12 @@ var allowRemoteUpdate = RegisterBool("TS_ALLOW_ADMIN_CONSOLE_REMOTE_UPDATE") // AllowsRemoteUpdate reports whether this node has opted-in to letting the // Tailscale control plane initiate a Tailscale update (e.g. on behalf of an // admin on the admin console). -func AllowsRemoteUpdate() bool { return allowRemoteUpdate() } +func AllowsRemoteUpdate() bool { + if !buildfeatures.HasClientUpdate { + return false + } + return allowRemoteUpdate() +} // SetNoLogsNoSupport enables no-logs-no-support mode. func SetNoLogsNoSupport() { @@ -474,6 +480,9 @@ func SetNoLogsNoSupport() { var notInInit atomic.Bool func assertNotInInit() { + if !buildfeatures.HasDebug { + return + } if notInInit.Load() { return } @@ -533,6 +542,11 @@ func ApplyDiskConfigError() error { return applyDiskConfigErr } // for App Store builds // - /etc/tailscale/tailscaled-env.txt for tailscaled-on-macOS (homebrew, etc) func ApplyDiskConfig() (err error) { + if runtime.GOOS == "linux" && !(buildfeatures.HasDebug || buildfeatures.HasSynology) { + // This function does nothing on Linux, unless you're + // using TS_DEBUG_ENV_FILE or are on Synology. + return nil + } var f *os.File defer func() { if err != nil { @@ -593,7 +607,7 @@ func getPlatformEnvFiles() []string { filepath.Join(os.Getenv("ProgramData"), "Tailscale", "tailscaled-env.txt"), } case "linux": - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { return []string{"/etc/tailscale/tailscaled-env.txt"} } case "darwin": diff --git a/feature/ace/ace.go b/feature/ace/ace.go new file mode 100644 index 0000000000000..b6d36543c5281 --- /dev/null +++ b/feature/ace/ace.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package ace registers support for Alternate Connectivity Endpoints (ACE). +package ace + +import ( + "net/netip" + + "tailscale.com/control/controlhttp" + "tailscale.com/net/ace" + "tailscale.com/net/netx" +) + +func init() { + controlhttp.HookMakeACEDialer.Set(mkDialer) +} + +func mkDialer(dialer netx.DialFunc, aceHost string, optIP netip.Addr) netx.DialFunc { + return (&ace.Dialer{ + ACEHost: aceHost, + ACEHostIP: optIP, // may be zero + NetDialer: dialer, + }).Dial +} diff --git a/feature/appconnectors/appconnectors.go b/feature/appconnectors/appconnectors.go new file mode 100644 index 0000000000000..28f5ccde35acb --- /dev/null +++ b/feature/appconnectors/appconnectors.go @@ -0,0 +1,39 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package appconnectors registers support for Tailscale App Connectors. 
+package appconnectors + +import ( + "encoding/json" + "net/http" + + "tailscale.com/ipn/ipnlocal" + "tailscale.com/tailcfg" +) + +func init() { + ipnlocal.RegisterC2N("GET /appconnector/routes", handleC2NAppConnectorDomainRoutesGet) +} + +// handleC2NAppConnectorDomainRoutesGet handles returning the domains +// that the app connector is responsible for, as well as the resolved +// IP addresses for each domain. If the node is not configured as +// an app connector, an empty map is returned. +func handleC2NAppConnectorDomainRoutesGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + logf := b.Logger() + logf("c2n: GET /appconnector/routes received") + + var res tailcfg.C2NAppConnectorDomainRoutesResponse + appConnector := b.AppConnector() + if appConnector == nil { + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) + return + } + + res.Domains = appConnector.DomainRoutes() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} diff --git a/feature/buildfeatures/buildfeatures.go b/feature/buildfeatures/buildfeatures.go new file mode 100644 index 0000000000000..cdb31dc015673 --- /dev/null +++ b/feature/buildfeatures/buildfeatures.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:generate go run gen.go + +// The buildfeatures package contains boolean constants indicating which +// features were included in the binary (via build tags), for use in dead code +// elimination when using separate build tag protected files is impractical +// or undesirable. +package buildfeatures diff --git a/feature/buildfeatures/feature_ace_disabled.go b/feature/buildfeatures/feature_ace_disabled.go new file mode 100644 index 0000000000000..b4808d4976b02 --- /dev/null +++ b/feature/buildfeatures/feature_ace_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_ace + +package buildfeatures + +// HasACE is whether the binary was built with support for modular feature "Alternate Connectivity Endpoints". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ace" build tag. +// It's a const so it can be used for dead code elimination. +const HasACE = false diff --git a/feature/buildfeatures/feature_ace_enabled.go b/feature/buildfeatures/feature_ace_enabled.go new file mode 100644 index 0000000000000..4812f9a61cd4c --- /dev/null +++ b/feature/buildfeatures/feature_ace_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_ace + +package buildfeatures + +// HasACE is whether the binary was built with support for modular feature "Alternate Connectivity Endpoints". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ace" build tag. +// It's a const so it can be used for dead code elimination. +const HasACE = true diff --git a/feature/buildfeatures/feature_acme_disabled.go b/feature/buildfeatures/feature_acme_disabled.go new file mode 100644 index 0000000000000..0a7f25a821cc5 --- /dev/null +++ b/feature/buildfeatures/feature_acme_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_acme + +package buildfeatures + +// HasACME is whether the binary was built with support for modular feature "ACME TLS certificate management". +// Specifically, it's whether the binary was NOT built with the "ts_omit_acme" build tag. +// It's a const so it can be used for dead code elimination. +const HasACME = false diff --git a/feature/buildfeatures/feature_acme_enabled.go b/feature/buildfeatures/feature_acme_enabled.go new file mode 100644 index 0000000000000..f074bfb4e1a7e --- /dev/null +++ b/feature/buildfeatures/feature_acme_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_acme + +package buildfeatures + +// HasACME is whether the binary was built with support for modular feature "ACME TLS certificate management". +// Specifically, it's whether the binary was NOT built with the "ts_omit_acme" build tag. +// It's a const so it can be used for dead code elimination. +const HasACME = true diff --git a/feature/buildfeatures/feature_advertiseexitnode_disabled.go b/feature/buildfeatures/feature_advertiseexitnode_disabled.go new file mode 100644 index 0000000000000..d4fdcec22db3c --- /dev/null +++ b/feature/buildfeatures/feature_advertiseexitnode_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_advertiseexitnode + +package buildfeatures + +// HasAdvertiseExitNode is whether the binary was built with support for modular feature "Run an exit node". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseExitNode = false diff --git a/feature/buildfeatures/feature_advertiseexitnode_enabled.go b/feature/buildfeatures/feature_advertiseexitnode_enabled.go new file mode 100644 index 0000000000000..28246143ecb3c --- /dev/null +++ b/feature/buildfeatures/feature_advertiseexitnode_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_advertiseexitnode + +package buildfeatures + +// HasAdvertiseExitNode is whether the binary was built with support for modular feature "Run an exit node". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseExitNode = true diff --git a/feature/buildfeatures/feature_advertiseroutes_disabled.go b/feature/buildfeatures/feature_advertiseroutes_disabled.go new file mode 100644 index 0000000000000..59042720f3870 --- /dev/null +++ b/feature/buildfeatures/feature_advertiseroutes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_advertiseroutes + +package buildfeatures + +// HasAdvertiseRoutes is whether the binary was built with support for modular feature "Advertise routes for other nodes to use". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseroutes" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasAdvertiseRoutes = false diff --git a/feature/buildfeatures/feature_advertiseroutes_enabled.go b/feature/buildfeatures/feature_advertiseroutes_enabled.go new file mode 100644 index 0000000000000..118fcd55d64e4 --- /dev/null +++ b/feature/buildfeatures/feature_advertiseroutes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_advertiseroutes + +package buildfeatures + +// HasAdvertiseRoutes is whether the binary was built with support for modular feature "Advertise routes for other nodes to use". +// Specifically, it's whether the binary was NOT built with the "ts_omit_advertiseroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasAdvertiseRoutes = true diff --git a/feature/buildfeatures/feature_appconnectors_disabled.go b/feature/buildfeatures/feature_appconnectors_disabled.go new file mode 100644 index 0000000000000..64ea8f86b4104 --- /dev/null +++ b/feature/buildfeatures/feature_appconnectors_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_appconnectors + +package buildfeatures + +// HasAppConnectors is whether the binary was built with support for modular feature "App Connectors support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_appconnectors" build tag. +// It's a const so it can be used for dead code elimination. +const HasAppConnectors = false diff --git a/feature/buildfeatures/feature_appconnectors_enabled.go b/feature/buildfeatures/feature_appconnectors_enabled.go new file mode 100644 index 0000000000000..e00eaffa3e6fc --- /dev/null +++ b/feature/buildfeatures/feature_appconnectors_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_appconnectors + +package buildfeatures + +// HasAppConnectors is whether the binary was built with support for modular feature "App Connectors support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_appconnectors" build tag. +// It's a const so it can be used for dead code elimination. +const HasAppConnectors = true diff --git a/feature/buildfeatures/feature_aws_disabled.go b/feature/buildfeatures/feature_aws_disabled.go new file mode 100644 index 0000000000000..66b670c1fe451 --- /dev/null +++ b/feature/buildfeatures/feature_aws_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_aws + +package buildfeatures + +// HasAWS is whether the binary was built with support for modular feature "AWS integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. +// It's a const so it can be used for dead code elimination. +const HasAWS = false diff --git a/feature/buildfeatures/feature_aws_enabled.go b/feature/buildfeatures/feature_aws_enabled.go new file mode 100644 index 0000000000000..30203b2aa6df8 --- /dev/null +++ b/feature/buildfeatures/feature_aws_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_aws + +package buildfeatures + +// HasAWS is whether the binary was built with support for modular feature "AWS integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_aws" build tag. +// It's a const so it can be used for dead code elimination. +const HasAWS = true diff --git a/feature/buildfeatures/feature_bakedroots_disabled.go b/feature/buildfeatures/feature_bakedroots_disabled.go new file mode 100644 index 0000000000000..f203bc1b06d44 --- /dev/null +++ b/feature/buildfeatures/feature_bakedroots_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_bakedroots + +package buildfeatures + +// HasBakedRoots is whether the binary was built with support for modular feature "Embed CA (LetsEncrypt) x509 roots to use as fallback". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bakedroots" build tag. +// It's a const so it can be used for dead code elimination. +const HasBakedRoots = false diff --git a/feature/buildfeatures/feature_bakedroots_enabled.go b/feature/buildfeatures/feature_bakedroots_enabled.go new file mode 100644 index 0000000000000..69cf2c34ccf6a --- /dev/null +++ b/feature/buildfeatures/feature_bakedroots_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_bakedroots + +package buildfeatures + +// HasBakedRoots is whether the binary was built with support for modular feature "Embed CA (LetsEncrypt) x509 roots to use as fallback". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bakedroots" build tag. +// It's a const so it can be used for dead code elimination. +const HasBakedRoots = true diff --git a/feature/buildfeatures/feature_bird_disabled.go b/feature/buildfeatures/feature_bird_disabled.go new file mode 100644 index 0000000000000..469aa41f954a9 --- /dev/null +++ b/feature/buildfeatures/feature_bird_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_bird + +package buildfeatures + +// HasBird is whether the binary was built with support for modular feature "Bird BGP integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. +// It's a const so it can be used for dead code elimination. +const HasBird = false diff --git a/feature/buildfeatures/feature_bird_enabled.go b/feature/buildfeatures/feature_bird_enabled.go new file mode 100644 index 0000000000000..792129f64f567 --- /dev/null +++ b/feature/buildfeatures/feature_bird_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_bird + +package buildfeatures + +// HasBird is whether the binary was built with support for modular feature "Bird BGP integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_bird" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasBird = true diff --git a/feature/buildfeatures/feature_c2n_disabled.go b/feature/buildfeatures/feature_c2n_disabled.go new file mode 100644 index 0000000000000..bc37e9e7bfd23 --- /dev/null +++ b/feature/buildfeatures/feature_c2n_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_c2n + +package buildfeatures + +// HasC2N is whether the binary was built with support for modular feature "Control-to-node (C2N) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_c2n" build tag. +// It's a const so it can be used for dead code elimination. +const HasC2N = false diff --git a/feature/buildfeatures/feature_c2n_enabled.go b/feature/buildfeatures/feature_c2n_enabled.go new file mode 100644 index 0000000000000..5950e71571652 --- /dev/null +++ b/feature/buildfeatures/feature_c2n_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_c2n + +package buildfeatures + +// HasC2N is whether the binary was built with support for modular feature "Control-to-node (C2N) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_c2n" build tag. +// It's a const so it can be used for dead code elimination. +const HasC2N = true diff --git a/feature/buildfeatures/feature_captiveportal_disabled.go b/feature/buildfeatures/feature_captiveportal_disabled.go new file mode 100644 index 0000000000000..367fef81bdc16 --- /dev/null +++ b/feature/buildfeatures/feature_captiveportal_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_captiveportal + +package buildfeatures + +// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection". +// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag. +// It's a const so it can be used for dead code elimination. +const HasCaptivePortal = false diff --git a/feature/buildfeatures/feature_captiveportal_enabled.go b/feature/buildfeatures/feature_captiveportal_enabled.go new file mode 100644 index 0000000000000..bd8e1f6a80ff1 --- /dev/null +++ b/feature/buildfeatures/feature_captiveportal_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_captiveportal + +package buildfeatures + +// HasCaptivePortal is whether the binary was built with support for modular feature "Captive portal detection". +// Specifically, it's whether the binary was NOT built with the "ts_omit_captiveportal" build tag. +// It's a const so it can be used for dead code elimination. +const HasCaptivePortal = true diff --git a/feature/buildfeatures/feature_capture_disabled.go b/feature/buildfeatures/feature_capture_disabled.go new file mode 100644 index 0000000000000..58535958f26e8 --- /dev/null +++ b/feature/buildfeatures/feature_capture_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_capture + +package buildfeatures + +// HasCapture is whether the binary was built with support for modular feature "Packet capture". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. +// It's a const so it can be used for dead code elimination. +const HasCapture = false diff --git a/feature/buildfeatures/feature_capture_enabled.go b/feature/buildfeatures/feature_capture_enabled.go new file mode 100644 index 0000000000000..7120a3d06fa7d --- /dev/null +++ b/feature/buildfeatures/feature_capture_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_capture + +package buildfeatures + +// HasCapture is whether the binary was built with support for modular feature "Packet capture". +// Specifically, it's whether the binary was NOT built with the "ts_omit_capture" build tag. +// It's a const so it can be used for dead code elimination. +const HasCapture = true diff --git a/feature/buildfeatures/feature_cliconndiag_disabled.go b/feature/buildfeatures/feature_cliconndiag_disabled.go new file mode 100644 index 0000000000000..06d8c7935fd4a --- /dev/null +++ b/feature/buildfeatures/feature_cliconndiag_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cliconndiag + +package buildfeatures + +// HasCLIConnDiag is whether the binary was built with support for modular feature "CLI connection error diagnostics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cliconndiag" build tag. +// It's a const so it can be used for dead code elimination. +const HasCLIConnDiag = false diff --git a/feature/buildfeatures/feature_cliconndiag_enabled.go b/feature/buildfeatures/feature_cliconndiag_enabled.go new file mode 100644 index 0000000000000..d6125ef08051c --- /dev/null +++ b/feature/buildfeatures/feature_cliconndiag_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_cliconndiag + +package buildfeatures + +// HasCLIConnDiag is whether the binary was built with support for modular feature "CLI connection error diagnostics". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cliconndiag" build tag. +// It's a const so it can be used for dead code elimination. +const HasCLIConnDiag = true diff --git a/feature/buildfeatures/feature_clientmetrics_disabled.go b/feature/buildfeatures/feature_clientmetrics_disabled.go new file mode 100644 index 0000000000000..721908bb079a2 --- /dev/null +++ b/feature/buildfeatures/feature_clientmetrics_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_clientmetrics + +package buildfeatures + +// HasClientMetrics is whether the binary was built with support for modular feature "Client metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientmetrics" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasClientMetrics = false diff --git a/feature/buildfeatures/feature_clientmetrics_enabled.go b/feature/buildfeatures/feature_clientmetrics_enabled.go new file mode 100644 index 0000000000000..deaeb6e69b1c3 --- /dev/null +++ b/feature/buildfeatures/feature_clientmetrics_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_clientmetrics + +package buildfeatures + +// HasClientMetrics is whether the binary was built with support for modular feature "Client metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientmetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientMetrics = true diff --git a/feature/buildfeatures/feature_clientupdate_disabled.go b/feature/buildfeatures/feature_clientupdate_disabled.go new file mode 100644 index 0000000000000..165c9cc9a409d --- /dev/null +++ b/feature/buildfeatures/feature_clientupdate_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_clientupdate + +package buildfeatures + +// HasClientUpdate is whether the binary was built with support for modular feature "Client auto-update support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientupdate" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientUpdate = false diff --git a/feature/buildfeatures/feature_clientupdate_enabled.go b/feature/buildfeatures/feature_clientupdate_enabled.go new file mode 100644 index 0000000000000..3c3c7878c53a9 --- /dev/null +++ b/feature/buildfeatures/feature_clientupdate_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_clientupdate + +package buildfeatures + +// HasClientUpdate is whether the binary was built with support for modular feature "Client auto-update support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_clientupdate" build tag. +// It's a const so it can be used for dead code elimination. +const HasClientUpdate = true diff --git a/feature/buildfeatures/feature_cloud_disabled.go b/feature/buildfeatures/feature_cloud_disabled.go new file mode 100644 index 0000000000000..3b877a9c68d40 --- /dev/null +++ b/feature/buildfeatures/feature_cloud_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_cloud + +package buildfeatures + +// HasCloud is whether the binary was built with support for modular feature "detect cloud environment to learn instances IPs and DNS servers". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cloud" build tag. +// It's a const so it can be used for dead code elimination. +const HasCloud = false diff --git a/feature/buildfeatures/feature_cloud_enabled.go b/feature/buildfeatures/feature_cloud_enabled.go new file mode 100644 index 0000000000000..8fd748de56c7e --- /dev/null +++ b/feature/buildfeatures/feature_cloud_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_cloud + +package buildfeatures + +// HasCloud is whether the binary was built with support for modular feature "detect cloud environment to learn instances IPs and DNS servers". +// Specifically, it's whether the binary was NOT built with the "ts_omit_cloud" build tag. +// It's a const so it can be used for dead code elimination. +const HasCloud = true diff --git a/feature/buildfeatures/feature_completion_disabled.go b/feature/buildfeatures/feature_completion_disabled.go new file mode 100644 index 0000000000000..ea319beb0af3e --- /dev/null +++ b/feature/buildfeatures/feature_completion_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_completion + +package buildfeatures + +// HasCompletion is whether the binary was built with support for modular feature "CLI shell completion". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. +// It's a const so it can be used for dead code elimination. +const HasCompletion = false diff --git a/feature/buildfeatures/feature_completion_enabled.go b/feature/buildfeatures/feature_completion_enabled.go new file mode 100644 index 0000000000000..6db41c97b3e76 --- /dev/null +++ b/feature/buildfeatures/feature_completion_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_completion + +package buildfeatures + +// HasCompletion is whether the binary was built with support for modular feature "CLI shell completion". +// Specifically, it's whether the binary was NOT built with the "ts_omit_completion" build tag. +// It's a const so it can be used for dead code elimination. +const HasCompletion = true diff --git a/feature/buildfeatures/feature_dbus_disabled.go b/feature/buildfeatures/feature_dbus_disabled.go new file mode 100644 index 0000000000000..e6ab896773fd1 --- /dev/null +++ b/feature/buildfeatures/feature_dbus_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_dbus + +package buildfeatures + +// HasDBus is whether the binary was built with support for modular feature "Linux DBus support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDBus = false diff --git a/feature/buildfeatures/feature_dbus_enabled.go b/feature/buildfeatures/feature_dbus_enabled.go new file mode 100644 index 0000000000000..374331cdabe0c --- /dev/null +++ b/feature/buildfeatures/feature_dbus_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_dbus + +package buildfeatures + +// HasDBus is whether the binary was built with support for modular feature "Linux DBus support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dbus" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDBus = true diff --git a/feature/buildfeatures/feature_debug_disabled.go b/feature/buildfeatures/feature_debug_disabled.go new file mode 100644 index 0000000000000..eb048c0826eb9 --- /dev/null +++ b/feature/buildfeatures/feature_debug_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debug + +package buildfeatures + +// HasDebug is whether the binary was built with support for modular feature "various debug support, for things that don't have or need their own more specific feature". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debug" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebug = false diff --git a/feature/buildfeatures/feature_debug_enabled.go b/feature/buildfeatures/feature_debug_enabled.go new file mode 100644 index 0000000000000..12a2700a45761 --- /dev/null +++ b/feature/buildfeatures/feature_debug_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debug + +package buildfeatures + +// HasDebug is whether the binary was built with support for modular feature "various debug support, for things that don't have or need their own more specific feature". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debug" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebug = true diff --git a/feature/buildfeatures/feature_debugeventbus_disabled.go b/feature/buildfeatures/feature_debugeventbus_disabled.go new file mode 100644 index 0000000000000..2eb59993444af --- /dev/null +++ b/feature/buildfeatures/feature_debugeventbus_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_debugeventbus + +package buildfeatures + +// HasDebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugEventBus = false diff --git a/feature/buildfeatures/feature_debugeventbus_enabled.go b/feature/buildfeatures/feature_debugeventbus_enabled.go new file mode 100644 index 0000000000000..df13b6fa23167 --- /dev/null +++ b/feature/buildfeatures/feature_debugeventbus_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debugeventbus + +package buildfeatures + +// HasDebugEventBus is whether the binary was built with support for modular feature "eventbus debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugeventbus" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugEventBus = true diff --git a/feature/buildfeatures/feature_debugportmapper_disabled.go b/feature/buildfeatures/feature_debugportmapper_disabled.go new file mode 100644 index 0000000000000..eff85b8baaf50 --- /dev/null +++ b/feature/buildfeatures/feature_debugportmapper_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_debugportmapper + +package buildfeatures + +// HasDebugPortMapper is whether the binary was built with support for modular feature "portmapper debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugportmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugPortMapper = false diff --git a/feature/buildfeatures/feature_debugportmapper_enabled.go b/feature/buildfeatures/feature_debugportmapper_enabled.go new file mode 100644 index 0000000000000..491aa5ed84af1 --- /dev/null +++ b/feature/buildfeatures/feature_debugportmapper_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_debugportmapper + +package buildfeatures + +// HasDebugPortMapper is whether the binary was built with support for modular feature "portmapper debug support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_debugportmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasDebugPortMapper = true diff --git a/feature/buildfeatures/feature_desktop_sessions_disabled.go b/feature/buildfeatures/feature_desktop_sessions_disabled.go new file mode 100644 index 0000000000000..1536c886fec25 --- /dev/null +++ b/feature/buildfeatures/feature_desktop_sessions_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_desktop_sessions + +package buildfeatures + +// HasDesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. +// It's a const so it can be used for dead code elimination. +const HasDesktopSessions = false diff --git a/feature/buildfeatures/feature_desktop_sessions_enabled.go b/feature/buildfeatures/feature_desktop_sessions_enabled.go new file mode 100644 index 0000000000000..84658de952c86 --- /dev/null +++ b/feature/buildfeatures/feature_desktop_sessions_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_desktop_sessions + +package buildfeatures + +// HasDesktopSessions is whether the binary was built with support for modular feature "Desktop sessions support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_desktop_sessions" build tag. +// It's a const so it can be used for dead code elimination. +const HasDesktopSessions = true diff --git a/feature/buildfeatures/feature_dns_disabled.go b/feature/buildfeatures/feature_dns_disabled.go new file mode 100644 index 0000000000000..30d7379cb9092 --- /dev/null +++ b/feature/buildfeatures/feature_dns_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_dns + +package buildfeatures + +// HasDNS is whether the binary was built with support for modular feature "MagicDNS and system DNS configuration support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dns" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasDNS = false diff --git a/feature/buildfeatures/feature_dns_enabled.go b/feature/buildfeatures/feature_dns_enabled.go new file mode 100644 index 0000000000000..962f2596bf5c9 --- /dev/null +++ b/feature/buildfeatures/feature_dns_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_dns + +package buildfeatures + +// HasDNS is whether the binary was built with support for modular feature "MagicDNS and system DNS configuration support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_dns" build tag. +// It's a const so it can be used for dead code elimination. +const HasDNS = true diff --git a/feature/buildfeatures/feature_doctor_disabled.go b/feature/buildfeatures/feature_doctor_disabled.go new file mode 100644 index 0000000000000..8c15e951e311f --- /dev/null +++ b/feature/buildfeatures/feature_doctor_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_doctor + +package buildfeatures + +// HasDoctor is whether the binary was built with support for modular feature "Diagnose possible issues with Tailscale and its host environment". +// Specifically, it's whether the binary was NOT built with the "ts_omit_doctor" build tag. +// It's a const so it can be used for dead code elimination. +const HasDoctor = false diff --git a/feature/buildfeatures/feature_doctor_enabled.go b/feature/buildfeatures/feature_doctor_enabled.go new file mode 100644 index 0000000000000..a8a0bb7d2056b --- /dev/null +++ b/feature/buildfeatures/feature_doctor_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_doctor + +package buildfeatures + +// HasDoctor is whether the binary was built with support for modular feature "Diagnose possible issues with Tailscale and its host environment". +// Specifically, it's whether the binary was NOT built with the "ts_omit_doctor" build tag. +// It's a const so it can be used for dead code elimination. +const HasDoctor = true diff --git a/feature/buildfeatures/feature_drive_disabled.go b/feature/buildfeatures/feature_drive_disabled.go new file mode 100644 index 0000000000000..07202638952e8 --- /dev/null +++ b/feature/buildfeatures/feature_drive_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_drive + +package buildfeatures + +// HasDrive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. +// It's a const so it can be used for dead code elimination. +const HasDrive = false diff --git a/feature/buildfeatures/feature_drive_enabled.go b/feature/buildfeatures/feature_drive_enabled.go new file mode 100644 index 0000000000000..9f58836a43fc7 --- /dev/null +++ b/feature/buildfeatures/feature_drive_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_drive + +package buildfeatures + +// HasDrive is whether the binary was built with support for modular feature "Tailscale Drive (file server) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_drive" build tag. +// It's a const so it can be used for dead code elimination. +const HasDrive = true diff --git a/feature/buildfeatures/feature_gro_disabled.go b/feature/buildfeatures/feature_gro_disabled.go new file mode 100644 index 0000000000000..ffbd0da2e3e4f --- /dev/null +++ b/feature/buildfeatures/feature_gro_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_gro + +package buildfeatures + +// HasGRO is whether the binary was built with support for modular feature "Generic Receive Offload support (performance)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_gro" build tag. +// It's a const so it can be used for dead code elimination. +const HasGRO = false diff --git a/feature/buildfeatures/feature_gro_enabled.go b/feature/buildfeatures/feature_gro_enabled.go new file mode 100644 index 0000000000000..e2c8024e07815 --- /dev/null +++ b/feature/buildfeatures/feature_gro_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_gro + +package buildfeatures + +// HasGRO is whether the binary was built with support for modular feature "Generic Receive Offload support (performance)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_gro" build tag. +// It's a const so it can be used for dead code elimination. +const HasGRO = true diff --git a/feature/buildfeatures/feature_health_disabled.go b/feature/buildfeatures/feature_health_disabled.go new file mode 100644 index 0000000000000..2f2bcf240a455 --- /dev/null +++ b/feature/buildfeatures/feature_health_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_health + +package buildfeatures + +// HasHealth is whether the binary was built with support for modular feature "Health checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_health" build tag. +// It's a const so it can be used for dead code elimination. +const HasHealth = false diff --git a/feature/buildfeatures/feature_health_enabled.go b/feature/buildfeatures/feature_health_enabled.go new file mode 100644 index 0000000000000..00ce3684eb6db --- /dev/null +++ b/feature/buildfeatures/feature_health_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_health + +package buildfeatures + +// HasHealth is whether the binary was built with support for modular feature "Health checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_health" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasHealth = true diff --git a/feature/buildfeatures/feature_hujsonconf_disabled.go b/feature/buildfeatures/feature_hujsonconf_disabled.go new file mode 100644 index 0000000000000..cee076bc24527 --- /dev/null +++ b/feature/buildfeatures/feature_hujsonconf_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_hujsonconf + +package buildfeatures + +// HasHuJSONConf is whether the binary was built with support for modular feature "HuJSON config file support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_hujsonconf" build tag. +// It's a const so it can be used for dead code elimination. +const HasHuJSONConf = false diff --git a/feature/buildfeatures/feature_hujsonconf_enabled.go b/feature/buildfeatures/feature_hujsonconf_enabled.go new file mode 100644 index 0000000000000..aefeeace5f0b9 --- /dev/null +++ b/feature/buildfeatures/feature_hujsonconf_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_hujsonconf + +package buildfeatures + +// HasHuJSONConf is whether the binary was built with support for modular feature "HuJSON config file support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_hujsonconf" build tag. +// It's a const so it can be used for dead code elimination. +const HasHuJSONConf = true diff --git a/feature/buildfeatures/feature_identity_federation_disabled.go b/feature/buildfeatures/feature_identity_federation_disabled.go new file mode 100644 index 0000000000000..c7b16f729cbc5 --- /dev/null +++ b/feature/buildfeatures/feature_identity_federation_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_identity_federation + +package buildfeatures + +// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// It's a const so it can be used for dead code elimination. +const HasIdentityFederation = false diff --git a/feature/buildfeatures/feature_identity_federation_enabled.go b/feature/buildfeatures/feature_identity_federation_enabled.go new file mode 100644 index 0000000000000..1f7cf17423c96 --- /dev/null +++ b/feature/buildfeatures/feature_identity_federation_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_identity_federation + +package buildfeatures + +// HasIdentityFederation is whether the binary was built with support for modular feature "Identity token exchange for auth key support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_identity_federation" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasIdentityFederation = true diff --git a/feature/buildfeatures/feature_iptables_disabled.go b/feature/buildfeatures/feature_iptables_disabled.go new file mode 100644 index 0000000000000..8cda5be5d6ae6 --- /dev/null +++ b/feature/buildfeatures/feature_iptables_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_iptables + +package buildfeatures + +// HasIPTables is whether the binary was built with support for modular feature "Linux iptables support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_iptables" build tag. +// It's a const so it can be used for dead code elimination. +const HasIPTables = false diff --git a/feature/buildfeatures/feature_iptables_enabled.go b/feature/buildfeatures/feature_iptables_enabled.go new file mode 100644 index 0000000000000..44d98473f05f2 --- /dev/null +++ b/feature/buildfeatures/feature_iptables_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_iptables + +package buildfeatures + +// HasIPTables is whether the binary was built with support for modular feature "Linux iptables support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_iptables" build tag. +// It's a const so it can be used for dead code elimination. +const HasIPTables = true diff --git a/feature/buildfeatures/feature_kube_disabled.go b/feature/buildfeatures/feature_kube_disabled.go new file mode 100644 index 0000000000000..2b76c57e78b94 --- /dev/null +++ b/feature/buildfeatures/feature_kube_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_kube + +package buildfeatures + +// HasKube is whether the binary was built with support for modular feature "Kubernetes integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. +// It's a const so it can be used for dead code elimination. +const HasKube = false diff --git a/feature/buildfeatures/feature_kube_enabled.go b/feature/buildfeatures/feature_kube_enabled.go new file mode 100644 index 0000000000000..7abca1759fc49 --- /dev/null +++ b/feature/buildfeatures/feature_kube_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_kube + +package buildfeatures + +// HasKube is whether the binary was built with support for modular feature "Kubernetes integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_kube" build tag. +// It's a const so it can be used for dead code elimination. +const HasKube = true diff --git a/feature/buildfeatures/feature_lazywg_disabled.go b/feature/buildfeatures/feature_lazywg_disabled.go new file mode 100644 index 0000000000000..ce81d80bab6a1 --- /dev/null +++ b/feature/buildfeatures/feature_lazywg_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_lazywg + +package buildfeatures + +// HasLazyWG is whether the binary was built with support for modular feature "Lazy WireGuard configuration for memory-constrained devices with large netmaps". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_lazywg" build tag. +// It's a const so it can be used for dead code elimination. +const HasLazyWG = false diff --git a/feature/buildfeatures/feature_lazywg_enabled.go b/feature/buildfeatures/feature_lazywg_enabled.go new file mode 100644 index 0000000000000..259357f7f86ef --- /dev/null +++ b/feature/buildfeatures/feature_lazywg_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_lazywg + +package buildfeatures + +// HasLazyWG is whether the binary was built with support for modular feature "Lazy WireGuard configuration for memory-constrained devices with large netmaps". +// Specifically, it's whether the binary was NOT built with the "ts_omit_lazywg" build tag. +// It's a const so it can be used for dead code elimination. +const HasLazyWG = true diff --git a/feature/buildfeatures/feature_linkspeed_disabled.go b/feature/buildfeatures/feature_linkspeed_disabled.go new file mode 100644 index 0000000000000..19e254a740ff7 --- /dev/null +++ b/feature/buildfeatures/feature_linkspeed_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_linkspeed + +package buildfeatures + +// HasLinkSpeed is whether the binary was built with support for modular feature "Set link speed on TUN device for better OS integration (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linkspeed" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinkSpeed = false diff --git a/feature/buildfeatures/feature_linkspeed_enabled.go b/feature/buildfeatures/feature_linkspeed_enabled.go new file mode 100644 index 0000000000000..939858a162910 --- /dev/null +++ b/feature/buildfeatures/feature_linkspeed_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_linkspeed + +package buildfeatures + +// HasLinkSpeed is whether the binary was built with support for modular feature "Set link speed on TUN device for better OS integration (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linkspeed" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinkSpeed = true diff --git a/feature/buildfeatures/feature_linuxdnsfight_disabled.go b/feature/buildfeatures/feature_linuxdnsfight_disabled.go new file mode 100644 index 0000000000000..2e5b50ea06af0 --- /dev/null +++ b/feature/buildfeatures/feature_linuxdnsfight_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_linuxdnsfight + +package buildfeatures + +// HasLinuxDNSFight is whether the binary was built with support for modular feature "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linuxdnsfight" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLinuxDNSFight = false diff --git a/feature/buildfeatures/feature_linuxdnsfight_enabled.go b/feature/buildfeatures/feature_linuxdnsfight_enabled.go new file mode 100644 index 0000000000000..b9419fccbfc09 --- /dev/null +++ b/feature/buildfeatures/feature_linuxdnsfight_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_linuxdnsfight + +package buildfeatures + +// HasLinuxDNSFight is whether the binary was built with support for modular feature "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_linuxdnsfight" build tag. +// It's a const so it can be used for dead code elimination. +const HasLinuxDNSFight = true diff --git a/feature/buildfeatures/feature_listenrawdisco_disabled.go b/feature/buildfeatures/feature_listenrawdisco_disabled.go new file mode 100644 index 0000000000000..2911780636cb7 --- /dev/null +++ b/feature/buildfeatures/feature_listenrawdisco_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_listenrawdisco + +package buildfeatures + +// HasListenRawDisco is whether the binary was built with support for modular feature "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_listenrawdisco" build tag. +// It's a const so it can be used for dead code elimination. +const HasListenRawDisco = false diff --git a/feature/buildfeatures/feature_listenrawdisco_enabled.go b/feature/buildfeatures/feature_listenrawdisco_enabled.go new file mode 100644 index 0000000000000..4a4f85ae37319 --- /dev/null +++ b/feature/buildfeatures/feature_listenrawdisco_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_listenrawdisco + +package buildfeatures + +// HasListenRawDisco is whether the binary was built with support for modular feature "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_listenrawdisco" build tag. +// It's a const so it can be used for dead code elimination. +const HasListenRawDisco = true diff --git a/feature/buildfeatures/feature_logtail_disabled.go b/feature/buildfeatures/feature_logtail_disabled.go new file mode 100644 index 0000000000000..140092a2eba5b --- /dev/null +++ b/feature/buildfeatures/feature_logtail_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_logtail + +package buildfeatures + +// HasLogTail is whether the binary was built with support for modular feature "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_logtail" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasLogTail = false diff --git a/feature/buildfeatures/feature_logtail_enabled.go b/feature/buildfeatures/feature_logtail_enabled.go new file mode 100644 index 0000000000000..6e777216bf3cb --- /dev/null +++ b/feature/buildfeatures/feature_logtail_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_logtail + +package buildfeatures + +// HasLogTail is whether the binary was built with support for modular feature "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_logtail" build tag. +// It's a const so it can be used for dead code elimination. +const HasLogTail = true diff --git a/feature/buildfeatures/feature_netlog_disabled.go b/feature/buildfeatures/feature_netlog_disabled.go new file mode 100644 index 0000000000000..60367a12600f3 --- /dev/null +++ b/feature/buildfeatures/feature_netlog_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_netlog + +package buildfeatures + +// HasNetLog is whether the binary was built with support for modular feature "Network flow logging support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netlog" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetLog = false diff --git a/feature/buildfeatures/feature_netlog_enabled.go b/feature/buildfeatures/feature_netlog_enabled.go new file mode 100644 index 0000000000000..f9d2abad30553 --- /dev/null +++ b/feature/buildfeatures/feature_netlog_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_netlog + +package buildfeatures + +// HasNetLog is whether the binary was built with support for modular feature "Network flow logging support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netlog" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetLog = true diff --git a/feature/buildfeatures/feature_netstack_disabled.go b/feature/buildfeatures/feature_netstack_disabled.go new file mode 100644 index 0000000000000..acb6e8e76396e --- /dev/null +++ b/feature/buildfeatures/feature_netstack_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_netstack + +package buildfeatures + +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetstack = false diff --git a/feature/buildfeatures/feature_netstack_enabled.go b/feature/buildfeatures/feature_netstack_enabled.go new file mode 100644 index 0000000000000..04f67118523a0 --- /dev/null +++ b/feature/buildfeatures/feature_netstack_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_netstack + +package buildfeatures + +// HasNetstack is whether the binary was built with support for modular feature "gVisor netstack (userspace networking) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_netstack" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetstack = true diff --git a/feature/buildfeatures/feature_networkmanager_disabled.go b/feature/buildfeatures/feature_networkmanager_disabled.go new file mode 100644 index 0000000000000..d0ec6f01796ab --- /dev/null +++ b/feature/buildfeatures/feature_networkmanager_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_networkmanager + +package buildfeatures + +// HasNetworkManager is whether the binary was built with support for modular feature "Linux NetworkManager integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_networkmanager" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetworkManager = false diff --git a/feature/buildfeatures/feature_networkmanager_enabled.go b/feature/buildfeatures/feature_networkmanager_enabled.go new file mode 100644 index 0000000000000..ec284c3109f75 --- /dev/null +++ b/feature/buildfeatures/feature_networkmanager_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_networkmanager + +package buildfeatures + +// HasNetworkManager is whether the binary was built with support for modular feature "Linux NetworkManager integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_networkmanager" build tag. +// It's a const so it can be used for dead code elimination. +const HasNetworkManager = true diff --git a/feature/buildfeatures/feature_oauthkey_disabled.go b/feature/buildfeatures/feature_oauthkey_disabled.go new file mode 100644 index 0000000000000..72ad1723b1d14 --- /dev/null +++ b/feature/buildfeatures/feature_oauthkey_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_oauthkey + +package buildfeatures + +// HasOAuthKey is whether the binary was built with support for modular feature "OAuth secret-to-authkey resolution support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_oauthkey" build tag. +// It's a const so it can be used for dead code elimination. +const HasOAuthKey = false diff --git a/feature/buildfeatures/feature_oauthkey_enabled.go b/feature/buildfeatures/feature_oauthkey_enabled.go new file mode 100644 index 0000000000000..39c52a2b0b46d --- /dev/null +++ b/feature/buildfeatures/feature_oauthkey_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_oauthkey + +package buildfeatures + +// HasOAuthKey is whether the binary was built with support for modular feature "OAuth secret-to-authkey resolution support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_oauthkey" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasOAuthKey = true diff --git a/feature/buildfeatures/feature_osrouter_disabled.go b/feature/buildfeatures/feature_osrouter_disabled.go new file mode 100644 index 0000000000000..ccd7192bb8899 --- /dev/null +++ b/feature/buildfeatures/feature_osrouter_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_osrouter + +package buildfeatures + +// HasOSRouter is whether the binary was built with support for modular feature "Configure the operating system's network stack, IPs, and routing tables". +// Specifically, it's whether the binary was NOT built with the "ts_omit_osrouter" build tag. +// It's a const so it can be used for dead code elimination. +const HasOSRouter = false diff --git a/feature/buildfeatures/feature_osrouter_enabled.go b/feature/buildfeatures/feature_osrouter_enabled.go new file mode 100644 index 0000000000000..a5dacc596bfbc --- /dev/null +++ b/feature/buildfeatures/feature_osrouter_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_osrouter + +package buildfeatures + +// HasOSRouter is whether the binary was built with support for modular feature "Configure the operating system's network stack, IPs, and routing tables". +// Specifically, it's whether the binary was NOT built with the "ts_omit_osrouter" build tag. +// It's a const so it can be used for dead code elimination. +const HasOSRouter = true diff --git a/feature/buildfeatures/feature_outboundproxy_disabled.go b/feature/buildfeatures/feature_outboundproxy_disabled.go new file mode 100644 index 0000000000000..bf74db0600927 --- /dev/null +++ b/feature/buildfeatures/feature_outboundproxy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_outboundproxy + +package buildfeatures + +// HasOutboundProxy is whether the binary was built with support for modular feature "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale". +// Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasOutboundProxy = false diff --git a/feature/buildfeatures/feature_outboundproxy_enabled.go b/feature/buildfeatures/feature_outboundproxy_enabled.go new file mode 100644 index 0000000000000..53bb99d5c6a79 --- /dev/null +++ b/feature/buildfeatures/feature_outboundproxy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_outboundproxy + +package buildfeatures + +// HasOutboundProxy is whether the binary was built with support for modular feature "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale". +// Specifically, it's whether the binary was NOT built with the "ts_omit_outboundproxy" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasOutboundProxy = true diff --git a/feature/buildfeatures/feature_peerapiclient_disabled.go b/feature/buildfeatures/feature_peerapiclient_disabled.go new file mode 100644 index 0000000000000..83cc2bdfeef5c --- /dev/null +++ b/feature/buildfeatures/feature_peerapiclient_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_peerapiclient + +package buildfeatures + +// HasPeerAPIClient is whether the binary was built with support for modular feature "PeerAPI client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIClient = false diff --git a/feature/buildfeatures/feature_peerapiclient_enabled.go b/feature/buildfeatures/feature_peerapiclient_enabled.go new file mode 100644 index 0000000000000..0bd3f50a869ca --- /dev/null +++ b/feature/buildfeatures/feature_peerapiclient_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_peerapiclient + +package buildfeatures + +// HasPeerAPIClient is whether the binary was built with support for modular feature "PeerAPI client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIClient = true diff --git a/feature/buildfeatures/feature_peerapiserver_disabled.go b/feature/buildfeatures/feature_peerapiserver_disabled.go new file mode 100644 index 0000000000000..4a4f32b8a4065 --- /dev/null +++ b/feature/buildfeatures/feature_peerapiserver_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_peerapiserver + +package buildfeatures + +// HasPeerAPIServer is whether the binary was built with support for modular feature "PeerAPI server support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIServer = false diff --git a/feature/buildfeatures/feature_peerapiserver_enabled.go b/feature/buildfeatures/feature_peerapiserver_enabled.go new file mode 100644 index 0000000000000..17d0547b80946 --- /dev/null +++ b/feature/buildfeatures/feature_peerapiserver_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_peerapiserver + +package buildfeatures + +// HasPeerAPIServer is whether the binary was built with support for modular feature "PeerAPI server support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_peerapiserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasPeerAPIServer = true diff --git a/feature/buildfeatures/feature_portlist_disabled.go b/feature/buildfeatures/feature_portlist_disabled.go new file mode 100644 index 0000000000000..934061fd8328f --- /dev/null +++ b/feature/buildfeatures/feature_portlist_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_portlist + +package buildfeatures + +// HasPortList is whether the binary was built with support for modular feature "Optionally advertise listening service ports". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portlist" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortList = false diff --git a/feature/buildfeatures/feature_portlist_enabled.go b/feature/buildfeatures/feature_portlist_enabled.go new file mode 100644 index 0000000000000..c1dc1c163b80e --- /dev/null +++ b/feature/buildfeatures/feature_portlist_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_portlist + +package buildfeatures + +// HasPortList is whether the binary was built with support for modular feature "Optionally advertise listening service ports". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portlist" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortList = true diff --git a/feature/buildfeatures/feature_portmapper_disabled.go b/feature/buildfeatures/feature_portmapper_disabled.go new file mode 100644 index 0000000000000..212b22d40abfb --- /dev/null +++ b/feature/buildfeatures/feature_portmapper_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_portmapper + +package buildfeatures + +// HasPortMapper is whether the binary was built with support for modular feature "NAT-PMP/PCP/UPnP port mapping support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortMapper = false diff --git a/feature/buildfeatures/feature_portmapper_enabled.go b/feature/buildfeatures/feature_portmapper_enabled.go new file mode 100644 index 0000000000000..2f915d277a313 --- /dev/null +++ b/feature/buildfeatures/feature_portmapper_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_portmapper + +package buildfeatures + +// HasPortMapper is whether the binary was built with support for modular feature "NAT-PMP/PCP/UPnP port mapping support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_portmapper" build tag. +// It's a const so it can be used for dead code elimination. +const HasPortMapper = true diff --git a/feature/buildfeatures/feature_posture_disabled.go b/feature/buildfeatures/feature_posture_disabled.go new file mode 100644 index 0000000000000..a78b1a95720cf --- /dev/null +++ b/feature/buildfeatures/feature_posture_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_posture + +package buildfeatures + +// HasPosture is whether the binary was built with support for modular feature "Device posture checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_posture" build tag. +// It's a const so it can be used for dead code elimination. 
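+//
+// Illustrative note (editorial, not emitted by gen.go): this variant of the
+// file is the one selected when the omit tag is passed at build time, e.g.
+//
+//	go build -tags ts_omit_posture ./cmd/tailscaled
+//
+// which makes HasPosture a compile-time false, so code guarded by it is
+// dropped by the linker.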
+const HasPosture = false diff --git a/feature/buildfeatures/feature_posture_enabled.go b/feature/buildfeatures/feature_posture_enabled.go new file mode 100644 index 0000000000000..dcd9595f9ca96 --- /dev/null +++ b/feature/buildfeatures/feature_posture_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_posture + +package buildfeatures + +// HasPosture is whether the binary was built with support for modular feature "Device posture checking support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_posture" build tag. +// It's a const so it can be used for dead code elimination. +const HasPosture = true diff --git a/feature/buildfeatures/feature_relayserver_disabled.go b/feature/buildfeatures/feature_relayserver_disabled.go new file mode 100644 index 0000000000000..08ced83101f96 --- /dev/null +++ b/feature/buildfeatures/feature_relayserver_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_relayserver + +package buildfeatures + +// HasRelayServer is whether the binary was built with support for modular feature "Relay server". +// Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasRelayServer = false diff --git a/feature/buildfeatures/feature_relayserver_enabled.go b/feature/buildfeatures/feature_relayserver_enabled.go new file mode 100644 index 0000000000000..6a35f8305d68f --- /dev/null +++ b/feature/buildfeatures/feature_relayserver_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_relayserver + +package buildfeatures + +// HasRelayServer is whether the binary was built with support for modular feature "Relay server". +// Specifically, it's whether the binary was NOT built with the "ts_omit_relayserver" build tag. +// It's a const so it can be used for dead code elimination. +const HasRelayServer = true diff --git a/feature/buildfeatures/feature_resolved_disabled.go b/feature/buildfeatures/feature_resolved_disabled.go new file mode 100644 index 0000000000000..283dd20c76aaa --- /dev/null +++ b/feature/buildfeatures/feature_resolved_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_resolved + +package buildfeatures + +// HasResolved is whether the binary was built with support for modular feature "Linux systemd-resolved integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_resolved" build tag. +// It's a const so it can be used for dead code elimination. +const HasResolved = false diff --git a/feature/buildfeatures/feature_resolved_enabled.go b/feature/buildfeatures/feature_resolved_enabled.go new file mode 100644 index 0000000000000..af1b3b41e9358 --- /dev/null +++ b/feature/buildfeatures/feature_resolved_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_resolved + +package buildfeatures + +// HasResolved is whether the binary was built with support for modular feature "Linux systemd-resolved integration". +// Specifically, it's whether the binary was NOT built with the "ts_omit_resolved" build tag. +// It's a const so it can be used for dead code elimination. +const HasResolved = true diff --git a/feature/buildfeatures/feature_sdnotify_disabled.go b/feature/buildfeatures/feature_sdnotify_disabled.go new file mode 100644 index 0000000000000..7efa2d22ff587 --- /dev/null +++ b/feature/buildfeatures/feature_sdnotify_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_sdnotify + +package buildfeatures + +// HasSDNotify is whether the binary was built with support for modular feature "systemd notification support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_sdnotify" build tag. +// It's a const so it can be used for dead code elimination. +const HasSDNotify = false diff --git a/feature/buildfeatures/feature_sdnotify_enabled.go b/feature/buildfeatures/feature_sdnotify_enabled.go new file mode 100644 index 0000000000000..40fec9755dd16 --- /dev/null +++ b/feature/buildfeatures/feature_sdnotify_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_sdnotify + +package buildfeatures + +// HasSDNotify is whether the binary was built with support for modular feature "systemd notification support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_sdnotify" build tag. +// It's a const so it can be used for dead code elimination. +const HasSDNotify = true diff --git a/feature/buildfeatures/feature_serve_disabled.go b/feature/buildfeatures/feature_serve_disabled.go new file mode 100644 index 0000000000000..6d79713500e29 --- /dev/null +++ b/feature/buildfeatures/feature_serve_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_serve + +package buildfeatures + +// HasServe is whether the binary was built with support for modular feature "Serve and Funnel support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. +// It's a const so it can be used for dead code elimination. +const HasServe = false diff --git a/feature/buildfeatures/feature_serve_enabled.go b/feature/buildfeatures/feature_serve_enabled.go new file mode 100644 index 0000000000000..57bf2c6b0fc2b --- /dev/null +++ b/feature/buildfeatures/feature_serve_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_serve + +package buildfeatures + +// HasServe is whether the binary was built with support for modular feature "Serve and Funnel support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_serve" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasServe = true diff --git a/feature/buildfeatures/feature_ssh_disabled.go b/feature/buildfeatures/feature_ssh_disabled.go new file mode 100644 index 0000000000000..754f50eb6a816 --- /dev/null +++ b/feature/buildfeatures/feature_ssh_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_ssh + +package buildfeatures + +// HasSSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. +// It's a const so it can be used for dead code elimination. +const HasSSH = false diff --git a/feature/buildfeatures/feature_ssh_enabled.go b/feature/buildfeatures/feature_ssh_enabled.go new file mode 100644 index 0000000000000..dbdc3a89fa027 --- /dev/null +++ b/feature/buildfeatures/feature_ssh_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_ssh + +package buildfeatures + +// HasSSH is whether the binary was built with support for modular feature "Tailscale SSH support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_ssh" build tag. +// It's a const so it can be used for dead code elimination. +const HasSSH = true diff --git a/feature/buildfeatures/feature_synology_disabled.go b/feature/buildfeatures/feature_synology_disabled.go new file mode 100644 index 0000000000000..0cdf084c32d8e --- /dev/null +++ b/feature/buildfeatures/feature_synology_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_synology + +package buildfeatures + +// HasSynology is whether the binary was built with support for modular feature "Synology NAS integration (applies to Linux builds only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_synology" build tag. +// It's a const so it can be used for dead code elimination. +const HasSynology = false diff --git a/feature/buildfeatures/feature_synology_enabled.go b/feature/buildfeatures/feature_synology_enabled.go new file mode 100644 index 0000000000000..dde4123b61eb0 --- /dev/null +++ b/feature/buildfeatures/feature_synology_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_synology + +package buildfeatures + +// HasSynology is whether the binary was built with support for modular feature "Synology NAS integration (applies to Linux builds only)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_synology" build tag. +// It's a const so it can be used for dead code elimination. +const HasSynology = true diff --git a/feature/buildfeatures/feature_syspolicy_disabled.go b/feature/buildfeatures/feature_syspolicy_disabled.go new file mode 100644 index 0000000000000..54d32e32e71d8 --- /dev/null +++ b/feature/buildfeatures/feature_syspolicy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_syspolicy + +package buildfeatures + +// HasSystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". 
+// Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. +// It's a const so it can be used for dead code elimination. +const HasSystemPolicy = false diff --git a/feature/buildfeatures/feature_syspolicy_enabled.go b/feature/buildfeatures/feature_syspolicy_enabled.go new file mode 100644 index 0000000000000..f7c403ae9d68b --- /dev/null +++ b/feature/buildfeatures/feature_syspolicy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_syspolicy + +package buildfeatures + +// HasSystemPolicy is whether the binary was built with support for modular feature "System policy configuration (MDM) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_syspolicy" build tag. +// It's a const so it can be used for dead code elimination. +const HasSystemPolicy = true diff --git a/feature/buildfeatures/feature_systray_disabled.go b/feature/buildfeatures/feature_systray_disabled.go new file mode 100644 index 0000000000000..4ae1edb0ab83f --- /dev/null +++ b/feature/buildfeatures/feature_systray_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_systray + +package buildfeatures + +// HasSysTray is whether the binary was built with support for modular feature "Linux system tray". +// Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. +// It's a const so it can be used for dead code elimination. +const HasSysTray = false diff --git a/feature/buildfeatures/feature_systray_enabled.go b/feature/buildfeatures/feature_systray_enabled.go new file mode 100644 index 0000000000000..5fd7fd220325a --- /dev/null +++ b/feature/buildfeatures/feature_systray_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_systray + +package buildfeatures + +// HasSysTray is whether the binary was built with support for modular feature "Linux system tray". +// Specifically, it's whether the binary was NOT built with the "ts_omit_systray" build tag. +// It's a const so it can be used for dead code elimination. +const HasSysTray = true diff --git a/feature/buildfeatures/feature_taildrop_disabled.go b/feature/buildfeatures/feature_taildrop_disabled.go new file mode 100644 index 0000000000000..8ffe90617839f --- /dev/null +++ b/feature/buildfeatures/feature_taildrop_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_taildrop + +package buildfeatures + +// HasTaildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. +// It's a const so it can be used for dead code elimination. +const HasTaildrop = false diff --git a/feature/buildfeatures/feature_taildrop_enabled.go b/feature/buildfeatures/feature_taildrop_enabled.go new file mode 100644 index 0000000000000..4f55d2801c516 --- /dev/null +++ b/feature/buildfeatures/feature_taildrop_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build !ts_omit_taildrop + +package buildfeatures + +// HasTaildrop is whether the binary was built with support for modular feature "Taildrop (file sending) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_taildrop" build tag. +// It's a const so it can be used for dead code elimination. +const HasTaildrop = true diff --git a/feature/buildfeatures/feature_tailnetlock_disabled.go b/feature/buildfeatures/feature_tailnetlock_disabled.go new file mode 100644 index 0000000000000..6b5a57f24ba4f --- /dev/null +++ b/feature/buildfeatures/feature_tailnetlock_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_tailnetlock + +package buildfeatures + +// HasTailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. +// It's a const so it can be used for dead code elimination. +const HasTailnetLock = false diff --git a/feature/buildfeatures/feature_tailnetlock_enabled.go b/feature/buildfeatures/feature_tailnetlock_enabled.go new file mode 100644 index 0000000000000..afedb7faad312 --- /dev/null +++ b/feature/buildfeatures/feature_tailnetlock_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_tailnetlock + +package buildfeatures + +// HasTailnetLock is whether the binary was built with support for modular feature "Tailnet Lock support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tailnetlock" build tag. +// It's a const so it can be used for dead code elimination. +const HasTailnetLock = true diff --git a/feature/buildfeatures/feature_tap_disabled.go b/feature/buildfeatures/feature_tap_disabled.go new file mode 100644 index 0000000000000..f0b3eec8d7e6f --- /dev/null +++ b/feature/buildfeatures/feature_tap_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_tap + +package buildfeatures + +// HasTap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. +// It's a const so it can be used for dead code elimination. +const HasTap = false diff --git a/feature/buildfeatures/feature_tap_enabled.go b/feature/buildfeatures/feature_tap_enabled.go new file mode 100644 index 0000000000000..1363c4b44afb2 --- /dev/null +++ b/feature/buildfeatures/feature_tap_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_tap + +package buildfeatures + +// HasTap is whether the binary was built with support for modular feature "Experimental Layer 2 (ethernet) support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tap" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasTap = true diff --git a/feature/buildfeatures/feature_tpm_disabled.go b/feature/buildfeatures/feature_tpm_disabled.go new file mode 100644 index 0000000000000..b9d55815ef5df --- /dev/null +++ b/feature/buildfeatures/feature_tpm_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_tpm + +package buildfeatures + +// HasTPM is whether the binary was built with support for modular feature "TPM support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. +// It's a const so it can be used for dead code elimination. +const HasTPM = false diff --git a/feature/buildfeatures/feature_tpm_enabled.go b/feature/buildfeatures/feature_tpm_enabled.go new file mode 100644 index 0000000000000..dcfc8a30442ad --- /dev/null +++ b/feature/buildfeatures/feature_tpm_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_tpm + +package buildfeatures + +// HasTPM is whether the binary was built with support for modular feature "TPM support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_tpm" build tag. +// It's a const so it can be used for dead code elimination. +const HasTPM = true diff --git a/feature/buildfeatures/feature_unixsocketidentity_disabled.go b/feature/buildfeatures/feature_unixsocketidentity_disabled.go new file mode 100644 index 0000000000000..d64e48b825eac --- /dev/null +++ b/feature/buildfeatures/feature_unixsocketidentity_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_unixsocketidentity + +package buildfeatures + +// HasUnixSocketIdentity is whether the binary was built with support for modular feature "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_unixsocketidentity" build tag. +// It's a const so it can be used for dead code elimination. +const HasUnixSocketIdentity = false diff --git a/feature/buildfeatures/feature_unixsocketidentity_enabled.go b/feature/buildfeatures/feature_unixsocketidentity_enabled.go new file mode 100644 index 0000000000000..463ac2ced3636 --- /dev/null +++ b/feature/buildfeatures/feature_unixsocketidentity_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_unixsocketidentity + +package buildfeatures + +// HasUnixSocketIdentity is whether the binary was built with support for modular feature "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)". +// Specifically, it's whether the binary was NOT built with the "ts_omit_unixsocketidentity" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUnixSocketIdentity = true diff --git a/feature/buildfeatures/feature_useexitnode_disabled.go b/feature/buildfeatures/feature_useexitnode_disabled.go new file mode 100644 index 0000000000000..51bec8046cb35 --- /dev/null +++ b/feature/buildfeatures/feature_useexitnode_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useexitnode + +package buildfeatures + +// HasUseExitNode is whether the binary was built with support for modular feature "Use exit nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseExitNode = false diff --git a/feature/buildfeatures/feature_useexitnode_enabled.go b/feature/buildfeatures/feature_useexitnode_enabled.go new file mode 100644 index 0000000000000..f7ab414de9477 --- /dev/null +++ b/feature/buildfeatures/feature_useexitnode_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useexitnode + +package buildfeatures + +// HasUseExitNode is whether the binary was built with support for modular feature "Use exit nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useexitnode" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseExitNode = true diff --git a/feature/buildfeatures/feature_useproxy_disabled.go b/feature/buildfeatures/feature_useproxy_disabled.go new file mode 100644 index 0000000000000..9f29a9820eb99 --- /dev/null +++ b/feature/buildfeatures/feature_useproxy_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useproxy + +package buildfeatures + +// HasUseProxy is whether the binary was built with support for modular feature "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useproxy" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseProxy = false diff --git a/feature/buildfeatures/feature_useproxy_enabled.go b/feature/buildfeatures/feature_useproxy_enabled.go new file mode 100644 index 0000000000000..9195f2fdce784 --- /dev/null +++ b/feature/buildfeatures/feature_useproxy_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useproxy + +package buildfeatures + +// HasUseProxy is whether the binary was built with support for modular feature "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useproxy" build tag. +// It's a const so it can be used for dead code elimination. 
+const HasUseProxy = true diff --git a/feature/buildfeatures/feature_usermetrics_disabled.go b/feature/buildfeatures/feature_usermetrics_disabled.go new file mode 100644 index 0000000000000..092c89c3b543f --- /dev/null +++ b/feature/buildfeatures/feature_usermetrics_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_usermetrics + +package buildfeatures + +// HasUserMetrics is whether the binary was built with support for modular feature "Usermetrics (documented, stable) metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_usermetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasUserMetrics = false diff --git a/feature/buildfeatures/feature_usermetrics_enabled.go b/feature/buildfeatures/feature_usermetrics_enabled.go new file mode 100644 index 0000000000000..813e3c3477b66 --- /dev/null +++ b/feature/buildfeatures/feature_usermetrics_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_usermetrics + +package buildfeatures + +// HasUserMetrics is whether the binary was built with support for modular feature "Usermetrics (documented, stable) metrics support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_usermetrics" build tag. +// It's a const so it can be used for dead code elimination. +const HasUserMetrics = true diff --git a/feature/buildfeatures/feature_useroutes_disabled.go b/feature/buildfeatures/feature_useroutes_disabled.go new file mode 100644 index 0000000000000..ecf9d022bed74 --- /dev/null +++ b/feature/buildfeatures/feature_useroutes_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_useroutes + +package buildfeatures + +// HasUseRoutes is whether the binary was built with support for modular feature "Use routes advertised by other nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseRoutes = false diff --git a/feature/buildfeatures/feature_useroutes_enabled.go b/feature/buildfeatures/feature_useroutes_enabled.go new file mode 100644 index 0000000000000..c0a59322ecdc1 --- /dev/null +++ b/feature/buildfeatures/feature_useroutes_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_useroutes + +package buildfeatures + +// HasUseRoutes is whether the binary was built with support for modular feature "Use routes advertised by other nodes". +// Specifically, it's whether the binary was NOT built with the "ts_omit_useroutes" build tag. +// It's a const so it can be used for dead code elimination. +const HasUseRoutes = true diff --git a/feature/buildfeatures/feature_wakeonlan_disabled.go b/feature/buildfeatures/feature_wakeonlan_disabled.go new file mode 100644 index 0000000000000..816ac661f78ce --- /dev/null +++ b/feature/buildfeatures/feature_wakeonlan_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. 
+ +//go:build ts_omit_wakeonlan + +package buildfeatures + +// HasWakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. +// It's a const so it can be used for dead code elimination. +const HasWakeOnLAN = false diff --git a/feature/buildfeatures/feature_wakeonlan_enabled.go b/feature/buildfeatures/feature_wakeonlan_enabled.go new file mode 100644 index 0000000000000..34b3348a10fef --- /dev/null +++ b/feature/buildfeatures/feature_wakeonlan_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_wakeonlan + +package buildfeatures + +// HasWakeOnLAN is whether the binary was built with support for modular feature "Wake-on-LAN support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_wakeonlan" build tag. +// It's a const so it can be used for dead code elimination. +const HasWakeOnLAN = true diff --git a/feature/buildfeatures/feature_webclient_disabled.go b/feature/buildfeatures/feature_webclient_disabled.go new file mode 100644 index 0000000000000..a7b24f4ac2dda --- /dev/null +++ b/feature/buildfeatures/feature_webclient_disabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build ts_omit_webclient + +package buildfeatures + +// HasWebClient is whether the binary was built with support for modular feature "Web client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasWebClient = false diff --git a/feature/buildfeatures/feature_webclient_enabled.go b/feature/buildfeatures/feature_webclient_enabled.go new file mode 100644 index 0000000000000..e40dad33c6ebb --- /dev/null +++ b/feature/buildfeatures/feature_webclient_enabled.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by gen.go; DO NOT EDIT. + +//go:build !ts_omit_webclient + +package buildfeatures + +// HasWebClient is whether the binary was built with support for modular feature "Web client support". +// Specifically, it's whether the binary was NOT built with the "ts_omit_webclient" build tag. +// It's a const so it can be used for dead code elimination. +const HasWebClient = true diff --git a/feature/buildfeatures/gen.go b/feature/buildfeatures/gen.go new file mode 100644 index 0000000000000..e967cb8ff1906 --- /dev/null +++ b/feature/buildfeatures/gen.go @@ -0,0 +1,49 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ignore + +// The gens.go program generates the feature__enabled.go +// and feature__disabled.go files for each feature tag. +package main + +import ( + "cmp" + "fmt" + "os" + "strings" + + "tailscale.com/feature/featuretags" + "tailscale.com/util/must" +) + +const header = `// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code g|e|n|e|r|a|t|e|d by gen.go; D|O N|OT E|D|I|T. 
+ +` + +func main() { + header := strings.ReplaceAll(header, "|", "") // to avoid this file being marked as generated + for k, m := range featuretags.Features { + if !k.IsOmittable() { + continue + } + sym := "Has" + cmp.Or(m.Sym, strings.ToUpper(string(k)[:1])+string(k)[1:]) + for _, suf := range []string{"enabled", "disabled"} { + bang := "" + if suf == "enabled" { + bang = "!" // !ts_omit_... + } + must.Do(os.WriteFile("feature_"+string(k)+"_"+suf+".go", + fmt.Appendf(nil, "%s//go:build %s%s\n\npackage buildfeatures\n\n"+ + "// %s is whether the binary was built with support for modular feature %q.\n"+ + "// Specifically, it's whether the binary was NOT built with the %q build tag.\n"+ + "// It's a const so it can be used for dead code elimination.\n"+ + "const %s = %t\n", + header, bang, k.OmitTag(), sym, m.Desc, k.OmitTag(), sym, suf == "enabled"), 0644)) + + } + } +} diff --git a/feature/c2n/c2n.go b/feature/c2n/c2n.go new file mode 100644 index 0000000000000..ae942e31d0d95 --- /dev/null +++ b/feature/c2n/c2n.go @@ -0,0 +1,70 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package c2n registers support for C2N (Control-to-Node) communications. +package c2n + +import ( + "bufio" + "bytes" + "context" + "net/http" + "time" + + "tailscale.com/control/controlclient" + "tailscale.com/tailcfg" + "tailscale.com/tempfork/httprec" + "tailscale.com/types/logger" +) + +func init() { + controlclient.HookAnswerC2NPing.Set(answerC2NPing) +} + +func answerC2NPing(logf logger.Logf, c2nHandler http.Handler, c *http.Client, pr *tailcfg.PingRequest) { + if c2nHandler == nil { + logf("answerC2NPing: c2nHandler not defined") + return + } + hreq, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(pr.Payload))) + if err != nil { + logf("answerC2NPing: ReadRequest: %v", err) + return + } + if pr.Log { + logf("answerC2NPing: got c2n request for %v ...", hreq.RequestURI) + } + handlerTimeout := time.Minute + if v := hreq.Header.Get("C2n-Handler-Timeout"); v != "" { + handlerTimeout, _ = time.ParseDuration(v) + } + handlerCtx, cancel := context.WithTimeout(context.Background(), handlerTimeout) + defer cancel() + hreq = hreq.WithContext(handlerCtx) + rec := httprec.NewRecorder() + c2nHandler.ServeHTTP(rec, hreq) + cancel() + + c2nResBuf := new(bytes.Buffer) + rec.Result().Write(c2nResBuf) + + replyCtx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + req, err := http.NewRequestWithContext(replyCtx, "POST", pr.URL, c2nResBuf) + if err != nil { + logf("answerC2NPing: NewRequestWithContext: %v", err) + return + } + if pr.Log { + logf("answerC2NPing: sending POST ping to %v ...", pr.URL) + } + t0 := time.Now() + _, err = c.Do(req) + d := time.Since(t0).Round(time.Millisecond) + if err != nil { + logf("answerC2NPing error: %v to %v (after %v)", err, pr.URL, d) + } else if pr.Log { + logf("answerC2NPing complete to %v (after %v)", pr.URL, d) + } +} diff --git a/feature/clientupdate/clientupdate.go b/feature/clientupdate/clientupdate.go new file mode 100644 index 0000000000000..45fd21129b4e7 --- /dev/null +++ b/feature/clientupdate/clientupdate.go @@ -0,0 +1,530 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package clientupdate enables the client update feature. 
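Looking back at answerC2NPing in feature/c2n/c2n.go above: control serializes an HTTP request into PingRequest.Payload, the node replays it against its C2N handler, and the recorded response is POSTed back to pr.URL. A self-contained sketch of that round-trip using only the standard library (httptest stands in for the tempfork/httprec recorder used above; the /echo handler is invented for illustration):

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// A toy stand-in for the node's C2N handler.
	mux := http.NewServeMux()
	mux.HandleFunc("/echo", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "hello from %s", r.URL.Path)
	})

	// Control would place a serialized HTTP request into PingRequest.Payload.
	payload := []byte("GET /echo HTTP/1.0\r\nHost: local\r\n\r\n")

	// The node parses it back into an *http.Request ...
	req, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(payload)))
	if err != nil {
		panic(err)
	}

	// ... serves it into a response recorder ...
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, req)

	// ... and serializes the recorded response for the reply POST to pr.URL.
	var resBuf bytes.Buffer
	if err := rec.Result().Write(&resBuf); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", resBuf.String())
}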
+package clientupdate + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "tailscale.com/clientupdate" + "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/ipnstate" + "tailscale.com/ipn/localapi" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/httpm" + "tailscale.com/version" + "tailscale.com/version/distro" +) + +func init() { + ipnext.RegisterExtension("clientupdate", newExt) + + // C2N + ipnlocal.RegisterC2N("GET /update", handleC2NUpdateGet) + ipnlocal.RegisterC2N("POST /update", handleC2NUpdatePost) + + // LocalAPI: + localapi.Register("update/install", serveUpdateInstall) + localapi.Register("update/progress", serveUpdateProgress) +} + +func newExt(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + return &extension{ + logf: logf, + sb: sb, + + lastSelfUpdateState: ipnstate.UpdateFinished, + }, nil +} + +type extension struct { + logf logger.Logf + sb ipnext.SafeBackend + + mu sync.Mutex + + // c2nUpdateStatus is the status of c2n-triggered client update. + c2nUpdateStatus updateStatus + prefs ipn.PrefsView + state ipn.State + + lastSelfUpdateState ipnstate.SelfUpdateStatus + selfUpdateProgress []ipnstate.UpdateProgress + + // offlineAutoUpdateCancel stops offline auto-updates when called. It + // should be used via stopOfflineAutoUpdate and + // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are + // not running. + // + //lint:ignore U1000 only used in Linux and Windows builds in autoupdate.go + offlineAutoUpdateCancel func() +} + +func (e *extension) Name() string { return "clientupdate" } + +func (e *extension) Init(h ipnext.Host) error { + + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().BackendStateChange.Add(e.onBackendStateChange) + + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. + // This same workaround appears in feature/portlist/portlist.go. 
+ profile, prefs := h.Profiles().CurrentProfileState() + e.onChangeProfile(profile, prefs, false) + + return nil +} + +func (e *extension) Shutdown() error { + e.stopOfflineAutoUpdate() + return nil +} + +func (e *extension) onBackendStateChange(newState ipn.State) { + e.mu.Lock() + defer e.mu.Unlock() + e.state = newState + e.updateOfflineAutoUpdateLocked() +} + +func (e *extension) onChangeProfile(profile ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.prefs = prefs + e.updateOfflineAutoUpdateLocked() +} + +func (e *extension) updateOfflineAutoUpdateLocked() { + want := e.prefs.Valid() && e.prefs.AutoUpdate().Apply.EqualBool(true) && + e.state != ipn.Running && e.state != ipn.Starting + + cur := e.offlineAutoUpdateCancel != nil + + if want && !cur { + e.maybeStartOfflineAutoUpdateLocked(e.prefs) + } else if !want && cur { + e.stopOfflineAutoUpdateLocked() + } +} + +type updateStatus struct { + started bool +} + +func (e *extension) clearSelfUpdateProgress() { + e.mu.Lock() + defer e.mu.Unlock() + e.selfUpdateProgress = make([]ipnstate.UpdateProgress, 0) + e.lastSelfUpdateState = ipnstate.UpdateFinished +} + +func (e *extension) GetSelfUpdateProgress() []ipnstate.UpdateProgress { + e.mu.Lock() + defer e.mu.Unlock() + res := make([]ipnstate.UpdateProgress, len(e.selfUpdateProgress)) + copy(res, e.selfUpdateProgress) + return res +} + +func (e *extension) DoSelfUpdate() { + e.mu.Lock() + updateState := e.lastSelfUpdateState + e.mu.Unlock() + // don't start an update if one is already in progress + if updateState == ipnstate.UpdateInProgress { + return + } + e.clearSelfUpdateProgress() + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, "")) + up, err := clientupdate.NewUpdater(clientupdate.Arguments{ + Logf: func(format string, args ...any) { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, fmt.Sprintf(format, args...))) + }, + }) + if err != nil { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) + } + err = up.Update() + if err != nil { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) + } else { + e.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFinished, "tailscaled did not restart; please restart Tailscale manually.")) + } +} + +// serveUpdateInstall sends a request to the LocalBackend to start a Tailscale +// self-update. A successful response does not indicate whether the update +// succeeded, only that the request was accepted. Clients should use +// serveUpdateProgress after pinging this endpoint to check how the update is +// going. +func serveUpdateInstall(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + b := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + w.WriteHeader(http.StatusAccepted) + + go ext.DoSelfUpdate() +} + +// serveUpdateProgress returns the status of an in-progress Tailscale self-update. +// This is provided as a slice of ipnstate.UpdateProgress structs with various +// log messages in order from oldest to newest. If an update is not in progress, +// the returned slice will be empty. 
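On the client side, progress is just a JSON array of ipnstate.UpdateProgress. A hedged sketch of polling it: it assumes an *http.Client already wired to tailscaled's local API socket (real callers go through the existing local-client plumbing) and assumes the handler registered above as "update/progress" is served under the usual /localapi/v0/ prefix; pollUpdateProgress itself is hypothetical and not part of this change:

package example

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"tailscale.com/ipn/ipnstate"
)

// pollUpdateProgress fetches the in-progress self-update log lines once.
// Callers would loop with a delay until they observe a terminal status.
func pollUpdateProgress(ctx context.Context, c *http.Client, base string) ([]ipnstate.UpdateProgress, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", base+"/localapi/v0/update/progress", nil)
	if err != nil {
		return nil, err
	}
	res, err := c.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %s", res.Status)
	}
	var ups []ipnstate.UpdateProgress
	if err := json.NewDecoder(res.Body).Decode(&ups); err != nil {
		return nil, err
	}
	return ups, nil
}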
+func serveUpdateProgress(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + + b := h.LocalBackend() + ext, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + ups := ext.GetSelfUpdateProgress() + + json.NewEncoder(w).Encode(ups) +} + +func (e *extension) pushSelfUpdateProgress(up ipnstate.UpdateProgress) { + e.mu.Lock() + defer e.mu.Unlock() + e.selfUpdateProgress = append(e.selfUpdateProgress, up) + e.lastSelfUpdateState = up.Status +} + +func handleC2NUpdateGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + + e.logf("c2n: GET /update received") + + res := e.newC2NUpdateResponse() + res.Started = e.c2nUpdateStarted() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +func handleC2NUpdatePost(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "clientupdate extension not found", http.StatusInternalServerError) + return + } + e.logf("c2n: POST /update received") + res := e.newC2NUpdateResponse() + defer func() { + if res.Err != "" { + e.logf("c2n: POST /update failed: %s", res.Err) + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) + }() + + if !res.Enabled { + res.Err = "not enabled" + return + } + if !res.Supported { + res.Err = "not supported" + return + } + + // Do not update if we have active inbound SSH connections. Control can set + // force=true query parameter to override this. + if r.FormValue("force") != "true" && b.ActiveSSHConns() > 0 { + res.Err = "not updating due to active SSH connections" + return + } + + if err := e.startAutoUpdate("c2n"); err != nil { + res.Err = err.Error() + return + } + res.Started = true +} + +func (e *extension) newC2NUpdateResponse() tailcfg.C2NUpdateResponse { + e.mu.Lock() + defer e.mu.Unlock() + + // If NewUpdater does not return an error, we can update the installation. + // + // Note that we create the Updater solely to check for errors; we do not + // invoke it here. For this purpose, it is ok to pass it a zero Arguments. + var upPref ipn.AutoUpdatePrefs + if e.prefs.Valid() { + upPref = e.prefs.AutoUpdate() + } + return tailcfg.C2NUpdateResponse{ + Enabled: envknob.AllowsRemoteUpdate() || upPref.Apply.EqualBool(true), + Supported: feature.CanAutoUpdate() && !version.IsMacSysExt(), + } +} + +func (e *extension) c2nUpdateStarted() bool { + e.mu.Lock() + defer e.mu.Unlock() + return e.c2nUpdateStatus.started +} + +func (e *extension) setC2NUpdateStarted(v bool) { + e.mu.Lock() + defer e.mu.Unlock() + e.c2nUpdateStatus.started = v +} + +func (e *extension) trySetC2NUpdateStarted() bool { + e.mu.Lock() + defer e.mu.Unlock() + if e.c2nUpdateStatus.started { + return false + } + e.c2nUpdateStatus.started = true + return true +} + +// findCmdTailscale looks for the cmd/tailscale that corresponds to the +// currently running cmd/tailscaled. It's up to the caller to verify that the +// two match, but this function does its best to find the right one. Notably, it +// doesn't use $PATH for security reasons. 
+func findCmdTailscale() (string, error) { + self, err := os.Executable() + if err != nil { + return "", err + } + var ts string + switch runtime.GOOS { + case "linux": + if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" { + ts = "/usr/bin/tailscale" + } + if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" { + ts = "/usr/local/bin/tailscale" + } + switch distro.Get() { + case distro.QNAP: + // The volume under /share/ where qpkg are installed is not + // predictable. But the rest of the path is. + ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self) + if err == nil && ok { + ts = filepath.Join(filepath.Dir(self), "tailscale") + } + case distro.Unraid: + if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" { + ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale" + } + } + case "windows": + ts = filepath.Join(filepath.Dir(self), "tailscale.exe") + case "freebsd", "openbsd": + if self == "/usr/local/bin/tailscaled" { + ts = "/usr/local/bin/tailscale" + } + default: + return "", fmt.Errorf("unsupported OS %v", runtime.GOOS) + } + if ts != "" && regularFileExists(ts) { + return ts, nil + } + return "", errors.New("tailscale executable not found in expected place") +} + +func tailscaleUpdateCmd(cmdTS string) *exec.Cmd { + defaultCmd := exec.Command(cmdTS, "update", "--yes") + if runtime.GOOS != "linux" { + return defaultCmd + } + if _, err := exec.LookPath("systemd-run"); err != nil { + return defaultCmd + } + + // When systemd-run is available, use it to run the update command. This + // creates a new temporary unit separate from the tailscaled unit. When + // tailscaled is restarted during the update, systemd won't kill this + // temporary update unit, which could cause unexpected breakage. + // + // We want to use a few optional flags: + // * --wait, to block the update command until completion (added in systemd 232) + // * --pipe, to collect stdout/stderr (added in systemd 235) + // * --collect, to clean up failed runs from memory (added in systemd 236) + // + // We need to check the version of systemd to figure out if those flags are + // available. + // + // The output will look like: + // + // systemd 255 (255.7-1-arch) + // +PAM +AUDIT ... other feature flags ... + systemdVerOut, err := exec.Command("systemd-run", "--version").Output() + if err != nil { + return defaultCmd + } + parts := strings.Fields(string(systemdVerOut)) + if len(parts) < 2 || parts[0] != "systemd" { + return defaultCmd + } + systemdVer, err := strconv.Atoi(parts[1]) + if err != nil { + return defaultCmd + } + if systemdVer >= 236 { + return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes") + } else if systemdVer >= 235 { + return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes") + } else if systemdVer >= 232 { + return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes") + } else { + return exec.Command("systemd-run", cmdTS, "update", "--yes") + } +} + +func regularFileExists(path string) bool { + fi, err := os.Stat(path) + return err == nil && fi.Mode().IsRegular() +} + +// startAutoUpdate triggers an auto-update attempt. The actual update happens +// asynchronously. If another update is in progress, an error is returned. +func (e *extension) startAutoUpdate(logPrefix string) (retErr error) { + // Check if update was already started, and mark as started. 
+ if !e.trySetC2NUpdateStarted() { + return errors.New("update already started") + } + defer func() { + // Clear the started flag if something failed. + if retErr != nil { + e.setC2NUpdateStarted(false) + } + }() + + cmdTS, err := findCmdTailscale() + if err != nil { + return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) + } + var ver struct { + Long string `json:"long"` + } + out, err := exec.Command(cmdTS, "version", "--json").Output() + if err != nil { + return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) + } + if err := json.Unmarshal(out, &ver); err != nil { + return fmt.Errorf("invalid JSON from cmd/tailscale version --json: %w", err) + } + if ver.Long != version.Long() { + return fmt.Errorf("cmd/tailscale version %q does not match tailscaled version %q", ver.Long, version.Long()) + } + + cmd := tailscaleUpdateCmd(cmdTS) + buf := new(bytes.Buffer) + cmd.Stdout = buf + cmd.Stderr = buf + e.logf("%s: running %q", logPrefix, strings.Join(cmd.Args, " ")) + if err := cmd.Start(); err != nil { + return fmt.Errorf("failed to start cmd/tailscale update: %w", err) + } + + go func() { + if err := cmd.Wait(); err != nil { + e.logf("%s: update command failed: %v, output: %s", logPrefix, err, buf) + } else { + e.logf("%s: update attempt complete", logPrefix) + } + e.setC2NUpdateStarted(false) + }() + return nil +} + +func (e *extension) stopOfflineAutoUpdate() { + e.mu.Lock() + defer e.mu.Unlock() + e.stopOfflineAutoUpdateLocked() +} + +func (e *extension) stopOfflineAutoUpdateLocked() { + if e.offlineAutoUpdateCancel == nil { + return + } + e.logf("offline auto-update: stopping update checks") + e.offlineAutoUpdateCancel() + e.offlineAutoUpdateCancel = nil +} + +// e.mu must be held +func (e *extension) maybeStartOfflineAutoUpdateLocked(prefs ipn.PrefsView) { + if !prefs.Valid() || !prefs.AutoUpdate().Apply.EqualBool(true) { + return + } + // AutoUpdate.Apply field in prefs can only be true for platforms that + // support auto-updates. But check it here again, just in case. + if !feature.CanAutoUpdate() { + return + } + // On macsys, auto-updates are managed by Sparkle. + if version.IsMacSysExt() { + return + } + + if e.offlineAutoUpdateCancel != nil { + // Already running. + return + } + ctx, cancel := context.WithCancel(context.Background()) + e.offlineAutoUpdateCancel = cancel + + e.logf("offline auto-update: starting update checks") + go e.offlineAutoUpdate(ctx) +} + +const offlineAutoUpdateCheckPeriod = time.Hour + +func (e *extension) offlineAutoUpdate(ctx context.Context) { + t := time.NewTicker(offlineAutoUpdateCheckPeriod) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + } + if err := e.startAutoUpdate("offline auto-update"); err != nil { + e.logf("offline auto-update: failed: %v", err) + } + } +} diff --git a/feature/condlite/expvar/expvar.go b/feature/condlite/expvar/expvar.go new file mode 100644 index 0000000000000..edc16ac771b13 --- /dev/null +++ b/feature/condlite/expvar/expvar.go @@ -0,0 +1,12 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !(ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics) + +// Package expvar contains type aliases for expvar types, to allow conditionally +// excluding the package from builds. 
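The condlite pattern above pairs a real type alias (expvar.Int) with a no-op stub under the complementary build tags, so callers compile identically either way. A hypothetical caller sketch, assuming only the feature/condlite/expvar package from this diff (the counter name is invented):

package example

import expvarlite "tailscale.com/feature/condlite/expvar"

// droppedPackets is a real expvar.Int in builds that keep debug, clientmetrics,
// or usermetrics support, and a do-nothing int64 stub when all three are omitted.
var droppedPackets expvarlite.Int

func recordDrop() {
	droppedPackets.Add(1)
}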
+package expvar + +import "expvar" + +type Int = expvar.Int diff --git a/feature/condlite/expvar/omit.go b/feature/condlite/expvar/omit.go new file mode 100644 index 0000000000000..a21d94deb48eb --- /dev/null +++ b/feature/condlite/expvar/omit.go @@ -0,0 +1,11 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_debug && ts_omit_clientmetrics && ts_omit_usermetrics + +// excluding the package from builds. +package expvar + +type Int int64 + +func (*Int) Add(int64) {} diff --git a/feature/condregister/condregister.go b/feature/condregister/condregister.go index f9025095147f1..654483d1d7745 100644 --- a/feature/condregister/condregister.go +++ b/feature/condregister/condregister.go @@ -5,3 +5,14 @@ // by build tags. It is one central package that callers can empty import // to ensure all conditional features are registered. package condregister + +import ( + // Portmapper is special in that the CLI also needs to link it in, + // so it's pulled out into its own package, rather than using a maybe_*.go + // file in condregister. + _ "tailscale.com/feature/condregister/portmapper" + + // HTTP proxy support is also needed by the CLI, and tsnet, so it's its + // own package too. + _ "tailscale.com/feature/condregister/useproxy" +) diff --git a/feature/condregister/identityfederation/doc.go b/feature/condregister/identityfederation/doc.go new file mode 100644 index 0000000000000..503b2c8f127d5 --- /dev/null +++ b/feature/condregister/identityfederation/doc.go @@ -0,0 +1,7 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package identityfederation registers support for authkey resolution +// via identity federation if it's not disabled by the +// ts_omit_identityfederation build tag. 
+package identityfederation diff --git a/feature/condregister/identityfederation/maybe_identityfederation.go b/feature/condregister/identityfederation/maybe_identityfederation.go new file mode 100644 index 0000000000000..b1db42fc3c77a --- /dev/null +++ b/feature/condregister/identityfederation/maybe_identityfederation.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_identityfederation + +package identityfederation + +import _ "tailscale.com/feature/identityfederation" diff --git a/feature/condregister/maybe_ace.go b/feature/condregister/maybe_ace.go new file mode 100644 index 0000000000000..07023171144a5 --- /dev/null +++ b/feature/condregister/maybe_ace.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_ace + +package condregister + +import _ "tailscale.com/feature/ace" diff --git a/feature/condregister/maybe_appconnectors.go b/feature/condregister/maybe_appconnectors.go new file mode 100644 index 0000000000000..70112d7810b10 --- /dev/null +++ b/feature/condregister/maybe_appconnectors.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_appconnectors + +package condregister + +import _ "tailscale.com/feature/appconnectors" diff --git a/feature/condregister/maybe_c2n.go b/feature/condregister/maybe_c2n.go new file mode 100644 index 0000000000000..c222af533a37d --- /dev/null +++ b/feature/condregister/maybe_c2n.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_c2n + +package condregister + +import _ "tailscale.com/feature/c2n" diff --git a/feature/condregister/maybe_clientupdate.go b/feature/condregister/maybe_clientupdate.go new file mode 100644 index 0000000000000..bc694f970c543 --- /dev/null +++ b/feature/condregister/maybe_clientupdate.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_clientupdate + +package condregister + +import _ "tailscale.com/feature/clientupdate" diff --git a/feature/condregister/maybe_debugportmapper.go b/feature/condregister/maybe_debugportmapper.go new file mode 100644 index 0000000000000..4990d09ea5833 --- /dev/null +++ b/feature/condregister/maybe_debugportmapper.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debugportmapper + +package condregister + +import _ "tailscale.com/feature/debugportmapper" diff --git a/feature/condregister/maybe_doctor.go b/feature/condregister/maybe_doctor.go new file mode 100644 index 0000000000000..3dc9ffa539312 --- /dev/null +++ b/feature/condregister/maybe_doctor.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_doctor + +package condregister + +import _ "tailscale.com/feature/doctor" diff --git a/feature/condregister/maybe_drive.go b/feature/condregister/maybe_drive.go new file mode 100644 index 0000000000000..cb447ff289a29 --- /dev/null +++ b/feature/condregister/maybe_drive.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package condregister + +import _ "tailscale.com/feature/drive" diff --git a/feature/condregister/maybe_linkspeed.go b/feature/condregister/maybe_linkspeed.go new file mode 100644 index 
0000000000000..46064b39a5935 --- /dev/null +++ b/feature/condregister/maybe_linkspeed.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_linkspeed + +package condregister + +import _ "tailscale.com/feature/linkspeed" diff --git a/feature/condregister/maybe_linuxdnsfight.go b/feature/condregister/maybe_linuxdnsfight.go new file mode 100644 index 0000000000000..0dae62b00ab8a --- /dev/null +++ b/feature/condregister/maybe_linuxdnsfight.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_linuxdnsfight + +package condregister + +import _ "tailscale.com/feature/linuxdnsfight" diff --git a/feature/condregister/maybe_osrouter.go b/feature/condregister/maybe_osrouter.go new file mode 100644 index 0000000000000..7ab85add22021 --- /dev/null +++ b/feature/condregister/maybe_osrouter.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_osrouter + +package condregister + +import _ "tailscale.com/wgengine/router/osrouter" diff --git a/feature/condregister/maybe_portlist.go b/feature/condregister/maybe_portlist.go new file mode 100644 index 0000000000000..1be56f177daf8 --- /dev/null +++ b/feature/condregister/maybe_portlist.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_portlist + +package condregister + +import _ "tailscale.com/feature/portlist" diff --git a/feature/condregister/maybe_posture.go b/feature/condregister/maybe_posture.go new file mode 100644 index 0000000000000..6f14c27137127 --- /dev/null +++ b/feature/condregister/maybe_posture.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_posture + +package condregister + +import _ "tailscale.com/feature/posture" diff --git a/feature/condregister/maybe_sdnotify.go b/feature/condregister/maybe_sdnotify.go new file mode 100644 index 0000000000000..647996f881d8f --- /dev/null +++ b/feature/condregister/maybe_sdnotify.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !ts_omit_sdnotify + +package condregister + +import _ "tailscale.com/feature/sdnotify" diff --git a/feature/condregister/maybe_store_aws.go b/feature/condregister/maybe_store_aws.go new file mode 100644 index 0000000000000..8358b49f05843 --- /dev/null +++ b/feature/condregister/maybe_store_aws.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws + +package condregister + +import _ "tailscale.com/ipn/store/awsstore" diff --git a/feature/condregister/maybe_store_kube.go b/feature/condregister/maybe_store_kube.go new file mode 100644 index 0000000000000..bb795b05e2450 --- /dev/null +++ b/feature/condregister/maybe_store_kube.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build (ts_kube || (linux && (arm64 || amd64) && !android)) && !ts_omit_kube + +package condregister + +import _ "tailscale.com/ipn/store/kubestore" diff --git a/feature/condregister/oauthkey/doc.go b/feature/condregister/oauthkey/doc.go new file mode 100644 index 0000000000000..4c4ea5e4e3078 --- /dev/null +++ b/feature/condregister/oauthkey/doc.go @@ 
-0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package oauthkey registers support for OAuth key resolution +// if it's not disabled via the ts_omit_oauthkey build tag. +// Currently (2025-09-19), tailscaled does not need OAuth key +// resolution, only the CLI and tsnet do, so this package is +// pulled out separately to avoid linking OAuth packages into +// tailscaled. +package oauthkey diff --git a/feature/condregister/oauthkey/maybe_oauthkey.go b/feature/condregister/oauthkey/maybe_oauthkey.go new file mode 100644 index 0000000000000..be8d04b8ec035 --- /dev/null +++ b/feature/condregister/oauthkey/maybe_oauthkey.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_oauthkey + +package oauthkey + +import _ "tailscale.com/feature/oauthkey" diff --git a/feature/condregister/portmapper/doc.go b/feature/condregister/portmapper/doc.go new file mode 100644 index 0000000000000..5c30538c43a11 --- /dev/null +++ b/feature/condregister/portmapper/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmapper registers support for portmapper +// if it's not disabled via the ts_omit_portmapper build tag. +package portmapper diff --git a/feature/condregister/portmapper/maybe_portmapper.go b/feature/condregister/portmapper/maybe_portmapper.go new file mode 100644 index 0000000000000..c306fd3d5a1f0 --- /dev/null +++ b/feature/condregister/portmapper/maybe_portmapper.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_portmapper + +package portmapper + +import _ "tailscale.com/feature/portmapper" diff --git a/feature/condregister/useproxy/doc.go b/feature/condregister/useproxy/doc.go new file mode 100644 index 0000000000000..1e8abb358fa83 --- /dev/null +++ b/feature/condregister/useproxy/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package useproxy registers support for using proxies +// if it's not disabled via the ts_omit_useproxy build tag. +package useproxy diff --git a/feature/condregister/useproxy/useproxy.go b/feature/condregister/useproxy/useproxy.go new file mode 100644 index 0000000000000..bda6e49c0bb95 --- /dev/null +++ b/feature/condregister/useproxy/useproxy.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_useproxy + +package useproxy + +import _ "tailscale.com/feature/useproxy" diff --git a/feature/debugportmapper/debugportmapper.go b/feature/debugportmapper/debugportmapper.go new file mode 100644 index 0000000000000..2625086c64dcf --- /dev/null +++ b/feature/debugportmapper/debugportmapper.go @@ -0,0 +1,204 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package debugportmapper registers support for debugging Tailscale's +// portmapping support. 
+package debugportmapper + +import ( + "context" + "fmt" + "net" + "net/http" + "net/netip" + "strconv" + "strings" + "sync" + "time" + + "tailscale.com/ipn/localapi" + "tailscale.com/net/netmon" + "tailscale.com/net/portmapper" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +func init() { + localapi.Register("debug-portmap", serveDebugPortmap) +} + +func serveDebugPortmap(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "text/plain") + + dur, err := time.ParseDuration(r.FormValue("duration")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + gwSelf := r.FormValue("gateway_and_self") + + trueFunc := func() bool { return true } + // Update portmapper debug flags + debugKnobs := &portmapper.DebugKnobs{VerboseLogs: true} + switch r.FormValue("type") { + case "": + case "pmp": + debugKnobs.DisablePCPFunc = trueFunc + debugKnobs.DisableUPnPFunc = trueFunc + case "pcp": + debugKnobs.DisablePMPFunc = trueFunc + debugKnobs.DisableUPnPFunc = trueFunc + case "upnp": + debugKnobs.DisablePCPFunc = trueFunc + debugKnobs.DisablePMPFunc = trueFunc + default: + http.Error(w, "unknown portmap debug type", http.StatusBadRequest) + return + } + if k := h.LocalBackend().ControlKnobs(); k != nil { + if k.DisableUPnP.Load() { + debugKnobs.DisableUPnPFunc = trueFunc + } + } + + if defBool(r.FormValue("log_http"), false) { + debugKnobs.LogHTTP = true + } + + var ( + logLock sync.Mutex + handlerDone bool + ) + logf := func(format string, args ...any) { + if !strings.HasSuffix(format, "\n") { + format = format + "\n" + } + + logLock.Lock() + defer logLock.Unlock() + + // The portmapper can call this log function after the HTTP + // handler returns, which is not allowed and can cause a panic. + // If this happens, ignore the log lines since this typically + // occurs due to a client disconnect. + if handlerDone { + return + } + + // Write and flush each line to the client so that output is streamed + fmt.Fprintf(w, format, args...) 
+ if f, ok := w.(http.Flusher); ok { + f.Flush() + } + } + defer func() { + logLock.Lock() + handlerDone = true + logLock.Unlock() + }() + + ctx, cancel := context.WithTimeout(r.Context(), dur) + defer cancel() + + done := make(chan bool, 1) + + var c *portmapper.Client + c = portmapper.NewClient(portmapper.Config{ + Logf: logger.WithPrefix(logf, "portmapper: "), + NetMon: h.LocalBackend().NetMon(), + DebugKnobs: debugKnobs, + EventBus: h.LocalBackend().EventBus(), + OnChange: func() { + logf("portmapping changed.") + logf("have mapping: %v", c.HaveMapping()) + + if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { + logf("cb: mapping: %v", ext) + select { + case done <- true: + default: + } + return + } + logf("cb: no mapping") + }, + }) + defer c.Close() + + bus := eventbus.New() + defer bus.Close() + netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) + if err != nil { + logf("error creating monitor: %v", err) + return + } + + gatewayAndSelfIP := func() (gw, self netip.Addr, ok bool) { + if a, b, ok := strings.Cut(gwSelf, "/"); ok { + gw = netip.MustParseAddr(a) + self = netip.MustParseAddr(b) + return gw, self, true + } + return netMon.GatewayAndSelfIP() + } + + c.SetGatewayLookupFunc(gatewayAndSelfIP) + + gw, selfIP, ok := gatewayAndSelfIP() + if !ok { + logf("no gateway or self IP; %v", netMon.InterfaceState()) + return + } + logf("gw=%v; self=%v", gw, selfIP) + + uc, err := net.ListenPacket("udp", "0.0.0.0:0") + if err != nil { + return + } + defer uc.Close() + c.SetLocalPort(uint16(uc.LocalAddr().(*net.UDPAddr).Port)) + + res, err := c.Probe(ctx) + if err != nil { + logf("error in Probe: %v", err) + return + } + logf("Probe: %+v", res) + + if !res.PCP && !res.PMP && !res.UPnP { + logf("no portmapping services available") + return + } + + if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { + logf("mapping: %v", ext) + } else { + logf("no mapping") + } + + select { + case <-done: + case <-ctx.Done(): + if r.Context().Err() == nil { + logf("serveDebugPortmap: context done: %v", ctx.Err()) + } else { + h.Logf("serveDebugPortmap: context done: %v", ctx.Err()) + } + } +} + +func defBool(a string, def bool) bool { + if a == "" { + return def + } + v, err := strconv.ParseBool(a) + if err != nil { + return def + } + return v +} diff --git a/feature/doctor/doctor.go b/feature/doctor/doctor.go new file mode 100644 index 0000000000000..875b57d14c4f0 --- /dev/null +++ b/feature/doctor/doctor.go @@ -0,0 +1,95 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The doctor package registers the "doctor" problem diagnosis support into the +// rest of Tailscale. +package doctor + +import ( + "context" + "fmt" + "html" + "net/http" + "time" + + "tailscale.com/doctor" + "tailscale.com/doctor/ethtool" + "tailscale.com/doctor/permissions" + "tailscale.com/doctor/routetable" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/net/tsaddr" + "tailscale.com/types/logger" +) + +func init() { + ipnlocal.HookDoctor.Set(visitDoctor) + ipnlocal.RegisterPeerAPIHandler("/v0/doctor", handleServeDoctor) +} + +func handleServeDoctor(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + if !h.CanDebug() { + http.Error(w, "denied; no debug access", http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprintln(w, "
<h1>Doctor Output</h1>")
+
+	fmt.Fprintln(w, "<pre>")
+
+	b := h.LocalBackend()
+	visitDoctor(r.Context(), b, func(format string, args ...any) {
+		line := fmt.Sprintf(format, args...)
+		fmt.Fprintln(w, html.EscapeString(line))
+	})
+
+	fmt.Fprintln(w, "</pre>
") +} + +func visitDoctor(ctx context.Context, b *ipnlocal.LocalBackend, logf logger.Logf) { + // We can write logs too fast for logtail to handle, even when + // opting-out of rate limits. Limit ourselves to at most one message + // per 20ms and a burst of 60 log lines, which should be fast enough to + // not block for too long but slow enough that we can upload all lines. + logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.Clock().Now) + + var checks []doctor.Check + checks = append(checks, + permissions.Check{}, + routetable.Check{}, + ethtool.Check{}, + ) + + // Print a log message if any of the global DNS resolvers are Tailscale + // IPs; this can interfere with our ability to connect to the Tailscale + // controlplane. + checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { + nm := b.NetMap() + if nm == nil { + return nil + } + + for i, resolver := range nm.DNS.Resolvers { + ipp, ok := resolver.IPPort() + if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { + logf("resolver %d is a Tailscale address: %v", i, resolver) + } + } + for i, resolver := range nm.DNS.FallbackResolvers { + ipp, ok := resolver.IPPort() + if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { + logf("fallback resolver %d is a Tailscale address: %v", i, resolver) + } + } + return nil + })) + + // TODO(andrew): more + + numChecks := len(checks) + checks = append(checks, doctor.CheckFunc("numchecks", func(_ context.Context, log logger.Logf) error { + log("%d checks", numChecks) + return nil + })) + + doctor.RunChecks(ctx, logf, checks...) +} diff --git a/feature/drive/drive.go b/feature/drive/drive.go new file mode 100644 index 0000000000000..3660a2b959643 --- /dev/null +++ b/feature/drive/drive.go @@ -0,0 +1,5 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package drive registers the Taildrive (file server) feature. +package drive diff --git a/feature/feature.go b/feature/feature.go index 5976d7f5a5d0d..0d383b398ab60 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -4,7 +4,12 @@ // Package feature tracks which features are linked into the binary. package feature -import "reflect" +import ( + "errors" + "reflect" +) + +var ErrUnavailable = errors.New("feature not included in this build") var in = map[string]bool{} @@ -45,7 +50,8 @@ func (h *Hook[Func]) Set(f Func) { } // Get returns the hook function, or panics if it hasn't been set. -// Use IsSet to check if it's been set. +// Use IsSet to check if it's been set, or use GetOrNil if you're +// okay with a nil return value. func (h *Hook[Func]) Get() Func { if !h.ok { panic("Get on unset feature hook, without IsSet") @@ -59,6 +65,11 @@ func (h *Hook[Func]) GetOk() (f Func, ok bool) { return h.f, h.ok } +// GetOrNil returns the hook function or nil if it hasn't been set. +func (h *Hook[Func]) GetOrNil() Func { + return h.f +} + // Hooks is a slice of funcs. // // As opposed to a single Hook, this is meant to be used when diff --git a/feature/featuretags/featuretags.go b/feature/featuretags/featuretags.go new file mode 100644 index 0000000000000..c93e8b15b1001 --- /dev/null +++ b/feature/featuretags/featuretags.go @@ -0,0 +1,287 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// The featuretags package is a registry of all the ts_omit-able build tags. 
+package featuretags + +import "tailscale.com/util/set" + +// CLI is a special feature in the [Features] map that works opposite +// from the others: it is opt-in, rather than opt-out, having a different +// build tag format. +const CLI FeatureTag = "cli" + +// FeatureTag names a Tailscale feature that can be selectively added or removed +// via build tags. +type FeatureTag string + +// IsOmittable reports whether this feature tag is one that can be +// omitted via a ts_omit_ build tag. +func (ft FeatureTag) IsOmittable() bool { + switch ft { + case CLI: + return false + } + return true +} + +// OmitTag returns the ts_omit_ build tag for this feature tag. +// It panics if the feature tag is not omitable. +func (ft FeatureTag) OmitTag() string { + if !ft.IsOmittable() { + panic("not omitable: " + string(ft)) + } + return "ts_omit_" + string(ft) +} + +// Requires returns the set of features that must be included to +// use the given feature, including the provided feature itself. +func Requires(ft FeatureTag) set.Set[FeatureTag] { + s := set.Set[FeatureTag]{} + var add func(FeatureTag) + add = func(ft FeatureTag) { + s.Add(ft) + for _, dep := range Features[ft].Deps { + add(dep) + } + } + add(ft) + return s +} + +// RequiredBy is the inverse of Requires: it returns the set of features that +// depend on the given feature (directly or indirectly), including the feature +// itself. +func RequiredBy(ft FeatureTag) set.Set[FeatureTag] { + s := set.Set[FeatureTag]{} + for f := range Features { + if featureDependsOn(f, ft) { + s.Add(f) + } + } + return s +} + +// featureDependsOn reports whether feature a (directly or indirectly) depends on b. +// It returns true if a == b. +func featureDependsOn(a, b FeatureTag) bool { + if a == b { + return true + } + for _, dep := range Features[a].Deps { + if featureDependsOn(dep, b) { + return true + } + } + return false +} + +// FeatureMeta describes a modular feature that can be conditionally linked into +// the binary. +type FeatureMeta struct { + Sym string // exported Go symbol for boolean const + Desc string // human-readable description + Deps []FeatureTag // other features this feature requires + + // ImplementationDetail is whether the feature is an internal implementation + // detail. That is, it's not something a user wuold care about having or not + // having, but we'd like to able to omit from builds if no other + // user-visible features depend on it. + ImplementationDetail bool +} + +// Features are the known Tailscale features that can be selectively included or +// excluded via build tags, and a description of each. 
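Given the Requires, RequiredBy, IsOmittable, and OmitTag helpers above, here is a small illustrative program computing which ts_omit_* tags have to be passed together when dropping a feature; the expected sets for "webclient" and "serve" match this package's own tests further below:

package main

import (
	"fmt"
	"maps"
	"slices"

	"tailscale.com/feature/featuretags"
)

func main() {
	// What keeping "webclient" pulls in: netstack, serve, webclient.
	fmt.Println(slices.Sorted(maps.Keys(featuretags.Requires("webclient"))))

	// Omitting "serve" also requires omitting its dependents, so collect
	// every omit tag in RequiredBy("serve"): ts_omit_serve, ts_omit_webclient.
	var omit []string
	for ft := range featuretags.RequiredBy("serve") {
		if ft.IsOmittable() {
			omit = append(omit, ft.OmitTag())
		}
	}
	slices.Sort(omit)
	fmt.Println(omit)
}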
+var Features = map[FeatureTag]FeatureMeta{ + "ace": {Sym: "ACE", Desc: "Alternate Connectivity Endpoints"}, + "acme": {Sym: "ACME", Desc: "ACME TLS certificate management"}, + "appconnectors": {Sym: "AppConnectors", Desc: "App Connectors support"}, + "aws": {Sym: "AWS", Desc: "AWS integration"}, + "advertiseexitnode": { + Sym: "AdvertiseExitNode", + Desc: "Run an exit node", + Deps: []FeatureTag{ + "peerapiserver", // to run the ExitDNS server + "advertiseroutes", + }, + }, + "advertiseroutes": { + Sym: "AdvertiseRoutes", + Desc: "Advertise routes for other nodes to use", + Deps: []FeatureTag{ + "c2n", // for control plane to probe health for HA subnet router leader election + }, + }, + "bakedroots": {Sym: "BakedRoots", Desc: "Embed CA (LetsEncrypt) x509 roots to use as fallback"}, + "bird": { + Sym: "Bird", + Desc: "Bird BGP integration", + Deps: []FeatureTag{"advertiseroutes"}, + }, + "c2n": { + Sym: "C2N", + Desc: "Control-to-node (C2N) support", + ImplementationDetail: true, + }, + "captiveportal": {Sym: "CaptivePortal", Desc: "Captive portal detection"}, + "capture": {Sym: "Capture", Desc: "Packet capture"}, + "cli": {Sym: "CLI", Desc: "embed the CLI into the tailscaled binary"}, + "cliconndiag": {Sym: "CLIConnDiag", Desc: "CLI connection error diagnostics"}, + "clientmetrics": {Sym: "ClientMetrics", Desc: "Client metrics support"}, + "clientupdate": { + Sym: "ClientUpdate", + Desc: "Client auto-update support", + Deps: []FeatureTag{"c2n"}, + }, + "completion": {Sym: "Completion", Desc: "CLI shell completion"}, + "cloud": {Sym: "Cloud", Desc: "detect cloud environment to learn instances IPs and DNS servers"}, + "dbus": { + Sym: "DBus", + Desc: "Linux DBus support", + ImplementationDetail: true, + }, + "debug": {Sym: "Debug", Desc: "various debug support, for things that don't have or need their own more specific feature"}, + "debugeventbus": {Sym: "DebugEventBus", Desc: "eventbus debug support"}, + "debugportmapper": { + Sym: "DebugPortMapper", + Desc: "portmapper debug support", + Deps: []FeatureTag{"portmapper"}, + }, + "desktop_sessions": {Sym: "DesktopSessions", Desc: "Desktop sessions support"}, + "doctor": {Sym: "Doctor", Desc: "Diagnose possible issues with Tailscale and its host environment"}, + "drive": {Sym: "Drive", Desc: "Tailscale Drive (file server) support"}, + "gro": { + Sym: "GRO", + Desc: "Generic Receive Offload support (performance)", + Deps: []FeatureTag{"netstack"}, + }, + "health": {Sym: "Health", Desc: "Health checking support"}, + "hujsonconf": {Sym: "HuJSONConf", Desc: "HuJSON config file support"}, + "identityfederation": {Sym: "IdentityFederation", Desc: "Auth key generation via identity federation support"}, + "iptables": {Sym: "IPTables", Desc: "Linux iptables support"}, + "kube": {Sym: "Kube", Desc: "Kubernetes integration"}, + "lazywg": {Sym: "LazyWG", Desc: "Lazy WireGuard configuration for memory-constrained devices with large netmaps"}, + "linuxdnsfight": {Sym: "LinuxDNSFight", Desc: "Linux support for detecting DNS fights (inotify watching of /etc/resolv.conf)"}, + "linkspeed": { + Sym: "LinkSpeed", + Desc: "Set link speed on TUN device for better OS integration (Linux only)", + }, + "listenrawdisco": { + Sym: "ListenRawDisco", + Desc: "Use raw sockets for more robust disco (NAT traversal) message receiving (Linux only)", + }, + "logtail": { + Sym: "LogTail", + Desc: "upload logs to log.tailscale.com (debug logs for bug reports and also by network flow logs if enabled)", + }, + "oauthkey": {Sym: "OAuthKey", Desc: "OAuth secret-to-authkey resolution 
support"}, + "outboundproxy": { + Sym: "OutboundProxy", + Desc: "Support running an outbound localhost HTTP/SOCK5 proxy support that sends traffic over Tailscale", + Deps: []FeatureTag{"netstack"}, + }, + "osrouter": { + Sym: "OSRouter", + Desc: "Configure the operating system's network stack, IPs, and routing tables", + // TODO(bradfitz): if this is omitted, and netstack is too, then tailscaled needs + // external config to be useful. Some people may want that, and we should support it, + // but it's rare. Maybe there should be a way to declare here that this "Provides" + // another feature (and netstack can too), and then if those required features provided + // by some other feature are missing, then it's an error by default unless you accept + // that it's okay to proceed without that meta feature. + }, + "peerapiclient": { + Sym: "PeerAPIClient", + Desc: "PeerAPI client support", + ImplementationDetail: true, + }, + "peerapiserver": { + Sym: "PeerAPIServer", + Desc: "PeerAPI server support", + ImplementationDetail: true, + }, + "portlist": {Sym: "PortList", Desc: "Optionally advertise listening service ports"}, + "portmapper": {Sym: "PortMapper", Desc: "NAT-PMP/PCP/UPnP port mapping support"}, + "posture": {Sym: "Posture", Desc: "Device posture checking support"}, + "dns": { + Sym: "DNS", + Desc: "MagicDNS and system DNS configuration support", + }, + "netlog": { + Sym: "NetLog", + Desc: "Network flow logging support", + Deps: []FeatureTag{"logtail"}, + }, + "netstack": {Sym: "Netstack", Desc: "gVisor netstack (userspace networking) support"}, + "networkmanager": { + Sym: "NetworkManager", + Desc: "Linux NetworkManager integration", + Deps: []FeatureTag{"dbus"}, + }, + "relayserver": {Sym: "RelayServer", Desc: "Relay server"}, + "resolved": { + Sym: "Resolved", + Desc: "Linux systemd-resolved integration", + Deps: []FeatureTag{"dbus"}, + }, + "sdnotify": { + Sym: "SDNotify", + Desc: "systemd notification support", + }, + "serve": { + Sym: "Serve", + Desc: "Serve and Funnel support", + Deps: []FeatureTag{"netstack"}, + }, + "ssh": { + Sym: "SSH", + Desc: "Tailscale SSH support", + Deps: []FeatureTag{"c2n", "dbus", "netstack"}, + }, + "synology": { + Sym: "Synology", + Desc: "Synology NAS integration (applies to Linux builds only)", + }, + "syspolicy": {Sym: "SystemPolicy", Desc: "System policy configuration (MDM) support"}, + "systray": { + Sym: "SysTray", + Desc: "Linux system tray", + Deps: []FeatureTag{"dbus"}, + }, + "taildrop": { + Sym: "Taildrop", + Desc: "Taildrop (file sending) support", + Deps: []FeatureTag{ + "peerapiclient", "peerapiserver", // assume Taildrop is both sides for now + }, + }, + "tailnetlock": {Sym: "TailnetLock", Desc: "Tailnet Lock support"}, + "tap": {Sym: "Tap", Desc: "Experimental Layer 2 (ethernet) support"}, + "tpm": {Sym: "TPM", Desc: "TPM support"}, + "unixsocketidentity": { + Sym: "UnixSocketIdentity", + Desc: "differentiate between users accessing the LocalAPI over unix sockets (if omitted, all users have full access)", + }, + "useroutes": { + Sym: "UseRoutes", + Desc: "Use routes advertised by other nodes", + }, + "useexitnode": { + Sym: "UseExitNode", + Desc: "Use exit nodes", + Deps: []FeatureTag{"peerapiclient", "useroutes"}, + }, + "useproxy": { + Sym: "UseProxy", + Desc: "Support using system proxies as specified by env vars or the system configuration to reach Tailscale servers.", + }, + "usermetrics": { + Sym: "UserMetrics", + Desc: "Usermetrics (documented, stable) metrics support", + }, + "wakeonlan": {Sym: "WakeOnLAN", Desc: "Wake-on-LAN 
support"}, + "webclient": { + Sym: "WebClient", Desc: "Web client support", + Deps: []FeatureTag{"serve"}, + }, +} diff --git a/feature/featuretags/featuretags_test.go b/feature/featuretags/featuretags_test.go new file mode 100644 index 0000000000000..893ab0e6a1c71 --- /dev/null +++ b/feature/featuretags/featuretags_test.go @@ -0,0 +1,85 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package featuretags + +import ( + "maps" + "slices" + "testing" + + "tailscale.com/util/set" +) + +func TestKnownDeps(t *testing.T) { + for tag, meta := range Features { + for _, dep := range meta.Deps { + if _, ok := Features[dep]; !ok { + t.Errorf("feature %q has unknown dependency %q", tag, dep) + } + } + + // And indirectly check for cycles. If there were a cycle, + // this would infinitely loop. + deps := Requires(tag) + t.Logf("deps of %q: %v", tag, slices.Sorted(maps.Keys(deps))) + } +} + +func TestRequires(t *testing.T) { + var setOf = set.Of[FeatureTag] + tests := []struct { + in FeatureTag + want set.Set[FeatureTag] + }{ + { + in: "drive", + want: setOf("drive"), + }, + { + in: "cli", + want: setOf("cli"), + }, + { + in: "serve", + want: setOf("serve", "netstack"), + }, + { + in: "webclient", + want: setOf("webclient", "serve", "netstack"), + }, + } + for _, tt := range tests { + got := Requires(tt.in) + if !maps.Equal(got, tt.want) { + t.Errorf("DepSet(%q) = %v, want %v", tt.in, got, tt.want) + } + } +} + +func TestRequiredBy(t *testing.T) { + var setOf = set.Of[FeatureTag] + tests := []struct { + in FeatureTag + want set.Set[FeatureTag] + }{ + { + in: "drive", + want: setOf("drive"), + }, + { + in: "webclient", + want: setOf("webclient"), + }, + { + in: "serve", + want: setOf("webclient", "serve"), + }, + } + for _, tt := range tests { + got := RequiredBy(tt.in) + if !maps.Equal(got, tt.want) { + t.Errorf("FeaturesWhichDependOn(%q) = %v, want %v", tt.in, got, tt.want) + } + } +} diff --git a/feature/hooks.go b/feature/hooks.go new file mode 100644 index 0000000000000..a3c6c0395ee81 --- /dev/null +++ b/feature/hooks.go @@ -0,0 +1,73 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package feature + +import ( + "net/http" + "net/url" + + "tailscale.com/types/logger" + "tailscale.com/types/persist" +) + +// HookCanAutoUpdate is a hook for the clientupdate package +// to conditionally initialize. +var HookCanAutoUpdate Hook[func() bool] + +// CanAutoUpdate reports whether the current binary is built with auto-update +// support and, if so, whether the current platform supports it. +func CanAutoUpdate() bool { + if f, ok := HookCanAutoUpdate.GetOk(); ok { + return f() + } + return false +} + +// HookProxyFromEnvironment is a hook for feature/useproxy to register +// a function to use as http.ProxyFromEnvironment. +var HookProxyFromEnvironment Hook[func(*http.Request) (*url.URL, error)] + +// HookProxyInvalidateCache is a hook for feature/useproxy to register +// [tshttpproxy.InvalidateCache]. +var HookProxyInvalidateCache Hook[func()] + +// HookProxyGetAuthHeader is a hook for feature/useproxy to register +// [tshttpproxy.GetAuthHeader]. +var HookProxyGetAuthHeader Hook[func(*url.URL) (string, error)] + +// HookProxySetSelfProxy is a hook for feature/useproxy to register +// [tshttpproxy.SetSelfProxy]. +var HookProxySetSelfProxy Hook[func(...string)] + +// HookProxySetTransportGetProxyConnectHeader is a hook for feature/useproxy to register +// [tshttpproxy.SetTransportGetProxyConnectHeader]. 
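The hook variables declared in feature/hooks.go around this point all follow the same optional-implementation pattern built on feature.Hook from feature/feature.go above. A hypothetical hook and caller, using only the Hook API shown in this diff (Set from a feature package's init, GetOk with a graceful fallback, ErrUnavailable when nothing registered):

package example

import "tailscale.com/feature"

// HookRenderBanner is a made-up optional hook; a modular feature package
// would populate it from its init func via HookRenderBanner.Set(...).
var HookRenderBanner feature.Hook[func(user string) string]

// banner degrades gracefully when the optional feature isn't linked in.
func banner(user string) (string, error) {
	if f, ok := HookRenderBanner.GetOk(); ok {
		return f(user), nil
	}
	return "", feature.ErrUnavailable
}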
+var HookProxySetTransportGetProxyConnectHeader Hook[func(*http.Transport)] + +// HookTPMAvailable is a hook that reports whether a TPM device is supported +// and available. +var HookTPMAvailable Hook[func() bool] + +var HookGenerateAttestationKeyIfEmpty Hook[func(p *persist.Persist, logf logger.Logf) (bool, error)] + +// TPMAvailable reports whether a TPM device is supported and available. +func TPMAvailable() bool { + if f, ok := HookTPMAvailable.GetOk(); ok { + return f() + } + return false +} + +// HookHardwareAttestationAvailable is a hook that reports whether hardware +// attestation is supported and available. +var HookHardwareAttestationAvailable Hook[func() bool] + +// HardwareAttestationAvailable reports whether hardware attestation is +// supported and available (TPM on Windows/Linux, Secure Enclave on macOS|iOS, +// KeyStore on Android) +func HardwareAttestationAvailable() bool { + if f, ok := HookHardwareAttestationAvailable.GetOk(); ok { + return f() + } + return false +} diff --git a/feature/identityfederation/identityfederation.go b/feature/identityfederation/identityfederation.go new file mode 100644 index 0000000000000..a4470fc27eaea --- /dev/null +++ b/feature/identityfederation/identityfederation.go @@ -0,0 +1,127 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package identityfederation registers support for using ID tokens to +// automatically request authkeys for logging in. +package identityfederation + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" + "tailscale.com/ipn" +) + +func init() { + feature.Register("identityfederation") + tailscale.HookResolveAuthKeyViaWIF.Set(resolveAuthKey) +} + +// resolveAuthKey uses OIDC identity federation to exchange the provided ID token and client ID for an authkey. 
+func resolveAuthKey(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error) { + if clientID == "" { + return "", nil // Short-circuit, no client ID means not using identity federation + } + + if idToken == "" { + return "", errors.New("federated identity authkeys require --id-token") + } + if len(tags) == 0 { + return "", errors.New("federated identity authkeys require --advertise-tags") + } + if baseURL == "" { + baseURL = ipn.DefaultControlURL + } + + ephemeral, preauth, err := parseOptionalAttributes(clientID) + if err != nil { + return "", fmt.Errorf("failed to parse optional config attributes: %w", err) + } + + accessToken, err := exchangeJWTForToken(ctx, baseURL, clientID, idToken) + if err != nil { + return "", fmt.Errorf("failed to exchange JWT for access token: %w", err) + } + if accessToken == "" { + return "", errors.New("received empty access token from Tailscale") + } + + tsClient := tailscale.NewClient("-", tailscale.APIKey(accessToken)) + tsClient.UserAgent = "tailscale-cli-identity-federation" + tsClient.BaseURL = baseURL + + authkey, _, err := tsClient.CreateKey(ctx, tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: ephemeral, + Preauthorized: preauth, + Tags: tags, + }, + }, + }) + if err != nil { + return "", fmt.Errorf("unexpected error while creating authkey: %w", err) + } + if authkey == "" { + return "", errors.New("received empty authkey from control server") + } + + return authkey, nil +} + +func parseOptionalAttributes(clientID string) (ephemeral bool, preauthorized bool, err error) { + _, attrs, found := strings.Cut(clientID, "?") + if !found { + return true, false, nil + } + + parsed, err := url.ParseQuery(attrs) + if err != nil { + return false, false, fmt.Errorf("failed to parse optional config attributes: %w", err) + } + + for k := range parsed { + switch k { + case "ephemeral": + ephemeral, err = strconv.ParseBool(parsed.Get(k)) + case "preauthorized": + preauthorized, err = strconv.ParseBool(parsed.Get(k)) + default: + return false, false, fmt.Errorf("unknown optional config attribute %q", k) + } + } + + return ephemeral, preauthorized, err +} + +// exchangeJWTForToken exchanges a JWT for a Tailscale access token. 
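As an aside, a rough, self-contained sketch of the "client-id?attr=val" syntax that parseOptionalAttributes (above) accepts; the real parser is authoritative (in particular its defaults of ephemeral=true and preauthorized=false), and this only illustrates the query-string convention:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// splitClientID separates the optional attributes from a client ID.
// Only "ephemeral" and "preauthorized" are accepted by the real parser.
func splitClientID(clientID string) (id string, attrs url.Values, err error) {
	id, query, found := strings.Cut(clientID, "?")
	if !found {
		return id, nil, nil
	}
	attrs, err = url.ParseQuery(query)
	return id, attrs, err
}

func main() {
	id, attrs, _ := splitClientID("client-123?ephemeral=false&preauthorized=true")
	fmt.Println(id, attrs.Get("ephemeral"), attrs.Get("preauthorized"))
	// Output: client-123 false true
}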
+func exchangeJWTForToken(ctx context.Context, baseURL, clientID, idToken string) (string, error) { + httpClient := &http.Client{Timeout: 10 * time.Second} + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + + token, err := (&oauth2.Config{ + Endpoint: oauth2.Endpoint{ + TokenURL: fmt.Sprintf("%s/api/v2/oauth/token-exchange", baseURL), + }, + }).Exchange(ctx, "", oauth2.SetAuthURLParam("client_id", clientID), oauth2.SetAuthURLParam("jwt", idToken)) + if err != nil { + // Try to extract more detailed error message + var retrieveErr *oauth2.RetrieveError + if errors.As(err, &retrieveErr) { + return "", fmt.Errorf("token exchange failed with status %d: %s", retrieveErr.Response.StatusCode, string(retrieveErr.Body)) + } + return "", fmt.Errorf("unexpected token exchange request error: %w", err) + } + + return token.AccessToken, nil +} diff --git a/feature/identityfederation/identityfederation_test.go b/feature/identityfederation/identityfederation_test.go new file mode 100644 index 0000000000000..7b75852a819a1 --- /dev/null +++ b/feature/identityfederation/identityfederation_test.go @@ -0,0 +1,167 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package identityfederation + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestResolveAuthKey(t *testing.T) { + tests := []struct { + name string + clientID string + idToken string + tags []string + wantAuthKey string + wantErr string + }{ + { + name: "success", + clientID: "client-123", + idToken: "token", + tags: []string{"tag:test"}, + wantAuthKey: "tskey-auth-xyz", + wantErr: "", + }, + { + name: "missing client id short-circuits without error", + clientID: "", + idToken: "token", + tags: []string{"tag:test"}, + wantAuthKey: "", + wantErr: "", + }, + { + name: "missing id token", + clientID: "client-123", + idToken: "", + tags: []string{"tag:test"}, + wantErr: "federated identity authkeys require --id-token", + }, + { + name: "missing tags", + clientID: "client-123", + idToken: "token", + tags: []string{}, + wantErr: "federated identity authkeys require --advertise-tags", + }, + { + name: "invalid client id attributes", + clientID: "client-123?invalid=value", + idToken: "token", + tags: []string{"tag:test"}, + wantErr: `failed to parse optional config attributes: unknown optional config attribute "invalid"`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv := mockedControlServer(t) + defer srv.Close() + + authKey, err := resolveAuthKey(context.Background(), srv.URL, tt.clientID, tt.idToken, tt.tags) + if tt.wantErr != "" { + if err == nil { + t.Errorf("resolveAuthKey() error = nil, want %q", tt.wantErr) + return + } + if err.Error() != tt.wantErr { + t.Errorf("resolveAuthKey() error = %q, want %q", err.Error(), tt.wantErr) + } + } else if err != nil { + t.Fatalf("resolveAuthKey() unexpected error = %v", err) + } + if authKey != tt.wantAuthKey { + t.Errorf("resolveAuthKey() = %q, want %q", authKey, tt.wantAuthKey) + } + }) + } +} + +func TestParseOptionalAttributes(t *testing.T) { + tests := []struct { + name string + clientID string + wantEphemeral bool + wantPreauth bool + wantErr string + }{ + { + name: "default values", + clientID: "client-123", + wantEphemeral: true, + wantPreauth: false, + wantErr: "", + }, + { + name: "custom values", + clientID: "client-123?ephemeral=false&preauthorized=true", + wantEphemeral: false, + wantPreauth: true, + wantErr: "", + }, + { + name: "unknown attribute", + clientID: 
"client-123?unknown=value", + wantEphemeral: false, + wantPreauth: false, + wantErr: `unknown optional config attribute "unknown"`, + }, + { + name: "invalid value", + clientID: "client-123?ephemeral=invalid", + wantEphemeral: false, + wantPreauth: false, + wantErr: `strconv.ParseBool: parsing "invalid": invalid syntax`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ephemeral, preauth, err := parseOptionalAttributes(tt.clientID) + if tt.wantErr != "" { + if err == nil { + t.Errorf("parseOptionalAttributes() error = nil, want %q", tt.wantErr) + return + } + if err.Error() != tt.wantErr { + t.Errorf("parseOptionalAttributes() error = %q, want %q", err.Error(), tt.wantErr) + } + } else { + if err != nil { + t.Errorf("parseOptionalAttributes() error = %v, want nil", err) + return + } + } + if ephemeral != tt.wantEphemeral { + t.Errorf("parseOptionalAttributes() ephemeral = %v, want %v", ephemeral, tt.wantEphemeral) + } + if preauth != tt.wantPreauth { + t.Errorf("parseOptionalAttributes() preauth = %v, want %v", preauth, tt.wantPreauth) + } + }) + } +} + +func mockedControlServer(t *testing.T) *httptest.Server { + t.Helper() + + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.Contains(r.URL.Path, "/oauth/token-exchange"): + // OAuth2 library sends the token exchange request + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"access-123","token_type":"Bearer","expires_in":3600}`)) + case strings.Contains(r.URL.Path, "/api/v2/tailnet") && strings.Contains(r.URL.Path, "/keys"): + // Tailscale client creates the authkey + w.Write([]byte(`{"key":"tskey-auth-xyz","created":"2024-01-01T00:00:00Z"}`)) + default: + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/feature/linkspeed/doc.go b/feature/linkspeed/doc.go new file mode 100644 index 0000000000000..2d2fcf0929808 --- /dev/null +++ b/feature/linkspeed/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package linkspeed registers support for setting the TUN link speed on Linux, +// to better integrate with system monitoring tools. +package linkspeed diff --git a/net/tstun/linkattrs_linux.go b/feature/linkspeed/linkspeed_linux.go similarity index 91% rename from net/tstun/linkattrs_linux.go rename to feature/linkspeed/linkspeed_linux.go index 320385ba694dc..90e33d4c9fea4 100644 --- a/net/tstun/linkattrs_linux.go +++ b/feature/linkspeed/linkspeed_linux.go @@ -1,17 +1,22 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !android +//go:build linux && !android -package tstun +package linkspeed import ( "github.com/mdlayher/genetlink" "github.com/mdlayher/netlink" "github.com/tailscale/wireguard-go/tun" "golang.org/x/sys/unix" + "tailscale.com/net/tstun" ) +func init() { + tstun.HookSetLinkAttrs.Set(setLinkAttrs) +} + // setLinkSpeed sets the advertised link speed of the TUN interface. 
func setLinkSpeed(iface tun.Device, mbps int) error { name, err := iface.Name() diff --git a/feature/linuxdnsfight/linuxdnsfight.go b/feature/linuxdnsfight/linuxdnsfight.go new file mode 100644 index 0000000000000..02d99a3144246 --- /dev/null +++ b/feature/linuxdnsfight/linuxdnsfight.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android + +// Package linuxdnsfight provides Linux support for detecting DNS fights +// (inotify watching of /etc/resolv.conf). +package linuxdnsfight + +import ( + "context" + "fmt" + + "github.com/illarion/gonotify/v3" + "tailscale.com/net/dns" +) + +func init() { + dns.HookWatchFile.Set(watchFile) +} + +// watchFile sets up an inotify watch for a given directory and +// calls the callback function every time a particular file is changed. +// The filename should be located in the provided directory. +func watchFile(ctx context.Context, dir, filename string, cb func()) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + const events = gonotify.IN_ATTRIB | + gonotify.IN_CLOSE_WRITE | + gonotify.IN_CREATE | + gonotify.IN_DELETE | + gonotify.IN_MODIFY | + gonotify.IN_MOVE + + watcher, err := gonotify.NewDirWatcher(ctx, events, dir) + if err != nil { + return fmt.Errorf("NewDirWatcher: %w", err) + } + + for { + select { + case event := <-watcher.C: + if event.Name == filename { + cb() + } + case <-ctx.Done(): + return ctx.Err() + } + } +} diff --git a/net/dns/direct_linux_test.go b/feature/linuxdnsfight/linuxdnsfight_test.go similarity index 74% rename from net/dns/direct_linux_test.go rename to feature/linuxdnsfight/linuxdnsfight_test.go index 079d060ed3323..bd3463666d46b 100644 --- a/net/dns/direct_linux_test.go +++ b/feature/linuxdnsfight/linuxdnsfight_test.go @@ -1,7 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package dns +//go:build linux && !android + +package linuxdnsfight import ( "context" @@ -25,8 +27,13 @@ func TestWatchFile(t *testing.T) { var callbackCalled atomic.Bool callbackDone := make(chan bool) callback := func() { - callbackDone <- true - callbackCalled.Store(true) + // We only send to the channel once to avoid blocking if the + // callback is called multiple times -- this happens occasionally + // if inotify sends multiple events before we cancel the context. + if !callbackCalled.Load() { + callbackDone <- true + callbackCalled.Store(true) + } } var eg errgroup.Group diff --git a/feature/oauthkey/oauthkey.go b/feature/oauthkey/oauthkey.go new file mode 100644 index 0000000000000..5834c33becad6 --- /dev/null +++ b/feature/oauthkey/oauthkey.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package oauthkey registers support for using OAuth client secrets to +// automatically request authkeys for logging in. +package oauthkey + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "golang.org/x/oauth2/clientcredentials" + "tailscale.com/feature" + "tailscale.com/internal/client/tailscale" +) + +func init() { + feature.Register("oauthkey") + tailscale.HookResolveAuthKey.Set(resolveAuthKey) +} + +// resolveAuthKey either returns v unchanged (in the common case) or, if it +// starts with "tskey-client-" (as Tailscale OAuth secrets do) parses it like +// +// tskey-client-xxxx[?ephemeral=false&bar&preauthorized=BOOL&baseURL=...] +// +// and does the OAuth2 dance to get and return an authkey. 
The "ephemeral" +// property defaults to true if unspecified. The "preauthorized" defaults to +// false. The "baseURL" defaults to https://api.tailscale.com. +// The passed in tags are required, and must be non-empty. These will be +// set on the authkey generated by the OAuth2 dance. +func resolveAuthKey(ctx context.Context, v string, tags []string) (string, error) { + if !strings.HasPrefix(v, "tskey-client-") { + return v, nil + } + if len(tags) == 0 { + return "", errors.New("oauth authkeys require --advertise-tags") + } + + clientSecret, named, _ := strings.Cut(v, "?") + attrs, err := url.ParseQuery(named) + if err != nil { + return "", err + } + for k := range attrs { + switch k { + case "ephemeral", "preauthorized", "baseURL": + default: + return "", fmt.Errorf("unknown attribute %q", k) + } + } + getBool := func(name string, def bool) (bool, error) { + v := attrs.Get(name) + if v == "" { + return def, nil + } + ret, err := strconv.ParseBool(v) + if err != nil { + return false, fmt.Errorf("invalid attribute boolean attribute %s value %q", name, v) + } + return ret, nil + } + ephemeral, err := getBool("ephemeral", true) + if err != nil { + return "", err + } + preauth, err := getBool("preauthorized", false) + if err != nil { + return "", err + } + + baseURL := "https://api.tailscale.com" + if v := attrs.Get("baseURL"); v != "" { + baseURL = v + } + + credentials := clientcredentials.Config{ + ClientID: "some-client-id", // ignored + ClientSecret: clientSecret, + TokenURL: baseURL + "/api/v2/oauth/token", + } + + tsClient := tailscale.NewClient("-", nil) + tsClient.UserAgent = "tailscale-cli" + tsClient.HTTPClient = credentials.Client(ctx) + tsClient.BaseURL = baseURL + + caps := tailscale.KeyCapabilities{ + Devices: tailscale.KeyDeviceCapabilities{ + Create: tailscale.KeyDeviceCreateCapabilities{ + Reusable: false, + Ephemeral: ephemeral, + Preauthorized: preauth, + Tags: tags, + }, + }, + } + + authkey, _, err := tsClient.CreateKey(ctx, caps) + if err != nil { + return "", err + } + return authkey, nil +} diff --git a/feature/portlist/portlist.go b/feature/portlist/portlist.go new file mode 100644 index 0000000000000..7d69796ffd5d2 --- /dev/null +++ b/feature/portlist/portlist.go @@ -0,0 +1,157 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portlist contains code to poll the local system for open ports +// and report them to the control plane, if enabled on the tailnet. +package portlist + +import ( + "context" + "sync/atomic" + + "tailscale.com/envknob" + "tailscale.com/ipn" + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/ipn/policy" + "tailscale.com/portlist" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/version" +) + +func init() { + ipnext.RegisterExtension("portlist", newExtension) +} + +func newExtension(logf logger.Logf, sb ipnext.SafeBackend) (ipnext.Extension, error) { + busClient := sb.Sys().Bus.Get().Client("portlist") + e := &Extension{ + sb: sb, + busClient: busClient, + logf: logger.WithPrefix(logf, "portlist: "), + pub: eventbus.Publish[ipnlocal.PortlistServices](busClient), + pollerDone: make(chan struct{}), + wakePoller: make(chan struct{}), + } + e.ctx, e.ctxCancel = context.WithCancel(context.Background()) + return e, nil +} + +// Extension implements the portlist extension. 
+type Extension struct { + ctx context.Context + ctxCancel context.CancelFunc + pollerDone chan struct{} // close-only chan when poller goroutine exits + wakePoller chan struct{} // best effort chan to wake poller from sleep + busClient *eventbus.Client + pub *eventbus.Publisher[ipnlocal.PortlistServices] + logf logger.Logf + sb ipnext.SafeBackend + host ipnext.Host // from Init + + shieldsUp atomic.Bool + shouldUploadServicesAtomic atomic.Bool +} + +func (e *Extension) Name() string { return "portlist" } +func (e *Extension) Shutdown() error { + e.ctxCancel() + e.busClient.Close() + <-e.pollerDone + return nil +} + +func (e *Extension) Init(h ipnext.Host) error { + if !envknob.BoolDefaultTrue("TS_PORTLIST") { + return ipnext.SkipExtension + } + + e.host = h + h.Hooks().ShouldUploadServices.Set(e.shouldUploadServicesAtomic.Load) + h.Hooks().ProfileStateChange.Add(e.onChangeProfile) + h.Hooks().OnSelfChange.Add(e.onSelfChange) + + // TODO(nickkhyl): remove this after the profileManager refactoring. + // See tailscale/tailscale#15974. + // This same workaround appears in feature/taildrop/ext.go. + profile, prefs := h.Profiles().CurrentProfileState() + e.onChangeProfile(profile, prefs, false) + + go e.runPollLoop() + return nil +} + +func (e *Extension) onSelfChange(tailcfg.NodeView) { + e.updateShouldUploadServices() +} + +func (e *Extension) onChangeProfile(_ ipn.LoginProfileView, prefs ipn.PrefsView, sameNode bool) { + e.shieldsUp.Store(prefs.ShieldsUp()) + e.updateShouldUploadServices() +} + +func (e *Extension) updateShouldUploadServices() { + v := !e.shieldsUp.Load() && e.host.NodeBackend().CollectServices() + if e.shouldUploadServicesAtomic.CompareAndSwap(!v, v) && v { + // Upon transition from false to true (enabling service reporting), try + // to wake the poller to do an immediate poll if it's sleeping. + // It's not a big deal if we miss waking it. It'll get to it soon enough. + select { + case e.wakePoller <- struct{}{}: + default: + } + } +} + +// runPollLoop is a goroutine that periodically checks the open +// ports and publishes them if they've changed. +func (e *Extension) runPollLoop() { + defer close(e.pollerDone) + + var poller portlist.Poller + + ticker, tickerChannel := e.sb.Clock().NewTicker(portlist.PollInterval()) + defer ticker.Stop() + for { + select { + case <-tickerChannel: + case <-e.wakePoller: + case <-e.ctx.Done(): + return + } + + if !e.shouldUploadServicesAtomic.Load() { + continue + } + + ports, changed, err := poller.Poll() + if err != nil { + e.logf("Poll: %v", err) + // TODO: this is kinda weird that we just return here and never try + // again. Maybe that was because all errors are assumed to be + // permission errors and thus permanent? Audit varioys OS + // implementation and check error types, and then make this check + // for permanent vs temporary errors and keep looping with a backoff + // for temporary errors? But for now we just give up, like we always + // have. 
+ return + } + if !changed { + continue + } + sl := []tailcfg.Service{} + for _, p := range ports { + s := tailcfg.Service{ + Proto: tailcfg.ServiceProto(p.Proto), + Port: p.Port, + Description: p.Process, + } + if policy.IsInterestingService(s, version.OS()) { + sl = append(sl, s) + } + } + e.pub.Publish(ipnlocal.PortlistServices(sl)) + } +} diff --git a/feature/portmapper/portmapper.go b/feature/portmapper/portmapper.go new file mode 100644 index 0000000000000..e7be00ad17d8c --- /dev/null +++ b/feature/portmapper/portmapper.go @@ -0,0 +1,38 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package portmapper registers support for NAT-PMP, PCP, and UPnP port +// mapping protocols to help get direction connections through NATs. +package portmapper + +import ( + "tailscale.com/net/netmon" + "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +func init() { + portmappertype.HookNewPortMapper.Set(newPortMapper) +} + +func newPortMapper( + logf logger.Logf, + bus *eventbus.Bus, + netMon *netmon.Monitor, + disableUPnPOrNil func() bool, + onlyTCP443OrNil func() bool) portmappertype.Client { + + pm := portmapper.NewClient(portmapper.Config{ + EventBus: bus, + Logf: logf, + NetMon: netMon, + DebugKnobs: &portmapper.DebugKnobs{ + DisableAll: onlyTCP443OrNil, + DisableUPnPFunc: disableUPnPOrNil, + }, + }) + pm.SetGatewayLookupFunc(netMon.GatewayAndSelfIP) + return pm +} diff --git a/feature/posture/posture.go b/feature/posture/posture.go new file mode 100644 index 0000000000000..8e1945d7dbd0b --- /dev/null +++ b/feature/posture/posture.go @@ -0,0 +1,114 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package posture registers support for device posture checking, +// reporting machine-specific information to the control plane +// when enabled by the user and tailnet. +package posture + +import ( + "encoding/json" + "net/http" + + "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/posture" + "tailscale.com/syncs" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/ptype" +) + +func init() { + ipnext.RegisterExtension("posture", newExtension) + ipnlocal.RegisterC2N("GET /posture/identity", handleC2NPostureIdentityGet) +} + +func newExtension(logf logger.Logf, b ipnext.SafeBackend) (ipnext.Extension, error) { + e := &extension{ + logf: logger.WithPrefix(logf, "posture: "), + } + return e, nil +} + +type extension struct { + logf logger.Logf + + // lastKnownHardwareAddrs is a list of the previous known hardware addrs. + // Previously known hwaddrs are kept to work around an issue on Windows + // where all addresses might disappear. 
+ // http://go/corp/25168 + lastKnownHardwareAddrs syncs.AtomicValue[[]string] +} + +func (e *extension) Name() string { return "posture" } +func (e *extension) Init(h ipnext.Host) error { return nil } +func (e *extension) Shutdown() error { return nil } + +func handleC2NPostureIdentityGet(b *ipnlocal.LocalBackend, w http.ResponseWriter, r *http.Request) { + e, ok := ipnlocal.GetExt[*extension](b) + if !ok { + http.Error(w, "posture extension not available", http.StatusInternalServerError) + return + } + e.logf("c2n: GET /posture/identity received") + + res := tailcfg.C2NPostureIdentityResponse{} + + // Only collect posture identity if enabled on the client, + // this will first check syspolicy, MDM settings like Registry + // on Windows or defaults on macOS. If they are not set, it falls + // back to the cli-flag, `--posture-checking`. + choice, err := b.PolicyClient().GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) + if err != nil { + e.logf( + "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", + b.Prefs().PostureChecking(), + err, + ) + } + + if choice.ShouldEnable(b.Prefs().PostureChecking()) { + res.SerialNumbers, err = posture.GetSerialNumbers(b.PolicyClient(), e.logf) + if err != nil { + e.logf("c2n: GetSerialNumbers returned error: %v", err) + } + + // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release + // and looks good in client metrics, remove this parameter and always report MAC + // addresses. + if r.FormValue("hwaddrs") == "true" { + res.IfaceHardwareAddrs, err = e.getHardwareAddrs() + if err != nil { + e.logf("c2n: GetHardwareAddrs returned error: %v", err) + } + } + } else { + res.PostureDisabled = true + } + + e.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +// getHardwareAddrs returns the hardware addresses for the machine. If the list +// of hardware addresses is empty, it will return the previously known hardware +// addresses. Both the current, and previously known hardware addresses might be +// empty. 
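getHardwareAddrs (documented above, defined next) keeps the last non-empty result around as a fallback. A standalone sketch of that "remember the last known value" idiom, with illustrative names that are not part of the change:

package main

import (
	"fmt"
	"sync/atomic"
)

type sticky[T any] struct{ last atomic.Value }

// update returns vs if it is non-empty, otherwise the previously stored
// non-empty value (or nil if there never was one).
func (s *sticky[T]) update(vs []T) []T {
	if len(vs) == 0 {
		if prev, ok := s.last.Load().([]T); ok {
			return prev
		}
		return nil
	}
	s.last.Store(vs)
	return vs
}

func main() {
	var s sticky[string]
	fmt.Println(s.update([]string{"aa:bb"})) // [aa:bb]
	fmt.Println(s.update(nil))               // [aa:bb] (falls back to the last known value)
}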
+func (e *extension) getHardwareAddrs() ([]string, error) { + addrs, err := posture.GetHardwareAddrs() + if err != nil { + return nil, err + } + + if len(addrs) == 0 { + e.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") + return e.lastKnownHardwareAddrs.Load(), nil + } + + e.lastKnownHardwareAddrs.Store(addrs) + return addrs, nil +} diff --git a/feature/relayserver/relayserver.go b/feature/relayserver/relayserver.go index b90a6234508f2..df2fb4cb7c165 100644 --- a/feature/relayserver/relayserver.go +++ b/feature/relayserver/relayserver.go @@ -6,16 +6,23 @@ package relayserver import ( + "encoding/json" + "fmt" + "log" + "net/http" + "net/netip" + "strings" "sync" "tailscale.com/disco" + "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" + "tailscale.com/ipn/localapi" "tailscale.com/net/udprelay" - "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" - "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/ptr" "tailscale.com/util/eventbus" @@ -29,6 +36,32 @@ const featureName = "relayserver" func init() { feature.Register(featureName) ipnext.RegisterExtension(featureName, newExtension) + localapi.Register("debug-peer-relay-sessions", servePeerRelayDebugSessions) +} + +// servePeerRelayDebugSessions is an HTTP handler for the Local API that +// returns debug/status information for peer relay sessions being relayed by +// this Tailscale node. It writes a JSON-encoded [status.ServerStatus] into the +// HTTP response, or returns an HTTP 405/500 with error text as the body. +func servePeerRelayDebugSessions(h *localapi.Handler, w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + var e *extension + if ok := h.LocalBackend().FindMatchingExtension(&e); !ok { + http.Error(w, "peer relay server extension unavailable", http.StatusInternalServerError) + return + } + + st := e.serverStatus() + j, err := json.Marshal(st) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal json: %v", err), http.StatusInternalServerError) + return + } + w.Write(j) } // newExtension is an [ipnext.NewExtensionFn] that creates a new relay server @@ -47,18 +80,13 @@ type extension struct { logf logger.Logf bus *eventbus.Bus - mu sync.Mutex // guards the following fields - shutdown bool - port *int // ipn.Prefs.RelayServerPort, nil if disabled - disconnectFromBusCh chan struct{} // non-nil if consumeEventbusTopics is running, closed to signal it to return - busDoneCh chan struct{} // non-nil if consumeEventbusTopics is running, closed when it returns - hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer -} + mu sync.Mutex // guards the following fields + shutdown bool -// relayServer is the interface of [udprelay.Server]. -type relayServer interface { - AllocateEndpoint(discoA key.DiscoPublic, discoB key.DiscoPublic) (endpoint.ServerEndpoint, error) - Close() error + port *int // ipn.Prefs.RelayServerPort, nil if disabled + eventSubs *eventbus.Monitor // nil if not connected to eventbus + debugSessionsCh chan chan []status.ServerSession // non-nil if consumeEventbusTopics is running + hasNodeAttrDisableRelayServer bool // tailcfg.NodeAttrDisableRelayServer } // Name implements [ipnext.Extension]. 
@@ -82,14 +110,13 @@ func (e *extension) handleBusLifetimeLocked() { if !busShouldBeRunning { e.disconnectFromBusLocked() return - } - if e.busDoneCh != nil { + } else if e.eventSubs != nil { return // already running } - port := *e.port - e.disconnectFromBusCh = make(chan struct{}) - e.busDoneCh = make(chan struct{}) - go e.consumeEventbusTopics(port) + + ec := e.bus.Client("relayserver.extension") + e.debugSessionsCh = make(chan chan []status.ServerSession) + e.eventSubs = ptr.To(ec.Monitor(e.consumeEventbusTopics(ec, *e.port))) } func (e *extension) selfNodeViewChanged(nodeView tailcfg.NodeView) { @@ -115,68 +142,99 @@ func (e *extension) profileStateChanged(_ ipn.LoginProfileView, prefs ipn.PrefsV e.handleBusLifetimeLocked() } -func (e *extension) consumeEventbusTopics(port int) { - defer close(e.busDoneCh) +// overrideAddrs returns TS_DEBUG_RELAY_SERVER_ADDRS as []netip.Addr, if set. It +// can be between 0 and 3 comma-separated Addrs. TS_DEBUG_RELAY_SERVER_ADDRS is +// not a stable interface, and is subject to change. +var overrideAddrs = sync.OnceValue(func() (ret []netip.Addr) { + all := envknob.String("TS_DEBUG_RELAY_SERVER_ADDRS") + const max = 3 + remain := all + for remain != "" && len(ret) < max { + var s string + s, remain, _ = strings.Cut(remain, ",") + addr, err := netip.ParseAddr(s) + if err != nil { + log.Printf("ignoring invalid Addr %q in TS_DEBUG_RELAY_SERVER_ADDRS %q: %v", s, all, err) + continue + } + ret = append(ret, addr) + } + return +}) - eventClient := e.bus.Client("relayserver.extension") - reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](eventClient) - respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](eventClient) - defer eventClient.Close() +// consumeEventbusTopics serves endpoint allocation requests over the eventbus. +// It also serves [relayServer] debug information on a channel. +// consumeEventbusTopics must never acquire [extension.mu], which can be held +// by other goroutines while waiting to receive on [extension.eventSubs] or the +// inner [extension.debugSessionsCh] channel. +func (e *extension) consumeEventbusTopics(ec *eventbus.Client, port int) func(*eventbus.Client) { + reqSub := eventbus.Subscribe[magicsock.UDPRelayAllocReq](ec) + respPub := eventbus.Publish[magicsock.UDPRelayAllocResp](ec) + debugSessionsCh := e.debugSessionsCh - var rs relayServer // lazily initialized - defer func() { - if rs != nil { - rs.Close() + return func(ec *eventbus.Client) { + rs, err := udprelay.NewServer(e.logf, port, overrideAddrs()) + if err != nil { + e.logf("error initializing server: %v", err) } - }() - for { - select { - case <-e.disconnectFromBusCh: - return - case <-reqSub.Done(): - // If reqSub is done, the eventClient has been closed, which is a - // signal to return. - return - case req := <-reqSub.Events(): - if rs == nil { - var err error - rs, err = udprelay.NewServer(e.logf, port, nil) + + defer func() { + if rs != nil { + rs.Close() + } + }() + for { + select { + case <-ec.Done(): + return + case respCh := <-debugSessionsCh: + if rs == nil { + respCh <- nil + continue + } + sessions := rs.GetSessions() + respCh <- sessions + case req := <-reqSub.Events(): + if rs == nil { + // The server may have previously failed to initialize if + // the configured port was in use, try again. 
+ rs, err = udprelay.NewServer(e.logf, port, overrideAddrs()) + if err != nil { + e.logf("error initializing server: %v", err) + continue + } + } + se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) if err != nil { - e.logf("error initializing server: %v", err) + e.logf("error allocating endpoint: %v", err) continue } - } - se, err := rs.AllocateEndpoint(req.Message.ClientDisco[0], req.Message.ClientDisco[1]) - if err != nil { - e.logf("error allocating endpoint: %v", err) - continue - } - respPub.Publish(magicsock.UDPRelayAllocResp{ - ReqRxFromNodeKey: req.RxFromNodeKey, - ReqRxFromDiscoKey: req.RxFromDiscoKey, - Message: &disco.AllocateUDPRelayEndpointResponse{ - Generation: req.Message.Generation, - UDPRelayEndpoint: disco.UDPRelayEndpoint{ - ServerDisco: se.ServerDisco, - ClientDisco: se.ClientDisco, - LamportID: se.LamportID, - VNI: se.VNI, - BindLifetime: se.BindLifetime.Duration, - SteadyStateLifetime: se.SteadyStateLifetime.Duration, - AddrPorts: se.AddrPorts, + respPub.Publish(magicsock.UDPRelayAllocResp{ + ReqRxFromNodeKey: req.RxFromNodeKey, + ReqRxFromDiscoKey: req.RxFromDiscoKey, + Message: &disco.AllocateUDPRelayEndpointResponse{ + Generation: req.Message.Generation, + UDPRelayEndpoint: disco.UDPRelayEndpoint{ + ServerDisco: se.ServerDisco, + ClientDisco: se.ClientDisco, + LamportID: se.LamportID, + VNI: se.VNI, + BindLifetime: se.BindLifetime.Duration, + SteadyStateLifetime: se.SteadyStateLifetime.Duration, + AddrPorts: se.AddrPorts, + }, }, - }, - }) + }) + } } } } func (e *extension) disconnectFromBusLocked() { - if e.busDoneCh != nil { - close(e.disconnectFromBusCh) - <-e.busDoneCh - e.busDoneCh = nil - e.disconnectFromBusCh = nil + if e.eventSubs != nil { + e.eventSubs.Close() + e.eventSubs = nil + e.debugSessionsCh = nil } } @@ -188,3 +246,30 @@ func (e *extension) Shutdown() error { e.shutdown = true return nil } + +// serverStatus gathers and returns current peer relay server status information +// for this Tailscale node, and status of each peer relay session this node is +// relaying (if any). 
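serverStatus (defined next) asks the poll goroutine for session data by handing it a response channel over debugSessionsCh, which keeps extension.mu entirely out of that goroutine. A self-contained sketch of that channel-of-channels handoff:

package main

import "fmt"

type session struct{ id int }

// worker owns the session state; callers request a snapshot by sending a
// response channel, so no lock is shared between the goroutines.
func worker(reqs <-chan chan []session, done <-chan struct{}) {
	state := []session{{1}, {2}}
	for {
		select {
		case respCh := <-reqs:
			respCh <- state
		case <-done:
			return
		}
	}
}

func main() {
	reqs := make(chan chan []session)
	done := make(chan struct{})
	go worker(reqs, done)

	respCh := make(chan []session)
	reqs <- respCh
	fmt.Println(<-respCh) // [{1} {2}]
	close(done)
}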
+func (e *extension) serverStatus() status.ServerStatus { + e.mu.Lock() + defer e.mu.Unlock() + + st := status.ServerStatus{ + UDPPort: nil, + Sessions: nil, + } + if e.port == nil || e.eventSubs == nil { + return st + } + st.UDPPort = ptr.To(*e.port) + + ch := make(chan []status.ServerSession) + select { + case e.debugSessionsCh <- ch: + resp := <-ch + st.Sessions = resp + return st + case <-e.eventSubs.Done(): + return st + } +} diff --git a/feature/relayserver/relayserver_test.go b/feature/relayserver/relayserver_test.go index d3fc36a83674a..65c503524c5de 100644 --- a/feature/relayserver/relayserver_test.go +++ b/feature/relayserver/relayserver_test.go @@ -8,6 +8,7 @@ import ( "tailscale.com/ipn" "tailscale.com/tsd" + "tailscale.com/types/logger" "tailscale.com/types/ptr" "tailscale.com/util/eventbus" ) @@ -96,13 +97,14 @@ func Test_extension_profileStateChanged(t *testing.T) { sys := tsd.NewSystem() bus := sys.Bus.Get() e := &extension{ + logf: logger.Discard, port: tt.fields.port, bus: bus, } defer e.disconnectFromBusLocked() e.profileStateChanged(ipn.LoginProfileView{}, tt.args.prefs, tt.args.sameNode) - if tt.wantBusRunning != (e.busDoneCh != nil) { - t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) + if tt.wantBusRunning != (e.eventSubs != nil) { + t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) } if (tt.wantPort == nil) != (e.port == nil) { t.Errorf("(tt.wantPort == nil): %v != (e.port == nil): %v", tt.wantPort == nil, e.port == nil) @@ -118,7 +120,7 @@ func Test_extension_handleBusLifetimeLocked(t *testing.T) { name string shutdown bool port *int - busDoneCh chan struct{} + eventSubs *eventbus.Monitor hasNodeAttrDisableRelayServer bool wantBusRunning bool }{ @@ -154,16 +156,17 @@ func Test_extension_handleBusLifetimeLocked(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { e := &extension{ + logf: logger.Discard, bus: eventbus.New(), shutdown: tt.shutdown, port: tt.port, - busDoneCh: tt.busDoneCh, + eventSubs: tt.eventSubs, hasNodeAttrDisableRelayServer: tt.hasNodeAttrDisableRelayServer, } e.handleBusLifetimeLocked() defer e.disconnectFromBusLocked() - if tt.wantBusRunning != (e.busDoneCh != nil) { - t.Errorf("wantBusRunning: %v != (e.busDoneCh != nil): %v", tt.wantBusRunning, e.busDoneCh != nil) + if tt.wantBusRunning != (e.eventSubs != nil) { + t.Errorf("wantBusRunning: %v != (e.eventSubs != nil): %v", tt.wantBusRunning, e.eventSubs != nil) } }) } diff --git a/feature/sdnotify.go b/feature/sdnotify.go new file mode 100644 index 0000000000000..e785dc1acc09a --- /dev/null +++ b/feature/sdnotify.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package feature + +import ( + "runtime" + + "tailscale.com/feature/buildfeatures" +) + +// HookSystemdReady sends a readiness to systemd. This will unblock service +// dependents from starting. +var HookSystemdReady Hook[func()] + +// HookSystemdStatus holds a func that will send a single line status update to +// systemd so that information shows up in systemctl output. +var HookSystemdStatus Hook[func(format string, args ...any)] + +// SystemdStatus sends a single line status update to systemd so that +// information shows up in systemctl output. +// +// It does nothing on non-Linux systems or if the binary was built without +// the sdnotify feature. 
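A small usage sketch of these systemd wrappers from generic daemon code (the status text and call site are made up); both calls are no-ops unless the sdnotify feature is compiled in and the process runs under a systemd Type=notify unit:

package main

import "tailscale.com/feature"

// notifySystemd is a hypothetical call site in daemon startup code.
func notifySystemd() {
	if ready, ok := feature.HookSystemdReady.GetOk(); ok {
		ready() // unblock units that depend on this service
	}
	feature.SystemdStatus("Connected; exit node: none") // shows up in `systemctl status`
}

func main() { notifySystemd() }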
+func SystemdStatus(format string, args ...any) { + if runtime.GOOS != "linux" || !buildfeatures.HasSDNotify { + return + } + if f, ok := HookSystemdStatus.GetOk(); ok { + f(format, args...) + } +} diff --git a/util/systemd/doc.go b/feature/sdnotify/sdnotify.go similarity index 81% rename from util/systemd/doc.go rename to feature/sdnotify/sdnotify.go index 0c28e182354ec..d13aa63f23c15 100644 --- a/util/systemd/doc.go +++ b/feature/sdnotify/sdnotify.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause /* -Package systemd contains a minimal wrapper around systemd-notify to enable +Package sdnotify contains a minimal wrapper around systemd-notify to enable applications to signal readiness and status to systemd. This package will only have effect on Linux systems running Tailscale in a @@ -10,4 +10,4 @@ systemd unit with the Type=notify flag set. On other operating systems (or when running in a Linux distro without being run from inside systemd) this package will become a no-op. */ -package systemd +package sdnotify diff --git a/util/systemd/systemd_linux.go b/feature/sdnotify/sdnotify_linux.go similarity index 84% rename from util/systemd/systemd_linux.go rename to feature/sdnotify/sdnotify_linux.go index fdfd1bba05451..b005f1bdb2bb2 100644 --- a/util/systemd/systemd_linux.go +++ b/feature/sdnotify/sdnotify_linux.go @@ -3,7 +3,7 @@ //go:build linux && !android -package systemd +package sdnotify import ( "errors" @@ -12,8 +12,14 @@ import ( "sync" "github.com/mdlayher/sdnotify" + "tailscale.com/feature" ) +func init() { + feature.HookSystemdReady.Set(ready) + feature.HookSystemdStatus.Set(status) +} + var getNotifyOnce struct { sync.Once v *sdnotify.Notifier @@ -46,15 +52,15 @@ func notifier() *sdnotify.Notifier { return getNotifyOnce.v } -// Ready signals readiness to systemd. This will unblock service dependents from starting. -func Ready() { +// ready signals readiness to systemd. This will unblock service dependents from starting. +func ready() { err := notifier().Notify(sdnotify.Ready) if err != nil { readyOnce.logf("systemd: error notifying: %v", err) } } -// Status sends a single line status update to systemd so that information shows up +// status sends a single line status update to systemd so that information shows up // in systemctl output. For example: // // $ systemctl status tailscale @@ -69,7 +75,7 @@ func Ready() { // CPU: 2min 38.469s // CGroup: /system.slice/tailscale.service // └─26741 /nix/store/sv6cj4mw2jajm9xkbwj07k29dj30lh0n-tailscale-date.20200727/bin/tailscaled --port 41641 -func Status(format string, args ...any) { +func status(format string, args ...any) { err := notifier().Notify(sdnotify.Statusf(format, args...)) if err != nil { statusOnce.logf("systemd: error notifying: %v", err) diff --git a/feature/taildrop/ext.go b/feature/taildrop/ext.go index f8f45b53fae26..6bdb375ccfe63 100644 --- a/feature/taildrop/ext.go +++ b/feature/taildrop/ext.go @@ -105,6 +105,7 @@ func (e *Extension) Init(h ipnext.Host) error { // TODO(nickkhyl): remove this after the profileManager refactoring. // See tailscale/tailscale#15974. + // This same workaround appears in feature/portlist/portlist.go. 
profile, prefs := h.Profiles().CurrentProfileState() e.onChangeProfile(profile, prefs, false) return nil diff --git a/feature/taildrop/peerapi_test.go b/feature/taildrop/peerapi_test.go index 6339973544453..254d8794e8273 100644 --- a/feature/taildrop/peerapi_test.go +++ b/feature/taildrop/peerapi_test.go @@ -33,11 +33,13 @@ type peerAPIHandler struct { isSelf bool // whether peerNode is owned by same user as this node selfNode tailcfg.NodeView // this node; always non-nil peerNode tailcfg.NodeView // peerNode is who's making the request + canDebug bool // whether peerNode can debug this node (goroutines, metrics, magicsock internal state, etc) } func (h *peerAPIHandler) IsSelfUntagged() bool { return !h.selfNode.IsTagged() && !h.peerNode.IsTagged() && h.isSelf } +func (h *peerAPIHandler) CanDebug() bool { return h.canDebug } func (h *peerAPIHandler) Peer() tailcfg.NodeView { return h.peerNode } func (h *peerAPIHandler) Self() tailcfg.NodeView { return h.selfNode } func (h *peerAPIHandler) RemoteAddr() netip.AddrPort { return h.remoteAddr } diff --git a/feature/taildrop/retrieve.go b/feature/taildrop/retrieve.go index b048a1b3b5f9d..e767bac324684 100644 --- a/feature/taildrop/retrieve.go +++ b/feature/taildrop/retrieve.go @@ -14,7 +14,7 @@ import ( "time" "tailscale.com/client/tailscale/apitype" - "tailscale.com/logtail/backoff" + "tailscale.com/util/backoff" "tailscale.com/util/set" ) diff --git a/feature/tap/tap_linux.go b/feature/tap/tap_linux.go index 58ac00593d3a8..53dcabc364d6b 100644 --- a/feature/tap/tap_linux.go +++ b/feature/tap/tap_linux.go @@ -6,6 +6,7 @@ package tap import ( "bytes" + "errors" "fmt" "net" "net/netip" @@ -29,7 +30,6 @@ import ( "tailscale.com/syncs" "tailscale.com/types/ipproto" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) // TODO: this was randomly generated once. Maybe do it per process start? But @@ -482,7 +482,7 @@ func (t *tapDevice) Write(buffs [][]byte, offset int) (int, error) { wrote++ } } - return wrote, multierr.New(errs...) + return wrote, errors.Join(errs...) } func (t *tapDevice) MTU() (int, error) { diff --git a/feature/tpm/attestation.go b/feature/tpm/attestation.go new file mode 100644 index 0000000000000..5fbda3b17bab3 --- /dev/null +++ b/feature/tpm/attestation.go @@ -0,0 +1,279 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "crypto" + "encoding/json" + "errors" + "fmt" + "io" + "log" + + "github.com/google/go-tpm/tpm2" + "github.com/google/go-tpm/tpm2/transport" + "golang.org/x/crypto/cryptobyte" + "golang.org/x/crypto/cryptobyte/asn1" + "tailscale.com/types/key" +) + +type attestationKey struct { + tpm transport.TPMCloser + // private and public parts of the TPM key as returned from tpm2.Create. + // These are used for serialization. + tpmPrivate tpm2.TPM2BPrivate + tpmPublic tpm2.TPM2BPublic + // handle of the loaded TPM key. + handle *tpm2.NamedHandle + // pub is the parsed *ecdsa.PublicKey. + pub crypto.PublicKey +} + +func newAttestationKey() (ak *attestationKey, retErr error) { + tpm, err := open() + if err != nil { + return nil, key.ErrUnsupported + } + defer func() { + if retErr != nil { + tpm.Close() + } + }() + ak = &attestationKey{tpm: tpm} + + // Create a key under the storage hierarchy. 
+ if err := withSRK(log.Printf, ak.tpm, func(srk tpm2.AuthHandle) error { + resp, err := tpm2.Create{ + ParentHandle: tpm2.NamedHandle{ + Handle: srk.Handle, + Name: srk.Name, + }, + InPublic: tpm2.New2B( + tpm2.TPMTPublic{ + Type: tpm2.TPMAlgECC, + NameAlg: tpm2.TPMAlgSHA256, + ObjectAttributes: tpm2.TPMAObject{ + SensitiveDataOrigin: true, + UserWithAuth: true, + AdminWithPolicy: true, + NoDA: true, + FixedTPM: true, + FixedParent: true, + SignEncrypt: true, + }, + Parameters: tpm2.NewTPMUPublicParms( + tpm2.TPMAlgECC, + &tpm2.TPMSECCParms{ + CurveID: tpm2.TPMECCNistP256, + Scheme: tpm2.TPMTECCScheme{ + Scheme: tpm2.TPMAlgECDSA, + Details: tpm2.NewTPMUAsymScheme( + tpm2.TPMAlgECDSA, + &tpm2.TPMSSigSchemeECDSA{ + // Unfortunately, TPMs don't let us use + // TPMAlgNull here to make the hash + // algorithm dynamic higher in the + // stack. We have to hardcode it here. + HashAlg: tpm2.TPMAlgSHA256, + }, + ), + }, + }, + ), + }, + ), + }.Execute(ak.tpm) + if err != nil { + return fmt.Errorf("tpm2.Create: %w", err) + } + ak.tpmPrivate = resp.OutPrivate + ak.tpmPublic = resp.OutPublic + return nil + }); err != nil { + return nil, err + } + return ak, ak.load() +} + +func (ak *attestationKey) loaded() bool { + return ak.tpm != nil && ak.handle != nil && ak.pub != nil +} + +// load the key into the TPM from its public/private components. Must be called +// before Sign or Public. +func (ak *attestationKey) load() error { + if ak.loaded() { + return nil + } + if len(ak.tpmPrivate.Buffer) == 0 || len(ak.tpmPublic.Bytes()) == 0 { + return fmt.Errorf("attestationKey.load called without tpmPrivate or tpmPublic") + } + return withSRK(log.Printf, ak.tpm, func(srk tpm2.AuthHandle) error { + resp, err := tpm2.Load{ + ParentHandle: tpm2.NamedHandle{ + Handle: srk.Handle, + Name: srk.Name, + }, + InPrivate: ak.tpmPrivate, + InPublic: ak.tpmPublic, + }.Execute(ak.tpm) + if err != nil { + return fmt.Errorf("tpm2.Load: %w", err) + } + + ak.handle = &tpm2.NamedHandle{ + Handle: resp.ObjectHandle, + Name: resp.Name, + } + pub, err := ak.tpmPublic.Contents() + if err != nil { + return err + } + ak.pub, err = tpm2.Pub(*pub) + return err + }) +} + +// attestationKeySerialized is the JSON-serialized representation of +// attestationKey. +type attestationKeySerialized struct { + TPMPrivate []byte `json:"tpmPrivate"` + TPMPublic []byte `json:"tpmPublic"` +} + +// MarshalJSON implements json.Marshaler. +func (ak *attestationKey) MarshalJSON() ([]byte, error) { + if ak == nil || ak.IsZero() { + return []byte("null"), nil + } + return json.Marshal(attestationKeySerialized{ + TPMPublic: ak.tpmPublic.Bytes(), + TPMPrivate: ak.tpmPrivate.Buffer, + }) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (ak *attestationKey) UnmarshalJSON(data []byte) (retErr error) { + var aks attestationKeySerialized + if err := json.Unmarshal(data, &aks); err != nil { + return err + } + + ak.tpmPrivate = tpm2.TPM2BPrivate{Buffer: aks.TPMPrivate} + ak.tpmPublic = tpm2.BytesAs2B[tpm2.TPMTPublic, *tpm2.TPMTPublic](aks.TPMPublic) + + tpm, err := open() + if err != nil { + return key.ErrUnsupported + } + defer func() { + if retErr != nil { + tpm.Close() + } + }() + ak.tpm = tpm + + return ak.load() +} + +func (ak *attestationKey) Public() crypto.PublicKey { + return ak.pub +} + +func (ak *attestationKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if !ak.loaded() { + return nil, errors.New("tpm2 attestation key is not loaded during Sign") + } + // Unfortunately, TPMs don't let us make keys with dynamic hash algorithms. + // The hash algorithm is fixed at key creation time (tpm2.Create). + if opts != crypto.SHA256 { + return nil, fmt.Errorf("tpm2 key is restricted to SHA256, have %q", opts) + } + resp, err := tpm2.Sign{ + KeyHandle: ak.handle, + Digest: tpm2.TPM2BDigest{ + Buffer: digest, + }, + InScheme: tpm2.TPMTSigScheme{ + Scheme: tpm2.TPMAlgECDSA, + Details: tpm2.NewTPMUSigScheme( + tpm2.TPMAlgECDSA, + &tpm2.TPMSSchemeHash{ + HashAlg: tpm2.TPMAlgSHA256, + }, + ), + }, + Validation: tpm2.TPMTTKHashCheck{ + Tag: tpm2.TPMSTHashCheck, + }, + }.Execute(ak.tpm) + if err != nil { + return nil, fmt.Errorf("tpm2.Sign: %w", err) + } + sig, err := resp.Signature.Signature.ECDSA() + if err != nil { + return nil, err + } + return encodeSignature(sig.SignatureR.Buffer, sig.SignatureS.Buffer) +} + +// Copied from crypto/ecdsa. +func encodeSignature(r, s []byte) ([]byte, error) { + var b cryptobyte.Builder + b.AddASN1(asn1.SEQUENCE, func(b *cryptobyte.Builder) { + addASN1IntBytes(b, r) + addASN1IntBytes(b, s) + }) + return b.Bytes() +} + +// addASN1IntBytes encodes in ASN.1 a positive integer represented as +// a big-endian byte slice with zero or more leading zeroes. +func addASN1IntBytes(b *cryptobyte.Builder, bytes []byte) { + for len(bytes) > 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + b.SetError(errors.New("invalid integer")) + return + } + b.AddASN1(asn1.INTEGER, func(c *cryptobyte.Builder) { + if bytes[0]&0x80 != 0 { + c.AddUint8(0) + } + c.AddBytes(bytes) + }) +} + +func (ak *attestationKey) Close() error { + var errs []error + if ak.handle != nil && ak.tpm != nil { + _, err := tpm2.FlushContext{FlushHandle: ak.handle.Handle}.Execute(ak.tpm) + errs = append(errs, err) + } + if ak.tpm != nil { + errs = append(errs, ak.tpm.Close()) + } + return errors.Join(errs...) 
+} + +func (ak *attestationKey) Clone() key.HardwareAttestationKey { + if ak == nil { + return nil + } + return &attestationKey{ + tpm: ak.tpm, + tpmPrivate: ak.tpmPrivate, + tpmPublic: ak.tpmPublic, + handle: ak.handle, + pub: ak.pub, + } +} + +func (ak *attestationKey) IsZero() bool { + if ak == nil { + return true + } + return !ak.loaded() +} diff --git a/feature/tpm/attestation_test.go b/feature/tpm/attestation_test.go new file mode 100644 index 0000000000000..ead88c955aeea --- /dev/null +++ b/feature/tpm/attestation_test.go @@ -0,0 +1,98 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tpm + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha256" + "encoding/json" + "testing" +) + +func TestAttestationKeySign(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + data := []byte("secrets") + digest := sha256.Sum256(data) + + // Check signature/validation round trip. + sig, err := ak.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if !ecdsa.VerifyASN1(ak.Public().(*ecdsa.PublicKey), digest[:], sig) { + t.Errorf("ecdsa.VerifyASN1 failed") + } + + // Create a different key. + ak2, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + + // Make sure that the keys are distinct via their public keys and the + // signatures they produce. + if ak.Public().(*ecdsa.PublicKey).Equal(ak2.Public()) { + t.Errorf("public keys of distinct attestation keys are the same") + } + sig2, err := ak2.Sign(rand.Reader, digest[:], crypto.SHA256) + if err != nil { + t.Fatal(err) + } + if bytes.Equal(sig, sig2) { + t.Errorf("signatures from distinct attestation keys are the same") + } +} + +func TestAttestationKeyUnmarshal(t *testing.T) { + skipWithoutTPM(t) + ak, err := newAttestationKey() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak.Close(); err != nil { + t.Errorf("ak.Close: %v", err) + } + }) + + buf, err := ak.MarshalJSON() + if err != nil { + t.Fatal(err) + } + var ak2 attestationKey + if err := json.Unmarshal(buf, &ak2); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := ak2.Close(); err != nil { + t.Errorf("ak2.Close: %v", err) + } + }) + + if !ak2.loaded() { + t.Error("unmarshalled key is not loaded") + } + + if !ak.Public().(*ecdsa.PublicKey).Equal(ak2.Public()) { + t.Error("unmarshalled public key is not the same as the original public key") + } +} diff --git a/feature/tpm/tpm.go b/feature/tpm/tpm.go index 0260cca586e13..4b27a241fa255 100644 --- a/feature/tpm/tpm.go +++ b/feature/tpm/tpm.go @@ -14,6 +14,7 @@ import ( "log" "os" "path/filepath" + "runtime" "slices" "strings" "sync" @@ -22,32 +23,80 @@ import ( "github.com/google/go-tpm/tpm2/transport" "golang.org/x/crypto/nacl/secretbox" "tailscale.com/atomicfile" + "tailscale.com/envknob" "tailscale.com/feature" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/store" "tailscale.com/paths" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/testenv" ) var infoOnce = sync.OnceValue(info) func init() { feature.Register("tpm") + feature.HookTPMAvailable.Set(tpmSupported) + feature.HookHardwareAttestationAvailable.Set(tpmSupported) + hostinfo.RegisterHostinfoNewHook(func(hi 
*tailcfg.Hostinfo) { hi.TPM = infoOnce() }) store.Register(store.TPMPrefix, newStore) + if runtime.GOOS == "linux" || runtime.GOOS == "windows" { + key.RegisterHardwareAttestationKeyFns( + func() key.HardwareAttestationKey { return &attestationKey{} }, + func() (key.HardwareAttestationKey, error) { return newAttestationKey() }, + ) + } } +func tpmSupported() bool { + hi := infoOnce() + if hi == nil { + return false + } + if hi.FamilyIndicator != "2.0" { + return false + } + + tpm, err := open() + if err != nil { + return false + } + defer tpm.Close() + + if err := withSRK(logger.Discard, tpm, func(srk tpm2.AuthHandle) error { + return nil + }); err != nil { + return false + } + return true +} + +var verboseTPM = envknob.RegisterBool("TS_DEBUG_TPM") + func info() *tailcfg.TPMInfo { + logf := logger.Discard + if !testenv.InTest() || verboseTPM() { + logf = log.New(log.Default().Writer(), "TPM: ", 0).Printf + } + tpm, err := open() if err != nil { - log.Printf("TPM: error opening: %v", err) + if !os.IsNotExist(err) || verboseTPM() { + // Only log if it's an interesting error, not just "no TPM", + // as is very common, especially in VMs. + logf("error opening: %v", err) + } return nil } - log.Printf("TPM: successfully opened") + if verboseTPM() { + logf("successfully opened") + } defer tpm.Close() info := new(tailcfg.TPMInfo) @@ -69,6 +118,7 @@ func info() *tailcfg.TPMInfo { {tpm2.TPMPTVendorTPMType, func(info *tailcfg.TPMInfo, value uint32) { info.Model = int(value) }}, {tpm2.TPMPTFirmwareVersion1, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) << 32 }}, {tpm2.TPMPTFirmwareVersion2, func(info *tailcfg.TPMInfo, value uint32) { info.FirmwareVersion += uint64(value) }}, + {tpm2.TPMPTFamilyIndicator, toStr(&info.FamilyIndicator)}, } { resp, err := tpm2.GetCapability{ Capability: tpm2.TPMCapTPMProperties, @@ -76,12 +126,12 @@ func info() *tailcfg.TPMInfo { PropertyCount: 1, }.Execute(tpm) if err != nil { - log.Printf("TPM: GetCapability %v: %v", cap.prop, err) + logf("GetCapability %v: %v", cap.prop, err) continue } props, err := resp.CapabilityData.Data.TPMProperties() if err != nil { - log.Printf("TPM: GetCapability %v: %v", cap.prop, err) + logf("GetCapability %v: %v", cap.prop, err) continue } if len(props.TPMProperty) == 0 { @@ -89,6 +139,7 @@ func info() *tailcfg.TPMInfo { } cap.apply(info, props.TPMProperty[0].Value) } + logf("successfully read all properties") return info } diff --git a/feature/tpm/tpm_test.go b/feature/tpm/tpm_test.go index f4497f8c72732..afce570fc250d 100644 --- a/feature/tpm/tpm_test.go +++ b/feature/tpm/tpm_test.go @@ -133,6 +133,31 @@ func TestStore(t *testing.T) { }) } +func BenchmarkInfo(b *testing.B) { + b.StopTimer() + skipWithoutTPM(b) + b.StartTimer() + for i := 0; i < b.N; i++ { + hi := info() + if hi == nil { + b.Fatalf("tpm info error") + } + } + b.StopTimer() +} + +func BenchmarkTPMSupported(b *testing.B) { + b.StopTimer() + skipWithoutTPM(b) + b.StartTimer() + for i := 0; i < b.N; i++ { + if !tpmSupported() { + b.Fatalf("tpmSupported returned false") + } + } + b.StopTimer() +} + func BenchmarkStore(b *testing.B) { skipWithoutTPM(b) b.StopTimer() @@ -277,15 +302,6 @@ func TestMigrateStateToTPM(t *testing.T) { } } -func tpmSupported() bool { - tpm, err := open() - if err != nil { - return false - } - tpm.Close() - return true -} - type mockTPMSealProvider struct { path string data map[ipn.StateKey][]byte diff --git a/feature/useproxy/useproxy.go b/feature/useproxy/useproxy.go new file mode 100644 index 
0000000000000..a18e60577af85 --- /dev/null +++ b/feature/useproxy/useproxy.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package useproxy registers support for using system proxies. +package useproxy + +import ( + "tailscale.com/feature" + "tailscale.com/net/tshttpproxy" +) + +func init() { + feature.HookProxyFromEnvironment.Set(tshttpproxy.ProxyFromEnvironment) + feature.HookProxyInvalidateCache.Set(tshttpproxy.InvalidateCache) + feature.HookProxyGetAuthHeader.Set(tshttpproxy.GetAuthHeader) + feature.HookProxySetSelfProxy.Set(tshttpproxy.SetSelfProxy) + feature.HookProxySetTransportGetProxyConnectHeader.Set(tshttpproxy.SetTransportGetProxyConnectHeader) +} diff --git a/flake.nix b/flake.nix index 8cb5e078e11e2..726757f7a76b7 100644 --- a/flake.nix +++ b/flake.nix @@ -46,8 +46,9 @@ systems, flake-compat, }: let - go125Version = "1.25.0"; - goHash = "sha256-S9AekSlyB7+kUOpA1NWpOxtTGl5DhHOyoG4Y4HciciU="; + goVersion = nixpkgs.lib.fileContents ./go.toolchain.version; + toolChainRev = nixpkgs.lib.fileContents ./go.toolchain.rev; + gitHash = nixpkgs.lib.fileContents ./go.toolchain.rev.sri; eachSystem = f: nixpkgs.lib.genAttrs (import systems) (system: f (import nixpkgs { @@ -55,10 +56,12 @@ overlays = [ (final: prev: { go_1_25 = prev.go_1_25.overrideAttrs { - version = go125Version; - src = prev.fetchurl { - url = "https://go.dev/dl/go${go125Version}.src.tar.gz"; - hash = goHash; + version = goVersion; + src = prev.fetchFromGitHub { + owner = "tailscale"; + repo = "go"; + rev = toolChainRev; + sha256 = gitHash; }; }; }) @@ -148,5 +151,5 @@ }); }; } -# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= diff --git a/go.mod b/go.mod index 6883d2552e447..3c281fa7a34bf 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module tailscale.com -go 1.25.1 +go 1.25.3 require ( filippo.io/mkcert v1.4.4 @@ -79,7 +79,7 @@ require ( github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/studio-b12/gowebdav v0.9.0 github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e - github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b + github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 @@ -136,6 +136,7 @@ require ( github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect github.com/armon/go-metrics v0.4.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/boltdb/bolt v1.3.1 // indirect github.com/bombsimon/wsl/v4 v4.2.1 // indirect github.com/butuzov/mirror v1.1.0 // indirect @@ -186,6 +187,7 @@ require ( go.uber.org/automaxprocs v1.5.3 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + k8s.io/component-base v0.32.0 // indirect ) require ( diff --git a/go.mod.sri b/go.mod.sri index 781799de5eae1..f94054422c6d7 100644 --- a/go.mod.sri +++ b/go.mod.sri @@ -1 +1 @@ -sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= diff --git a/go.sum b/go.sum index 72ddb730fdf84..bc386d1fdb37f 100644 --- a/go.sum +++ b/go.sum @@ -178,6 +178,8 @@ github.com/bkielbasa/cyclop v1.2.1 
h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJ github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= @@ -972,8 +974,8 @@ github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplB github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP5LWHEY//SWsYkSO3RWRZo4OSWagh3YD2vQ= github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4= -github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b h1:ewWb4cA+YO9/3X+v5UhdV+eKFsNBOPcGRh39Glshx/4= -github.com/tailscale/depaware v0.0.0-20250112153213-b748de04d81b/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= +github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f h1:PDPGJtm9PFBLNudHGwkfUGp/FWvP+kXXJ0D1pB35F40= +github.com/tailscale/depaware v0.0.0-20251001183927-9c2ad255ef3f/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4= github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg= github.com/tailscale/goexpect v0.0.0-20210902213824-6e8c725cea41 h1:/V2rCMMWcsjYaYO2MeovLw+ClP63OtXgCF2Y1eb8+Ns= @@ -1546,6 +1548,8 @@ k8s.io/apiserver v0.32.0 h1:VJ89ZvQZ8p1sLeiWdRJpRD6oLozNZD2+qVSLi+ft5Qs= k8s.io/apiserver v0.32.0/go.mod h1:HFh+dM1/BE/Hm4bS4nTXHVfN6Z6tFIZPi649n83b4Ag= k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/component-base v0.32.0 h1:d6cWHZkCiiep41ObYQS6IcgzOUQUNpywm39KVYaUqzU= +k8s.io/component-base v0.32.0/go.mod h1:JLG2W5TUxUu5uDyKiH2R/7NnxJo1HlPoRIIbVLkK5eM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= diff --git a/go.toolchain.rev b/go.toolchain.rev index 1fd4f3df25747..9ea6b37dcbc32 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -aa85d1541af0921f830f053f29d91971fa5838f6 +5c01b77ad0d27a8bd4ef89ef7e713fd7043c5a91 diff --git a/go.toolchain.rev.sri b/go.toolchain.rev.sri new file mode 100644 index 0000000000000..a62a525998ac7 --- /dev/null +++ b/go.toolchain.rev.sri @@ -0,0 +1 @@ +sha256-2TYziJLJrFOW2FehhahKficnDACJEwjuvVYyeQZbrcc= diff --git a/go.toolchain.version b/go.toolchain.version new file mode 100644 index 0000000000000..5bb76b575e1f5 --- /dev/null +++ b/go.toolchain.version @@ -0,0 +1 @@ +1.25.3 diff --git a/health/health.go b/health/health.go index 05887043814ea..cbfa599c56eaf 100644 --- 
a/health/health.go +++ b/health/health.go @@ -8,7 +8,6 @@ package health import ( "context" "errors" - "expvar" "fmt" "maps" "net/http" @@ -20,15 +19,13 @@ import ( "time" "tailscale.com/envknob" - "tailscale.com/metrics" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/opt" "tailscale.com/util/cibuild" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" - "tailscale.com/util/multierr" - "tailscale.com/util/set" - "tailscale.com/util/usermetric" "tailscale.com/version" ) @@ -64,6 +61,21 @@ var receiveNames = []string{ // Tracker tracks the health of various Tailscale subsystems, // comparing each subsystems' state with each other to make sure // they're consistent based on the user's intended state. +// +// If a client [Warnable] becomes unhealthy or its unhealthy state is updated, +// an event will be emitted with WarnableChanged set to true and the Warnable +// and its UnhealthyState: +// +// Change{WarnableChanged: true, Warnable: w, UnhealthyState: us} +// +// If a Warnable becomes healthy, an event will be emitted with +// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil: +// +// Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil} +// +// If the health messages from the control-plane change, an event will be +// emitted with ControlHealthChanged set to true. Recipients can fetch the set of +// control-plane health messages by calling [Tracker.CurrentState]. type Tracker struct { // MagicSockReceiveFuncs tracks the state of the three // magicsock receive functions: IPv4, IPv6, and DERP. @@ -76,6 +88,9 @@ type Tracker struct { testClock tstime.Clock // nil means use time.Now / tstime.StdClock{} + eventClient *eventbus.Client + changePub *eventbus.Publisher[Change] + // mu guards everything that follows. mu sync.Mutex @@ -87,9 +102,8 @@ type Tracker struct { // sysErr maps subsystems to their current error (or nil if the subsystem is healthy) // Deprecated: using Warnables should be preferred - sysErr map[Subsystem]error - watchers set.HandleSet[func(Change)] // opt func to run if error state changes - timer tstime.TimerController + sysErr map[Subsystem]error + timer tstime.TimerController latestVersion *tailcfg.ClientVersion // or nil checkForUpdates bool @@ -116,7 +130,41 @@ type Tracker struct { lastLoginErr error localLogConfigErr error tlsConnectionErrors map[string]error // map[ServerName]error - metricHealthMessage *metrics.MultiLabelMap[metricHealthMessageLabel] + metricHealthMessage any // nil or *metrics.MultiLabelMap[metricHealthMessageLabel] +} + +// NewTracker constructs a new [Tracker] and attaches the given eventbus. +// NewTracker will panic if no eventbus is given.
+func NewTracker(bus *eventbus.Bus) *Tracker { + if !buildfeatures.HasHealth { + return &Tracker{} + } + if bus == nil { + panic("no eventbus set") + } + + ec := bus.Client("health.Tracker") + t := &Tracker{ + eventClient: ec, + changePub: eventbus.Publish[Change](ec), + } + t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) + + ec.Monitor(t.awaitEventClientDone) + + return t +} + +func (t *Tracker) awaitEventClientDone(ec *eventbus.Client) { + <-ec.Done() + t.mu.Lock() + defer t.mu.Unlock() + + for _, timer := range t.pendingVisibleTimers { + timer.Stop() + } + t.timer.Stop() + clear(t.pendingVisibleTimers) } func (t *Tracker) now() time.Time { @@ -174,6 +222,9 @@ const legacyErrorArgKey = "LegacyError" // temporarily (2024-06-14) while we migrate the old health infrastructure based // on Subsystems to the new Warnables architecture. func (s Subsystem) Warnable() *Warnable { + if !buildfeatures.HasHealth { + return &noopWarnable + } w, ok := subsystemsWarnables[s] if !ok { panic(fmt.Sprintf("health: no Warnable for Subsystem %q", s)) @@ -183,10 +234,15 @@ func (s Subsystem) Warnable() *Warnable { var registeredWarnables = map[WarnableCode]*Warnable{} +var noopWarnable Warnable + // Register registers a new Warnable with the health package and returns it. // Register panics if the Warnable was already registered, because Warnables // should be unique across the program. func Register(w *Warnable) *Warnable { + if !buildfeatures.HasHealth { + return &noopWarnable + } if registeredWarnables[w.Code] != nil { panic(fmt.Sprintf("health: a Warnable with code %q was already registered", w.Code)) } @@ -198,6 +254,9 @@ func Register(w *Warnable) *Warnable { // unregister removes a Warnable from the health package. It should only be used // for testing purposes. func unregister(w *Warnable) { + if !buildfeatures.HasHealth { + return + } if registeredWarnables[w.Code] == nil { panic(fmt.Sprintf("health: attempting to unregister Warnable %q that was not registered", w.Code)) } @@ -270,6 +329,9 @@ func StaticMessage(s string) func(Args) string { // some lost Tracker plumbing, we want to capture stack trace // samples when it occurs. func (t *Tracker) nil() bool { + if !buildfeatures.HasHealth { + return true + } if t != nil { return false } @@ -338,37 +400,10 @@ func (w *Warnable) IsVisible(ws *warningState, clockNow func() time.Time) bool { return clockNow().Sub(ws.BrokenSince) >= w.TimeToVisible } -// SetMetricsRegistry sets up the metrics for the Tracker. It takes -// a usermetric.Registry and registers the metrics there. -func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { - if reg == nil || t.metricHealthMessage != nil { - return - } - - t.metricHealthMessage = usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( - reg, - "tailscaled_health_messages", - "gauge", - "Number of health messages broken down by type.", - ) - - t.metricHealthMessage.Set(metricHealthMessageLabel{ - Type: MetricLabelWarning, - }, expvar.Func(func() any { - if t.nil() { - return 0 - } - t.mu.Lock() - defer t.mu.Unlock() - t.updateBuiltinWarnablesLocked() - return int64(len(t.stringsLocked())) - })) -} - // IsUnhealthy reports whether the current state is unhealthy because the given // warnable is set. func (t *Tracker) IsUnhealthy(w *Warnable) bool { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return false } t.mu.Lock() @@ -382,7 +417,7 @@ func (t *Tracker) IsUnhealthy(w *Warnable) bool { // SetUnhealthy takes ownership of args. 
The args can be nil if no additional information is // needed for the unhealthy state. func (t *Tracker) SetUnhealthy(w *Warnable, args Args) { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return } t.mu.Lock() @@ -391,7 +426,7 @@ func (t *Tracker) SetUnhealthy(w *Warnable, args Args) { } func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { - if w == nil { + if !buildfeatures.HasHealth || w == nil { return } @@ -418,25 +453,20 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { Warnable: w, UnhealthyState: w.unhealthyState(ws), } - for _, cb := range t.watchers { - // If the Warnable has been unhealthy for more than its TimeToVisible, the callback should be - // executed immediately. Otherwise, the callback should be enqueued to run once the Warnable - // becomes visible. - if w.IsVisible(ws, t.now) { - cb(change) - continue - } - - // The time remaining until the Warnable will be visible to the user is the TimeToVisible - // minus the time that has already passed since the Warnable became unhealthy. + // Publish the change to the event bus. If the change is already visible + // now, publish it immediately; otherwise queue a timer to publish it at + // a future time when it becomes visible. + if w.IsVisible(ws, t.now) { + t.changePub.Publish(change) + } else { visibleIn := w.TimeToVisible - t.now().Sub(brokenSince) - var tc tstime.TimerController = t.clock().AfterFunc(visibleIn, func() { + tc := t.clock().AfterFunc(visibleIn, func() { t.mu.Lock() defer t.mu.Unlock() // Check if the Warnable is still unhealthy, as it could have become healthy between the time // the timer was set for and the time it was executed. if t.warnableVal[w] != nil { - cb(change) + t.changePub.Publish(change) delete(t.pendingVisibleTimers, w) } }) @@ -447,7 +477,7 @@ func (t *Tracker) setUnhealthyLocked(w *Warnable, args Args) { // SetHealthy removes any warningState for the given Warnable. func (t *Tracker) SetHealthy(w *Warnable) { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return } t.mu.Lock() @@ -456,7 +486,7 @@ func (t *Tracker) SetHealthy(w *Warnable) { } func (t *Tracker) setHealthyLocked(w *Warnable) { - if t.warnableVal[w] == nil { + if !buildfeatures.HasHealth || t.warnableVal[w] == nil { // Nothing to remove return } @@ -473,9 +503,7 @@ func (t *Tracker) setHealthyLocked(w *Warnable) { WarnableChanged: true, Warnable: w, } - for _, cb := range t.watchers { - cb(change) - } + t.changePub.Publish(change) } // notifyWatchersControlChangedLocked calls each watcher to signal that control @@ -484,9 +512,7 @@ func (t *Tracker) notifyWatchersControlChangedLocked() { change := Change{ ControlHealthChanged: true, } - for _, cb := range t.watchers { - cb(change) - } + t.changePub.Publish(change) } // AppendWarnableDebugFlags appends to base any health items that are currently in failed @@ -531,62 +557,6 @@ type Change struct { UnhealthyState *UnhealthyState } -// RegisterWatcher adds a function that will be called its own goroutine -// whenever the health state of any client [Warnable] or control-plane health -// messages changes. The returned function can be used to unregister the -// callback. 
-// -// If a client [Warnable] becomes unhealthy or its unhealthy state is updated, -// the callback will be called with WarnableChanged set to true and the Warnable -// and its UnhealthyState: -// -// go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: us}) -// -// If a Warnable becomes healthy, the callback will be called with -// WarnableChanged set to true, the Warnable set, and UnhealthyState set to nil: -// -// go cb(Change{WarnableChanged: true, Warnable: w, UnhealthyState: nil}) -// -// If the health messages from the control-plane change, the callback will be -// called with ControlHealthChanged set to true. Recipients can fetch the set of -// control-plane health messages by calling [Tracker.CurrentState]: -// -// go cb(Change{ControlHealthChanged: true}) -func (t *Tracker) RegisterWatcher(cb func(Change)) (unregister func()) { - return t.registerSyncWatcher(func(c Change) { - go cb(c) - }) -} - -// registerSyncWatcher adds a function that will be called whenever the health -// state changes. The provided callback function will be executed synchronously. -// Call RegisterWatcher to register any callbacks that won't return from -// execution immediately. -func (t *Tracker) registerSyncWatcher(cb func(c Change)) (unregister func()) { - if t.nil() { - return func() {} - } - t.initOnce.Do(t.doOnceInit) - t.mu.Lock() - defer t.mu.Unlock() - if t.watchers == nil { - t.watchers = set.HandleSet[func(Change)]{} - } - handle := t.watchers.Add(cb) - if t.timer == nil { - t.timer = t.clock().AfterFunc(time.Minute, t.timerSelfCheck) - } - return func() { - t.mu.Lock() - defer t.mu.Unlock() - delete(t.watchers, handle) - if len(t.watchers) == 0 && t.timer != nil { - t.timer.Stop() - t.timer = nil - } - } -} - // SetRouterHealth sets the state of the wgengine/router.Router. // // Deprecated: Warnables should be preferred over Subsystem errors. @@ -1009,8 +979,8 @@ func (t *Tracker) selfCheckLocked() { // OverallError returns a summary of the health state. // -// If there are multiple problems, the error will be of type -// multierr.Error. +// If there are multiple problems, the error will be joined using +// [errors.Join]. func (t *Tracker) OverallError() error { if t.nil() { return nil @@ -1027,7 +997,7 @@ func (t *Tracker) OverallError() error { // each Warning to show a localized version of them instead. This function is // here for legacy compatibility purposes and is deprecated. func (t *Tracker) Strings() []string { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return nil } t.mu.Lock() @@ -1036,6 +1006,9 @@ func (t *Tracker) Strings() []string { } func (t *Tracker) stringsLocked() []string { + if !buildfeatures.HasHealth { + return nil + } result := []string{} for w, ws := range t.warnableVal { if !w.IsVisible(ws, t.now) { @@ -1088,7 +1061,7 @@ func (t *Tracker) errorsLocked() []error { // This function is here for legacy compatibility purposes and is deprecated. func (t *Tracker) multiErrLocked() error { errs := t.errorsLocked() - return multierr.New(errs...) + return errors.Join(errs...) } var fakeErrForTesting = envknob.RegisterString("TS_DEBUG_FAKE_HEALTH_ERROR") @@ -1096,6 +1069,9 @@ var fakeErrForTesting = envknob.RegisterString("TS_DEBUG_FAKE_HEALTH_ERROR") // updateBuiltinWarnablesLocked performs a number of checks on the state of the backend, // and adds/removes Warnings from the Tracker as needed. 
func (t *Tracker) updateBuiltinWarnablesLocked() { + if !buildfeatures.HasHealth { + return + } t.updateWarmingUpWarnableLocked() if w, show := t.showUpdateWarnable(); show { @@ -1334,11 +1310,17 @@ func (s *ReceiveFuncStats) Name() string { } func (s *ReceiveFuncStats) Enter() { + if !buildfeatures.HasHealth { + return + } s.numCalls.Add(1) s.inCall.Store(true) } func (s *ReceiveFuncStats) Exit() { + if !buildfeatures.HasHealth { + return + } s.inCall.Store(false) } @@ -1347,7 +1329,7 @@ func (s *ReceiveFuncStats) Exit() { // // If t is nil, it returns nil. func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { - if t == nil { + if !buildfeatures.HasHealth || t == nil { return nil } t.initOnce.Do(t.doOnceInit) @@ -1355,6 +1337,9 @@ func (t *Tracker) ReceiveFuncStats(which ReceiveFunc) *ReceiveFuncStats { } func (t *Tracker) doOnceInit() { + if !buildfeatures.HasHealth { + return + } for i := range t.MagicSockReceiveFuncs { f := &t.MagicSockReceiveFuncs[i] f.name = (ReceiveFunc(i)).String() @@ -1403,10 +1388,3 @@ func (t *Tracker) LastNoiseDialWasRecent() bool { t.lastNoiseDial = now return dur < 2*time.Minute } - -const MetricLabelWarning = "warning" - -type metricHealthMessageLabel struct { - // TODO: break down by warnable.severity as well? - Type string -} diff --git a/health/health_test.go b/health/health_test.go index d66cea06c0f0b..af7d06c8fe258 100644 --- a/health/health_test.go +++ b/health/health_test.go @@ -4,26 +4,55 @@ package health import ( + "errors" + "flag" "fmt" "maps" "reflect" "slices" "strconv" "testing" + "testing/synctest" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "tailscale.com/metrics" "tailscale.com/tailcfg" + "tailscale.com/tsconst" "tailscale.com/tstest" "tailscale.com/tstime" "tailscale.com/types/opt" + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" "tailscale.com/version" ) +var doDebug = flag.Bool("debug", false, "Enable debug logging") + +func wantChange(c Change) func(c Change) (bool, error) { + return func(cEv Change) (bool, error) { + if cEv.ControlHealthChanged != c.ControlHealthChanged { + return false, fmt.Errorf("expected ControlHealthChanged %t, got %t", c.ControlHealthChanged, cEv.ControlHealthChanged) + } + if cEv.WarnableChanged != c.WarnableChanged { + return false, fmt.Errorf("expected WarnableChanged %t, got %t", c.WarnableChanged, cEv.WarnableChanged) + } + if c.Warnable != nil && (cEv.Warnable == nil || cEv.Warnable != c.Warnable) { + return false, fmt.Errorf("expected Warnable %+v, got %+v", c.Warnable, cEv.Warnable) + } + + if c.UnhealthyState != nil { + panic("comparison of UnhealthyState is not yet supported") + } + + return true, nil + } +} + func TestAppendWarnableDebugFlags(t *testing.T) { - var tr Tracker + tr := NewTracker(eventbustest.NewBus(t)) for i := range 10 { w := Register(&Warnable{ @@ -68,7 +97,9 @@ func TestNilMethodsDontCrash(t *testing.T) { } func TestSetUnhealthyWithDuplicateThenHealthyAgain(t *testing.T) { - ht := Tracker{} + bus := eventbustest.NewBus(t) + watcher := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) if len(ht.Strings()) != 0 { t.Fatalf("before first insertion, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } @@ -92,10 +123,20 @@ func TestSetUnhealthyWithDuplicateThenHealthyAgain(t *testing.T) { if !reflect.DeepEqual(ht.Strings(), want) { t.Fatalf("after setting the healthy, newTracker.Strings() = %v; want = %v", ht.Strings(), want) } + + if err := 
eventbustest.ExpectExactly(watcher, + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + ); err != nil { + t.Fatalf("expected events, got %q", err) + } } func TestRemoveAllWarnings(t *testing.T) { - ht := Tracker{} + bus := eventbustest.NewBus(t) + watcher := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) if len(ht.Strings()) != 0 { t.Fatalf("before first insertion, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } @@ -109,67 +150,96 @@ func TestRemoveAllWarnings(t *testing.T) { if len(ht.Strings()) != 0 { t.Fatalf("after RemoveAll, len(newTracker.Strings) = %d; want = 0", len(ht.Strings())) } + if err := eventbustest.ExpectExactly(watcher, + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + wantChange(Change{WarnableChanged: true, Warnable: testWarnable}), + ); err != nil { + t.Fatalf("expected events, got %q", err) + } } // TestWatcher tests that a registered watcher function gets called with the correct // Warnable and non-nil/nil UnhealthyState upon setting a Warnable to unhealthy/healthy. func TestWatcher(t *testing.T) { - ht := Tracker{} - wantText := "Hello world" - becameUnhealthy := make(chan struct{}) - becameHealthy := make(chan struct{}) - - watcherFunc := func(c Change) { - w := c.Warnable - us := c.UnhealthyState - if w != testWarnable { - t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) - } + tests := []struct { + name string + preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) + }{ + { + name: "with-eventbus", + preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { + client := bus.Client("healthwatchertestclient") + sub := eventbus.Subscribe[Change](client) + go func() { + for { + select { + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + }() + }, + }, + } - if us != nil { - if us.Text != wantText { - t.Fatalf("unexpected us.Text: %s, want: %s", us.Text, wantText) - } - if us.Args[ArgError] != wantText { - t.Fatalf("unexpected us.Args[ArgError]: %s, want: %s", us.Args[ArgError], wantText) + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + bus := eventbustest.NewBus(t) + ht := NewTracker(bus) + wantText := "Hello world" + becameUnhealthy := make(chan struct{}) + becameHealthy := make(chan struct{}) + + watcherFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState + if w != testWarnable { + t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, testWarnable) + } + + if us != nil { + if us.Text != wantText { + t.Fatalf("unexpected us.Text: %q, want: %s", us.Text, wantText) + } + if us.Args[ArgError] != wantText { + t.Fatalf("unexpected us.Args[ArgError]: %q, want: %s", us.Args[ArgError], wantText) + } + becameUnhealthy <- struct{}{} + } else { + becameHealthy <- struct{}{} + } } - becameUnhealthy <- struct{}{} - } else { - becameHealthy <- struct{}{} - } - } - unregisterFunc := ht.RegisterWatcher(watcherFunc) - if len(ht.watchers) != 1 { - t.Fatalf("after RegisterWatcher, len(newTracker.watchers) = %d; want = 1", len(ht.watchers)) - } - ht.SetUnhealthy(testWarnable, Args{ArgError: wantText}) + // Set up test + tt.preFunc(t, ht, bus, watcherFunc) - select { - case <-becameUnhealthy: - // Test passed because the watcher got notified of an unhealthy state - case <-becameHealthy: - // Test failed 
because the watcher got of a healthy state instead of an unhealthy one - t.Fatalf("watcherFunc was called with a healthy state") - case <-time.After(1 * time.Second): - t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") - } + // Start running actual test + ht.SetUnhealthy(testWarnable, Args{ArgError: wantText}) - ht.SetHealthy(testWarnable) + select { + case <-becameUnhealthy: + // Test passed because the watcher got notified of an unhealthy state + case <-becameHealthy: + // Test failed because the watcher got of a healthy state instead of an unhealthy one + t.Fatalf("watcherFunc was called with a healthy state") + case <-time.After(5 * time.Second): + t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") + } - select { - case <-becameUnhealthy: - // Test failed because the watcher got of an unhealthy state instead of a healthy one - t.Fatalf("watcherFunc was called with an unhealthy state") - case <-becameHealthy: - // Test passed because the watcher got notified of a healthy state - case <-time.After(1 * time.Second): - t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") - } + ht.SetHealthy(testWarnable) - unregisterFunc() - if len(ht.watchers) != 0 { - t.Fatalf("after unregisterFunc, len(newTracker.watchers) = %d; want = 0", len(ht.watchers)) + select { + case <-becameUnhealthy: + // Test failed because the watcher got of an unhealthy state instead of a healthy one + t.Fatalf("watcherFunc was called with an unhealthy state") + case <-becameHealthy: + // Test passed because the watcher got notified of a healthy state + case <-time.After(5 * time.Second): + t.Fatalf("watcherFunc didn't get called upon calling SetUnhealthy") + } + }) } } @@ -178,45 +248,72 @@ func TestWatcher(t *testing.T) { // has a TimeToVisible set, which means that a watcher should only be notified of an unhealthy state after // the TimeToVisible duration has passed. 
func TestSetUnhealthyWithTimeToVisible(t *testing.T) { - ht := Tracker{} - mw := Register(&Warnable{ - Code: "test-warnable-3-secs-to-visible", - Title: "Test Warnable with 3 seconds to visible", - Text: StaticMessage("Hello world"), - TimeToVisible: 2 * time.Second, - ImpactsConnectivity: true, - }) - defer unregister(mw) - - becameUnhealthy := make(chan struct{}) - becameHealthy := make(chan struct{}) + tests := []struct { + name string + preFunc func(t *testing.T, ht *Tracker, bus *eventbus.Bus, fn func(Change)) + }{ + { + name: "with-eventbus", + preFunc: func(_ *testing.T, _ *Tracker, bus *eventbus.Bus, fn func(c Change)) { + client := bus.Client("healthwatchertestclient") + sub := eventbus.Subscribe[Change](client) + go func() { + for { + select { + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + }() + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + bus := eventbustest.NewBus(t) + ht := NewTracker(bus) + mw := Register(&Warnable{ + Code: "test-warnable-3-secs-to-visible", + Title: "Test Warnable with 3 seconds to visible", + Text: StaticMessage("Hello world"), + TimeToVisible: 2 * time.Second, + ImpactsConnectivity: true, + }) - watchFunc := func(c Change) { - w := c.Warnable - us := c.UnhealthyState - if w != mw { - t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) - } + becameUnhealthy := make(chan struct{}) + becameHealthy := make(chan struct{}) - if us != nil { - becameUnhealthy <- struct{}{} - } else { - becameHealthy <- struct{}{} - } - } + watchFunc := func(c Change) { + w := c.Warnable + us := c.UnhealthyState + if w != mw { + t.Fatalf("watcherFunc was called, but with an unexpected Warnable: %v, want: %v", w, w) + } - ht.RegisterWatcher(watchFunc) - ht.SetUnhealthy(mw, Args{ArgError: "Hello world"}) + if us != nil { + becameUnhealthy <- struct{}{} + } else { + becameHealthy <- struct{}{} + } + } - select { - case <-becameUnhealthy: - // Test failed because the watcher got notified of an unhealthy state - t.Fatalf("watcherFunc was called with an unhealthy state") - case <-becameHealthy: - // Test failed because the watcher got of a healthy state - t.Fatalf("watcherFunc was called with a healthy state") - case <-time.After(1 * time.Second): - // As expected, watcherFunc still had not been called after 1 second + tt.preFunc(t, ht, bus, watchFunc) + ht.SetUnhealthy(mw, Args{ArgError: "Hello world"}) + + select { + case <-becameUnhealthy: + // Test failed because the watcher got notified of an unhealthy state + t.Fatalf("watcherFunc was called with an unhealthy state") + case <-becameHealthy: + // Test failed because the watcher got of a healthy state + t.Fatalf("watcherFunc was called with a healthy state") + case <-time.After(1 * time.Second): + // As expected, watcherFunc still had not been called after 1 second + } + unregister(mw) + }) } } @@ -242,7 +339,7 @@ func TestRegisterWarnablePanicsWithDuplicate(t *testing.T) { // TestCheckDependsOnAppearsInUnhealthyState asserts that the DependsOn field in the UnhealthyState // is populated with the WarnableCode(s) of the Warnable(s) that a warning depends on. 
func TestCheckDependsOnAppearsInUnhealthyState(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) w1 := Register(&Warnable{ Code: "w1", Text: StaticMessage("W1 Text"), @@ -352,11 +449,11 @@ func TestShowUpdateWarnable(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - tr := &Tracker{ - checkForUpdates: tt.check, - applyUpdates: tt.apply, - latestVersion: tt.cv, - } + tr := NewTracker(eventbustest.NewBus(t)) + tr.checkForUpdates = tt.check + tr.applyUpdates = tt.apply + tr.latestVersion = tt.cv + gotWarnable, gotShow := tr.showUpdateWarnable() if gotWarnable != tt.wantWarnable { t.Errorf("got warnable: %v, want: %v", gotWarnable, tt.wantWarnable) @@ -401,13 +498,16 @@ func TestHealthMetric(t *testing.T) { } for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - tr := &Tracker{ - checkForUpdates: tt.check, - applyUpdates: tt.apply, - latestVersion: tt.cv, - } + tr := NewTracker(eventbustest.NewBus(t)) + tr.checkForUpdates = tt.check + tr.applyUpdates = tt.apply + tr.latestVersion = tt.cv tr.SetMetricsRegistry(&usermetric.Registry{}) - if val := tr.metricHealthMessage.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { + m, ok := tr.metricHealthMessage.(*metrics.MultiLabelMap[metricHealthMessageLabel]) + if !ok { + t.Fatal("metricHealthMessage has wrong type or is nil") + } + if val := m.Get(metricHealthMessageLabel{Type: MetricLabelWarning}).String(); val != strconv.Itoa(tt.wantMetricCount) { t.Fatalf("metric value: %q, want: %q", val, strconv.Itoa(tt.wantMetricCount)) } for _, w := range tr.CurrentState().Warnings { @@ -426,9 +526,8 @@ func TestNoDERPHomeWarnable(t *testing.T) { Start: time.Unix(123, 0), FollowRealTime: false, }) - ht := &Tracker{ - testClock: clock, - } + ht := NewTracker(eventbustest.NewBus(t)) + ht.testClock = clock ht.SetIPNState("NeedsLogin", true) // Advance 30 seconds to get past the "recentlyLoggedIn" check. 
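The health.Tracker changes above replace the callback-based RegisterWatcher API with Change events published on the event bus, as described in the new Tracker doc comment and exercised by the updated tests. For reference, a minimal sketch (not part of this diff) of how a non-test consumer might subscribe; the package name, client name, and logging are illustrative, and the bus is assumed to come from the daemon's existing wiring:

package example // hypothetical consumer, for illustration only

import (
	"log"

	"tailscale.com/health"
	"tailscale.com/util/eventbus"
)

// watchHealth constructs a Tracker on the given bus and logs Change events.
func watchHealth(bus *eventbus.Bus) *health.Tracker {
	ht := health.NewTracker(bus) // panics if bus is nil
	client := bus.Client("example.healthWatcher")
	sub := eventbus.Subscribe[health.Change](client)
	go func() {
		for {
			select {
			case <-sub.Done():
				return
			case c := <-sub.Events():
				switch {
				case c.WarnableChanged && c.UnhealthyState != nil:
					log.Printf("unhealthy %q: %s", c.Warnable.Code, c.UnhealthyState.Text)
				case c.WarnableChanged:
					log.Printf("healthy again: %q", c.Warnable.Code)
				case c.ControlHealthChanged:
					log.Printf("control-plane health changed: %d warnings", len(ht.CurrentState().Warnings))
				}
			}
		}
	}()
	return ht
}

This mirrors what the updated tests do with eventbustest; the goroutine exits when the subscription's Done channel is closed along with the bus.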
@@ -448,7 +547,7 @@ func TestNoDERPHomeWarnable(t *testing.T) { // but doesn't use tstest.Clock so avoids the deadlock // I hit: https://github.com/tailscale/tailscale/issues/14798 func TestNoDERPHomeWarnableManual(t *testing.T) { - ht := &Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) // Avoid wantRunning: @@ -462,7 +561,7 @@ func TestNoDERPHomeWarnableManual(t *testing.T) { } func TestControlHealth(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -545,7 +644,11 @@ func TestControlHealth(t *testing.T) { var r usermetric.Registry ht.SetMetricsRegistry(&r) - got := ht.metricHealthMessage.Get(metricHealthMessageLabel{ + m, ok := ht.metricHealthMessage.(*metrics.MultiLabelMap[metricHealthMessageLabel]) + if !ok { + t.Fatal("metricHealthMessage has wrong type or is nil") + } + got := m.Get(metricHealthMessageLabel{ Type: MetricLabelWarning, }).String() want := strconv.Itoa( @@ -562,7 +665,7 @@ func TestControlHealthNotifies(t *testing.T) { name string initialState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage newState map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage - wantNotify bool + wantEvents []any } tests := []test{ { @@ -573,7 +676,7 @@ func TestControlHealthNotifies(t *testing.T) { newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test": {}, }, - wantNotify: false, + wantEvents: []any{}, }, { name: "on-set", @@ -581,7 +684,9 @@ func TestControlHealthNotifies(t *testing.T) { newState: map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ "test": {}, }, - wantNotify: true, + wantEvents: []any{ + eventbustest.Type[Change](), + }, }, { name: "details-change", @@ -595,7 +700,9 @@ func TestControlHealthNotifies(t *testing.T) { Title: "Updated title", }, }, - wantNotify: true, + wantEvents: []any{ + eventbustest.Type[Change](), + }, }, { name: "action-changes", @@ -615,63 +722,119 @@ func TestControlHealthNotifies(t *testing.T) { }, }, }, - wantNotify: true, + wantEvents: []any{ + eventbustest.Type[Change](), + }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) - ht.GotStreamedMapResponse() + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } + tw := eventbustest.NewWatcher(t, bus) + + ht := NewTracker(bus) + ht.SetIPNState("NeedsLogin", true) + ht.GotStreamedMapResponse() + + // Expect events at starup, before doing anything else, skip unstable + // event and no warning event as they show up at different times. 
+ synctest.Wait() + if err := eventbustest.Expect(tw, + CompareWarnableCode(t, tsconst.HealthWarnableWarmingUp), + CompareWarnableCode(t, tsconst.HealthWarnableNotInMapPoll), + CompareWarnableCode(t, tsconst.HealthWarnableWarmingUp), + ); err != nil { + t.Errorf("startup error: %v", err) + } - if len(test.initialState) != 0 { - ht.SetControlHealth(test.initialState) - } + // Only set initial state if we need to + if len(test.initialState) != 0 { + t.Log("Setting initial state") + ht.SetControlHealth(test.initialState) + synctest.Wait() + if err := eventbustest.Expect(tw, + CompareWarnableCode(t, tsconst.HealthWarnableMagicsockReceiveFuncError), + // Skip event with no warnable + CompareWarnableCode(t, tsconst.HealthWarnableNoDERPHome), + ); err != nil { + t.Errorf("initial state error: %v", err) + } + } - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) + ht.SetControlHealth(test.newState) + // Close the bus early to avoid timers triggering more events. + bus.Close() - ht.SetControlHealth(test.newState) + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, test.wantEvents...); err != nil { + t.Errorf("event error: %v", err) + } + }) + }) + } +} - if gotNotified != test.wantNotify { - t.Errorf("notified: got %v, want %v", gotNotified, test.wantNotify) +func CompareWarnableCode(t *testing.T, code string) func(Change) bool { + t.Helper() + return func(c Change) bool { + t.Helper() + if c.Warnable != nil { + t.Logf("Warnable code: %s", c.Warnable.Code) + if string(c.Warnable.Code) == code { + return true } - }) + } else { + t.Log("No Warnable") + } + return false } } func TestControlHealthIgnoredOutsideMapPoll(t *testing.T) { - ht := Tracker{} - ht.SetIPNState("NeedsLogin", true) + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + ht := NewTracker(bus) + ht.SetIPNState("NeedsLogin", true) - gotNotified := false - ht.registerSyncWatcher(func(_ Change) { - gotNotified = true - }) + ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ + "control-health": {}, + }) - ht.SetControlHealth(map[tailcfg.DisplayMessageID]tailcfg.DisplayMessage{ - "control-health": {}, - }) + state := ht.CurrentState() + _, ok := state.Warnings["control-health"] - state := ht.CurrentState() - _, ok := state.Warnings["control-health"] + if ok { + t.Error("got a warning with code 'control-health', want none") + } - if ok { - t.Error("got a warning with code 'control-health', want none") - } + // An event is emitted when SetIPNState is run above, + // so only fail on the second event. + eventCounter := 0 + expectOne := func(c *Change) error { + eventCounter++ + if eventCounter == 1 { + return nil + } + return errors.New("saw more than 1 event") + } - if gotNotified { - t.Error("watcher got called, want it to not be called") - } + synctest.Wait() + if err := eventbustest.Expect(tw, expectOne); err == nil { + t.Error("event got emitted, want it to not be called") + } + }) } // TestCurrentStateETagControlHealth tests that the ETag on an [UnhealthyState] // created from Control health & returned by [Tracker.CurrentState] is different // when the details of the [tailcfg.DisplayMessage] are different. func TestCurrentStateETagControlHealth(t *testing.T) { - ht := Tracker{} + ht := NewTracker(eventbustest.NewBus(t)) ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() @@ -776,9 +939,8 @@ func TestCurrentStateETagControlHealth(t *testing.T) { // when the details of the Warnable are different. 
func TestCurrentStateETagWarnable(t *testing.T) { newTracker := func(clock tstime.Clock) *Tracker { - ht := &Tracker{ - testClock: clock, - } + ht := NewTracker(eventbustest.NewBus(t)) + ht.testClock = clock ht.SetIPNState("NeedsLogin", true) ht.GotStreamedMapResponse() return ht diff --git a/health/state.go b/health/state.go index 116518629f27e..e6d937b6a8f02 100644 --- a/health/state.go +++ b/health/state.go @@ -9,11 +9,15 @@ import ( "encoding/json" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" ) // State contains the health status of the backend, and is // provided to the client UI via LocalAPI through ipn.Notify. +// +// It is also exposed via c2n for debugging purposes, so try +// not to change its structure too gratuitously. type State struct { // Each key-value pair in Warnings represents a Warnable that is currently // unhealthy. If a Warnable is healthy, it will not be present in this map. @@ -117,7 +121,7 @@ func (w *Warnable) unhealthyState(ws *warningState) *UnhealthyState { // The returned State is a snapshot of shared memory, and the caller should not // mutate the returned value. func (t *Tracker) CurrentState() *State { - if t.nil() { + if !buildfeatures.HasHealth || t.nil() { return &State{} } diff --git a/health/usermetrics.go b/health/usermetrics.go new file mode 100644 index 0000000000000..110c57b57971c --- /dev/null +++ b/health/usermetrics.go @@ -0,0 +1,52 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_health && !ts_omit_usermetrics + +package health + +import ( + "expvar" + + "tailscale.com/feature/buildfeatures" + "tailscale.com/util/usermetric" +) + +const MetricLabelWarning = "warning" + +type metricHealthMessageLabel struct { + // TODO: break down by warnable.severity as well? + Type string +} + +// SetMetricsRegistry sets up the metrics for the Tracker. It takes +// a usermetric.Registry and registers the metrics there. +func (t *Tracker) SetMetricsRegistry(reg *usermetric.Registry) { + if !buildfeatures.HasHealth { + return + } + + if reg == nil || t.metricHealthMessage != nil { + return + } + + m := usermetric.NewMultiLabelMapWithRegistry[metricHealthMessageLabel]( + reg, + "tailscaled_health_messages", + "gauge", + "Number of health messages broken down by type.", + ) + + m.Set(metricHealthMessageLabel{ + Type: MetricLabelWarning, + }, expvar.Func(func() any { + if t.nil() { + return 0 + } + t.mu.Lock() + defer t.mu.Unlock() + t.updateBuiltinWarnablesLocked() + return int64(len(t.stringsLocked())) + })) + t.metricHealthMessage = m +} diff --git a/health/usermetrics_omit.go b/health/usermetrics_omit.go new file mode 100644 index 0000000000000..9d5e35b861681 --- /dev/null +++ b/health/usermetrics_omit.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_health || ts_omit_usermetrics + +package health + +func (t *Tracker) SetMetricsRegistry(any) {} diff --git a/health/warnings.go b/health/warnings.go index 3997e66b39ad0..a9c4b34a0f849 100644 --- a/health/warnings.go +++ b/health/warnings.go @@ -8,234 +8,279 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" + "tailscale.com/tsconst" "tailscale.com/version" ) +func condRegister(f func() *Warnable) *Warnable { + if !buildfeatures.HasHealth { + return nil + } + return f() +} + /** This file contains definitions for the Warnables maintained within this `health` package. 
*/ // updateAvailableWarnable is a Warnable that warns the user that an update is available. -var updateAvailableWarnable = Register(&Warnable{ - Code: "update-available", - Title: "Update available", - Severity: SeverityLow, - Text: func(args Args) string { - if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { - return fmt.Sprintf("An update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } else { - return fmt.Sprintf("An update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } - }, +var updateAvailableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableUpdateAvailable, + Title: "Update available", + Severity: SeverityLow, + Text: func(args Args) string { + if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { + return fmt.Sprintf("An update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } else { + return fmt.Sprintf("An update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } + }, + } }) // securityUpdateAvailableWarnable is a Warnable that warns the user that an important security update is available. -var securityUpdateAvailableWarnable = Register(&Warnable{ - Code: "security-update-available", - Title: "Security update available", - Severity: SeverityMedium, - Text: func(args Args) string { - if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { - return fmt.Sprintf("A security update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } else { - return fmt.Sprintf("A security update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) - } - }, +var securityUpdateAvailableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableSecurityUpdateAvailable, + Title: "Security update available", + Severity: SeverityMedium, + Text: func(args Args) string { + if version.IsMacAppStore() || version.IsAppleTV() || version.IsMacSys() || version.IsWindowsGUI() || runtime.GOOS == "android" { + return fmt.Sprintf("A security update from version %s to %s is available.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } else { + return fmt.Sprintf("A security update from version %s to %s is available. Run `tailscale update` or `tailscale set --auto-update` to update now.", args[ArgCurrentVersion], args[ArgAvailableVersion]) + } + }, + } }) // unstableWarnable is a Warnable that warns the user that they are using an unstable version of Tailscale // so they won't be surprised by all the issues that may arise. -var unstableWarnable = Register(&Warnable{ - Code: "is-using-unstable-version", - Title: "Using an unstable version", - Severity: SeverityLow, - Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. 
Please report any issues to Tailscale."), +var unstableWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableIsUsingUnstableVersion, + Title: "Using an unstable version", + Severity: SeverityLow, + Text: StaticMessage("This is an unstable version of Tailscale meant for testing and development purposes. Please report any issues to Tailscale."), + } }) // NetworkStatusWarnable is a Warnable that warns the user that the network is down. -var NetworkStatusWarnable = Register(&Warnable{ - Code: "network-status", - Title: "Network down", - Severity: SeverityMedium, - Text: StaticMessage("Tailscale cannot connect because the network is down. Check your Internet connection."), - ImpactsConnectivity: true, - TimeToVisible: 5 * time.Second, +var NetworkStatusWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNetworkStatus, + Title: "Network down", + Severity: SeverityMedium, + Text: StaticMessage("Tailscale cannot connect because the network is down. Check your Internet connection."), + ImpactsConnectivity: true, + TimeToVisible: 5 * time.Second, + } }) // IPNStateWarnable is a Warnable that warns the user that Tailscale is stopped. -var IPNStateWarnable = Register(&Warnable{ - Code: "wantrunning-false", - Title: "Tailscale off", - Severity: SeverityLow, - Text: StaticMessage("Tailscale is stopped."), +var IPNStateWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableWantRunningFalse, + Title: "Tailscale off", + Severity: SeverityLow, + Text: StaticMessage("Tailscale is stopped."), + } }) // localLogWarnable is a Warnable that warns the user that the local log is misconfigured. -var localLogWarnable = Register(&Warnable{ - Code: "local-log-config-error", - Title: "Local log misconfiguration", - Severity: SeverityLow, - Text: func(args Args) string { - return fmt.Sprintf("The local log is misconfigured: %v", args[ArgError]) - }, +var localLogWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableLocalLogConfigError, + Title: "Local log misconfiguration", + Severity: SeverityLow, + Text: func(args Args) string { + return fmt.Sprintf("The local log is misconfigured: %v", args[ArgError]) + }, + } }) // LoginStateWarnable is a Warnable that warns the user that they are logged out, // and provides the last login error if available. -var LoginStateWarnable = Register(&Warnable{ - Code: "login-state", - Title: "Logged out", - Severity: SeverityMedium, - Text: func(args Args) string { - if args[ArgError] != "" { - return fmt.Sprintf("You are logged out. The last login error was: %v", args[ArgError]) - } else { - return "You are logged out." - } - }, - DependsOn: []*Warnable{IPNStateWarnable}, +var LoginStateWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableLoginState, + Title: "Logged out", + Severity: SeverityMedium, + Text: func(args Args) string { + if args[ArgError] != "" { + return fmt.Sprintf("You are logged out. The last login error was: %v", args[ArgError]) + } else { + return "You are logged out." + } + }, + DependsOn: []*Warnable{IPNStateWarnable}, + } }) // notInMapPollWarnable is a Warnable that warns the user that we are using a stale network map. 
-var notInMapPollWarnable = Register(&Warnable{ - Code: "not-in-map-poll", - Title: "Out of sync", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), - // 8 minutes reflects a maximum maintenance window for the coordination server. - TimeToVisible: 8 * time.Minute, +var notInMapPollWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNotInMapPoll, + Title: "Out of sync", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: StaticMessage("Unable to connect to the Tailscale coordination server to synchronize the state of your tailnet. Peer reachability might degrade over time."), + // 8 minutes reflects a maximum maintenance window for the coordination server. + TimeToVisible: 8 * time.Minute, + } }) // noDERPHomeWarnable is a Warnable that warns the user that Tailscale doesn't have a home DERP. -var noDERPHomeWarnable = Register(&Warnable{ - Code: "no-derp-home", - Title: "No home relay server", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: StaticMessage("Tailscale could not connect to any relay server. Check your Internet connection."), - ImpactsConnectivity: true, - TimeToVisible: 10 * time.Second, +var noDERPHomeWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNoDERPHome, + Title: "No home relay server", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: StaticMessage("Tailscale could not connect to any relay server. Check your Internet connection."), + ImpactsConnectivity: true, + TimeToVisible: 10 * time.Second, + } }) // noDERPConnectionWarnable is a Warnable that warns the user that Tailscale couldn't connect to a specific DERP server. -var noDERPConnectionWarnable = Register(&Warnable{ - Code: "no-derp-connection", - Title: "Relay server unavailable", - Severity: SeverityMedium, - DependsOn: []*Warnable{ - NetworkStatusWarnable, - - // Technically noDERPConnectionWarnable could be used to warn about - // failure to connect to a specific DERP server (e.g. your home is derp1 - // but you're trying to connect to a peer's derp4 and are unable) but as - // of 2024-09-25 we only use this for connecting to your home DERP, so - // we depend on noDERPHomeWarnable which is the ability to figure out - // what your DERP home even is. - noDERPHomeWarnable, - }, - Text: func(args Args) string { - if n := args[ArgDERPRegionName]; n != "" { - return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) - } else { - return fmt.Sprintf("Tailscale could not connect to the relay server with ID '%s'. Your Internet connection might be down, or the server might be temporarily unavailable.", args[ArgDERPRegionID]) - } - }, - ImpactsConnectivity: true, - TimeToVisible: 10 * time.Second, +var noDERPConnectionWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNoDERPConnection, + Title: "Relay server unavailable", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + + // Technically noDERPConnectionWarnable could be used to warn about + // failure to connect to a specific DERP server (e.g. 
your home is derp1 + // but you're trying to connect to a peer's derp4 and are unable) but as + // of 2024-09-25 we only use this for connecting to your home DERP, so + // we depend on noDERPHomeWarnable which is the ability to figure out + // what your DERP home even is. + noDERPHomeWarnable, + }, + Text: func(args Args) string { + if n := args[ArgDERPRegionName]; n != "" { + return fmt.Sprintf("Tailscale could not connect to the '%s' relay server. Your Internet connection might be down, or the server might be temporarily unavailable.", n) + } else { + return fmt.Sprintf("Tailscale could not connect to the relay server with ID '%s'. Your Internet connection might be down, or the server might be temporarily unavailable.", args[ArgDERPRegionID]) + } + }, + ImpactsConnectivity: true, + TimeToVisible: 10 * time.Second, + } }) // derpTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't // heard from the home DERP region for a while. -var derpTimeoutWarnable = Register(&Warnable{ - Code: "derp-timed-out", - Title: "Relay server timed out", - Severity: SeverityMedium, - DependsOn: []*Warnable{ - NetworkStatusWarnable, - noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected - noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency - }, - Text: func(args Args) string { - if n := args[ArgDERPRegionName]; n != "" { - return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) - } else { - return fmt.Sprintf("Tailscale hasn't heard from the home relay server (region ID '%v') in %v. The server might be temporarily unavailable, or your Internet connection might be down.", args[ArgDERPRegionID], args[ArgDuration]) - } - }, +var derpTimeoutWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableDERPTimedOut, + Title: "Relay server timed out", + Severity: SeverityMedium, + DependsOn: []*Warnable{ + NetworkStatusWarnable, + noDERPConnectionWarnable, // don't warn about it being stalled if we're not connected + noDERPHomeWarnable, // same reason as noDERPConnectionWarnable's dependency + }, + Text: func(args Args) string { + if n := args[ArgDERPRegionName]; n != "" { + return fmt.Sprintf("Tailscale hasn't heard from the '%s' relay server in %v. The server might be temporarily unavailable, or your Internet connection might be down.", n, args[ArgDuration]) + } else { + return fmt.Sprintf("Tailscale hasn't heard from the home relay server (region ID '%v') in %v. The server might be temporarily unavailable, or your Internet connection might be down.", args[ArgDERPRegionID], args[ArgDuration]) + } + }, + } }) // derpRegionErrorWarnable is a Warnable that warns the user that a DERP region is reporting an issue. 
-var derpRegionErrorWarnable = Register(&Warnable{ - Code: "derp-region-error", - Title: "Relay server error", - Severity: SeverityLow, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("The relay server #%v is reporting an issue: %v", args[ArgDERPRegionID], args[ArgError]) - }, +var derpRegionErrorWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableDERPRegionError, + Title: "Relay server error", + Severity: SeverityLow, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("The relay server #%v is reporting an issue: %v", args[ArgDERPRegionID], args[ArgError]) + }, + } }) // noUDP4BindWarnable is a Warnable that warns the user that Tailscale couldn't listen for incoming UDP connections. -var noUDP4BindWarnable = Register(&Warnable{ - Code: "no-udp4-bind", - Title: "NAT traversal setup failure", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), - ImpactsConnectivity: true, +var noUDP4BindWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableNoUDP4Bind, + Title: "NAT traversal setup failure", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: StaticMessage("Tailscale couldn't listen for incoming UDP connections."), + ImpactsConnectivity: true, + } }) // mapResponseTimeoutWarnable is a Warnable that warns the user that Tailscale hasn't received a network map from the coordination server in a while. -var mapResponseTimeoutWarnable = Register(&Warnable{ - Code: "mapresponse-timeout", - Title: "Network map response timeout", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) - }, +var mapResponseTimeoutWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableMapResponseTimeout, + Title: "Network map response timeout", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable, IPNStateWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("Tailscale hasn't received a network map from the coordination server in %s.", args[ArgDuration]) + }, + } }) // tlsConnectionFailedWarnable is a Warnable that warns the user that Tailscale could not establish an encrypted connection with a server. -var tlsConnectionFailedWarnable = Register(&Warnable{ - Code: "tls-connection-failed", - Title: "Encrypted connection failed", - Severity: SeverityMedium, - DependsOn: []*Warnable{NetworkStatusWarnable}, - Text: func(args Args) string { - return fmt.Sprintf("Tailscale could not establish an encrypted connection with '%q': %v", args[ArgServerName], args[ArgError]) - }, +var tlsConnectionFailedWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableTLSConnectionFailed, + Title: "Encrypted connection failed", + Severity: SeverityMedium, + DependsOn: []*Warnable{NetworkStatusWarnable}, + Text: func(args Args) string { + return fmt.Sprintf("Tailscale could not establish an encrypted connection with '%q': %v", args[ArgServerName], args[ArgError]) + }, + } }) // magicsockReceiveFuncWarnable is a Warnable that warns the user that one of the Magicsock functions is not running. 
-var magicsockReceiveFuncWarnable = Register(&Warnable{ - Code: "magicsock-receive-func-error", - Title: "MagicSock function not running", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("The MagicSock function %s is not running. You might experience connectivity issues.", args[ArgMagicsockFunctionName]) - }, +var magicsockReceiveFuncWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableMagicsockReceiveFuncError, + Title: "MagicSock function not running", + Severity: SeverityMedium, + Text: func(args Args) string { + return fmt.Sprintf("The MagicSock function %s is not running. You might experience connectivity issues.", args[ArgMagicsockFunctionName]) + }, + } }) // testWarnable is a Warnable that is used within this package for testing purposes only. -var testWarnable = Register(&Warnable{ - Code: "test-warnable", - Title: "Test warnable", - Severity: SeverityLow, - Text: func(args Args) string { - return args[ArgError] - }, +var testWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableTestWarnable, + Title: "Test warnable", + Severity: SeverityLow, + Text: func(args Args) string { + return args[ArgError] + }, + } }) // applyDiskConfigWarnable is a Warnable that warns the user that there was an error applying the envknob config stored on disk. -var applyDiskConfigWarnable = Register(&Warnable{ - Code: "apply-disk-config", - Title: "Could not apply configuration", - Severity: SeverityMedium, - Text: func(args Args) string { - return fmt.Sprintf("An error occurred applying the Tailscale envknob configuration stored on disk: %v", args[ArgError]) - }, +var applyDiskConfigWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableApplyDiskConfig, + Title: "Could not apply configuration", + Severity: SeverityMedium, + Text: func(args Args) string { + return fmt.Sprintf("An error occurred applying the Tailscale envknob configuration stored on disk: %v", args[ArgError]) + }, + } }) // warmingUpWarnableDuration is the duration for which the warmingUpWarnable is reported by the backend after the user @@ -245,9 +290,11 @@ const warmingUpWarnableDuration = 5 * time.Second // warmingUpWarnable is a Warnable that is reported by the backend when it is starting up, for a maximum time of // warmingUpWarnableDuration. The GUIs use the presence of this Warnable to prevent showing any other warnings until // the backend is fully started. -var warmingUpWarnable = Register(&Warnable{ - Code: "warming-up", - Title: "Tailscale is starting", - Severity: SeverityLow, - Text: StaticMessage("Tailscale is starting. Please wait."), +var warmingUpWarnable = condRegister(func() *Warnable { + return &Warnable{ + Code: tsconst.HealthWarnableWarmingUp, + Title: "Tailscale is starting", + Severity: SeverityLow, + Text: StaticMessage("Tailscale is starting. Please wait."), + } }) diff --git a/internal/client/tailscale/identityfederation.go b/internal/client/tailscale/identityfederation.go new file mode 100644 index 0000000000000..e1fe3559c7b44 --- /dev/null +++ b/internal/client/tailscale/identityfederation.go @@ -0,0 +1,19 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// HookResolveAuthKeyViaWIF resolves to [identityfederation.ResolveAuthKey] when the +// corresponding feature tag is enabled in the build process. 
+// +// baseURL is the URL of the control server used for token exchange and authkey generation. +// clientID is the federated client ID used for token exchange, the format is / +// idToken is the Identity token from the identity provider +// tags is the list of tags to be associated with the auth key +var HookResolveAuthKeyViaWIF feature.Hook[func(ctx context.Context, baseURL, clientID, idToken string, tags []string) (string, error)] diff --git a/internal/client/tailscale/oauthkeys.go b/internal/client/tailscale/oauthkeys.go new file mode 100644 index 0000000000000..21102ce0b5fc8 --- /dev/null +++ b/internal/client/tailscale/oauthkeys.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailscale + +import ( + "context" + + "tailscale.com/feature" +) + +// HookResolveAuthKey resolves to [oauthkey.ResolveAuthKey] when the +// corresponding feature tag is enabled in the build process. +// +// authKey is a standard device auth key or an OAuth client secret to +// resolve into an auth key. +// tags is the list of tags being advertised by the client (required to be +// provided for the OAuth secret case, and required to be the same as the +// list of tags for which the OAuth secret is allowed to issue auth keys). +var HookResolveAuthKey feature.Hook[func(ctx context.Context, authKey string, tags []string) (string, error)] diff --git a/internal/client/tailscale/tailscale.go b/internal/client/tailscale/tailscale.go index cba7228bbc8b3..0e603bf792562 100644 --- a/internal/client/tailscale/tailscale.go +++ b/internal/client/tailscale/tailscale.go @@ -25,6 +25,9 @@ func init() { // AuthMethod is an alias to tailscale.com/client/tailscale. type AuthMethod = tsclient.AuthMethod +// APIKey is an alias to tailscale.com/client/tailscale. +type APIKey = tsclient.APIKey + // Device is an alias to tailscale.com/client/tailscale. type Device = tsclient.Device diff --git a/ipn/backend.go b/ipn/backend.go index fd4442f7160db..91cf81ca52962 100644 --- a/ipn/backend.go +++ b/ipn/backend.go @@ -205,7 +205,11 @@ func (n Notify) String() string { } s := sb.String() - return s[0:len(s)-1] + "}" + if s == "Notify{" { + return "Notify{}" + } else { + return s[0:len(s)-1] + "}" + } } // PartialFile represents an in-progress incoming file transfer. 
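The HookResolveAuthKeyViaWIF and HookResolveAuthKey declarations above follow the optional-hook pattern: the variable is always present, but only builds that include the corresponding feature package populate it. The sketch below illustrates that pattern with a simplified, locally defined stand-in for feature.Hook (the real type's method set is not part of this diff); the hook signature mirrors the one declared above, and the resolver body is a placeholder rather than a real OAuth exchange.

package main

import (
	"context"
	"fmt"
)

// hook is a minimal stand-in for feature.Hook[T]: an optional, settable value.
type hook[T any] struct {
	f  T
	ok bool
}

func (h *hook[T]) Set(f T)          { h.f, h.ok = f, true }
func (h *hook[T]) GetOk() (T, bool) { return h.f, h.ok }

// hookResolveAuthKey mirrors the signature of HookResolveAuthKey above.
var hookResolveAuthKey hook[func(ctx context.Context, authKey string, tags []string) (string, error)]

func init() {
	// In the real tree, the Set call lives in the optional feature package,
	// so it is linked in only when that feature is built.
	hookResolveAuthKey.Set(func(ctx context.Context, authKey string, tags []string) (string, error) {
		return authKey, nil // placeholder: no token exchange in this sketch
	})
}

func main() {
	if resolve, ok := hookResolveAuthKey.GetOk(); ok {
		key, err := resolve(context.Background(), "tskey-client-EXAMPLE", []string{"tag:server"})
		fmt.Println(key, err)
	} else {
		fmt.Println("auth key resolution feature not compiled in")
	}
}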
diff --git a/ipn/backend_test.go b/ipn/backend_test.go new file mode 100644 index 0000000000000..d72b966152ca3 --- /dev/null +++ b/ipn/backend_test.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package ipn + +import ( + "testing" + + "tailscale.com/health" + "tailscale.com/types/empty" +) + +func TestNotifyString(t *testing.T) { + for _, tt := range []struct { + name string + value Notify + expected string + }{ + { + name: "notify-empty", + value: Notify{}, + expected: "Notify{}", + }, + { + name: "notify-with-login-finished", + value: Notify{LoginFinished: &empty.Message{}}, + expected: "Notify{LoginFinished}", + }, + { + name: "notify-with-multiple-fields", + value: Notify{LoginFinished: &empty.Message{}, Health: &health.State{}}, + expected: "Notify{LoginFinished Health{...}}", + }, + } { + t.Run(tt.name, func(t *testing.T) { + actual := tt.value.String() + if actual != tt.expected { + t.Fatalf("expected=%q, actual=%q", tt.expected, actual) + } + }) + } +} diff --git a/ipn/conffile/cloudconf.go b/ipn/conffile/cloudconf.go index 650611cf161fc..4475a2d7b799e 100644 --- a/ipn/conffile/cloudconf.go +++ b/ipn/conffile/cloudconf.go @@ -10,6 +10,8 @@ import ( "net/http" "strings" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/omit" ) @@ -35,6 +37,9 @@ func getEC2MetadataToken() (string, error) { } func readVMUserData() ([]byte, error) { + if !buildfeatures.HasAWS { + return nil, feature.ErrUnavailable + } // TODO(bradfitz): support GCP, Azure, Proxmox/cloud-init // (NoCloud/ConfigDrive ISO), etc. diff --git a/ipn/conffile/conffile.go b/ipn/conffile/conffile.go index a2bafb8b7fd22..3a2aeffb3a0c6 100644 --- a/ipn/conffile/conffile.go +++ b/ipn/conffile/conffile.go @@ -8,11 +8,11 @@ package conffile import ( "bytes" "encoding/json" - "errors" "fmt" "os" "runtime" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" ) @@ -51,10 +51,6 @@ func Load(path string) (*Config, error) { // compile-time for deadcode elimination return nil, fmt.Errorf("config file loading not supported on %q", runtime.GOOS) } - if hujsonStandardize == nil { - // Build tags are wrong in conffile_hujson.go - return nil, errors.New("[unexpected] config file loading not wired up") - } var c Config c.Path = path var err error @@ -68,14 +64,21 @@ func Load(path string) (*Config, error) { if err != nil { return nil, err } - c.Std, err = hujsonStandardize(c.Raw) - if err != nil { - return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) + if buildfeatures.HasHuJSONConf && hujsonStandardize != nil { + c.Std, err = hujsonStandardize(c.Raw) + if err != nil { + return nil, fmt.Errorf("error parsing config file %s HuJSON/JSON: %w", path, err) + } + } else { + c.Std = c.Raw // config file must be valid JSON with ts_omit_hujsonconf } var ver struct { Version string `json:"version"` } if err := json.Unmarshal(c.Std, &ver); err != nil { + if !buildfeatures.HasHuJSONConf { + return nil, fmt.Errorf("error parsing config file %s, which must be valid standard JSON: %w", path, err) + } return nil, fmt.Errorf("error parsing config file %s: %w", path, err) } switch ver.Version { diff --git a/ipn/conffile/conffile_hujson.go b/ipn/conffile/conffile_hujson.go index 6825a06386625..1e967f1bdcca2 100644 --- a/ipn/conffile/conffile_hujson.go +++ b/ipn/conffile/conffile_hujson.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android +//go:build !ios && 
!android && !ts_omit_hujsonconf package conffile diff --git a/ipn/conffile/serveconf.go b/ipn/conffile/serveconf.go new file mode 100644 index 0000000000000..bb63c1ac5571a --- /dev/null +++ b/ipn/conffile/serveconf.go @@ -0,0 +1,239 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package conffile + +import ( + "errors" + "fmt" + "net" + "os" + "path" + "strings" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/tailcfg" + "tailscale.com/types/opt" + "tailscale.com/util/mak" +) + +// ServicesConfigFile is the config file format for services configuration. +type ServicesConfigFile struct { + // Version is always "0.0.1" and always present. + Version string `json:"version"` + + Services map[tailcfg.ServiceName]*ServiceDetailsFile `json:"services,omitzero"` +} + +// ServiceDetailsFile is the config syntax for an individual Tailscale Service. +type ServiceDetailsFile struct { + // Version is always "0.0.1", set if and only if this is not inside a + // [ServiceConfigFile]. + Version string `json:"version,omitzero"` + + // Endpoints are sets of reverse proxy mappings from ProtoPortRanges on a + // Service to Targets (proto+destination+port) on remote destinations (or + // localhost). + // For example, "tcp:443" -> "tcp://localhost:8000" is an endpoint definition + // mapping traffic on the TCP port 443 of the Service to port 8080 on localhost. + // The Proto in the key must be populated. + // As a special case, if the only mapping provided is "*" -> "TUN", that + // enables TUN/L3 mode, where packets are delivered to the Tailscale network + // interface with the understanding that the user will deal with them manually. + Endpoints map[*tailcfg.ProtoPortRange]*Target `json:"endpoints"` + + // Advertised is a flag that tells control whether or not the client thinks + // it is ready to host a particular Tailscale Service. If unset, it is + // assumed to be true. + Advertised opt.Bool `json:"advertised,omitzero"` +} + +// ServiceProtocol is the protocol of a Target. +type ServiceProtocol string + +const ( + ProtoHTTP ServiceProtocol = "http" + ProtoHTTPS ServiceProtocol = "https" + ProtoHTTPSInsecure ServiceProtocol = "https+insecure" + ProtoTCP ServiceProtocol = "tcp" + ProtoTLSTerminatedTCP ServiceProtocol = "tls-terminated-tcp" + ProtoFile ServiceProtocol = "file" + ProtoTUN ServiceProtocol = "TUN" +) + +// Target is a destination for traffic to go to when it arrives at a Tailscale +// Service host. +type Target struct { + // The protocol over which to communicate with the Destination. + // Protocol == ProtoTUN is a special case, activating "TUN mode" where + // packets are delivered to the Tailscale TUN interface and then manually + // handled by the user. + Protocol ServiceProtocol + + // If Protocol is ProtoFile, then Destination is a file path. + // If Protocol is ProtoTUN, then Destination is empty. + // Otherwise, it is a host. + Destination string + + // If Protocol is not ProtoFile or ProtoTUN, then DestinationPorts is the + // set of ports on which to connect to the host referred to by Destination. + DestinationPorts tailcfg.PortRange +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (t *Target) UnmarshalJSON(buf []byte) error { + return jsonv2.Unmarshal(buf, t) +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
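// For reference while reading the decoder and encoder below, target strings
// take one of these shapes (derived from the parsing rules in this file;
// the concrete hosts and paths are illustrative only):
//
//	"TUN"                                  TUN/L3 mode; only valid with source "*"
//	"file:///srv/www"                      a file or directory path
//	"http://127.0.0.1:8080"                reverse proxy to an HTTP backend
//	"tls-terminated-tcp://127.0.0.1:8443"  TCP forward, TLS terminated before proxying
//
// Putting it together, a services config file in this format could look like
// the following (JSON shape inferred from the struct tags above; the "svc:"
// service-name prefix is assumed):
//
//	{
//	  "version": "0.0.1",
//	  "services": {
//	    "svc:web": {
//	      "endpoints": {
//	        "tcp:80":  "http://127.0.0.1:8080",
//	        "tcp:443": "tls-terminated-tcp://127.0.0.1:8443"
//	      }
//	    }
//	  }
//	}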
+func (t *Target) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + var str string + if err := jsonv2.UnmarshalDecode(dec, &str); err != nil { + return err + } + + // The TUN case does not look like a standard :// arrangement, + // so handled separately. + if str == "TUN" { + t.Protocol = ProtoTUN + t.Destination = "" + t.DestinationPorts = tailcfg.PortRangeAny + return nil + } + + proto, rest, found := strings.Cut(str, "://") + if !found { + return errors.New("handler not of form ://") + } + + switch ServiceProtocol(proto) { + case ProtoFile: + target := path.Clean(rest) + t.Protocol = ProtoFile + t.Destination = target + t.DestinationPorts = tailcfg.PortRange{} + case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP: + host, portRange, err := tailcfg.ParseHostPortRange(rest) + if err != nil { + return err + } + t.Protocol = ServiceProtocol(proto) + t.Destination = host + t.DestinationPorts = portRange + default: + return errors.New("unsupported protocol") + } + + return nil +} + +func (t *Target) MarshalText() ([]byte, error) { + var out string + switch t.Protocol { + case ProtoFile: + out = fmt.Sprintf("%s://%s", t.Protocol, t.Destination) + case ProtoTUN: + out = "TUN" + case ProtoHTTP, ProtoHTTPS, ProtoHTTPSInsecure, ProtoTCP, ProtoTLSTerminatedTCP: + out = fmt.Sprintf("%s://%s", t.Protocol, net.JoinHostPort(t.Destination, t.DestinationPorts.String())) + default: + return nil, errors.New("unsupported protocol") + } + return []byte(out), nil +} + +func LoadServicesConfig(filename string, forService string) (*ServicesConfigFile, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + var json []byte + if hujsonStandardize != nil { + json, err = hujsonStandardize(data) + if err != nil { + return nil, err + } + } else { + json = data + } + var ver struct { + Version string `json:"version"` + } + if err = jsonv2.Unmarshal(json, &ver); err != nil { + return nil, fmt.Errorf("could not parse config file version: %w", err) + } + switch ver.Version { + case "": + return nil, errors.New("config file must have \"version\" field") + case "0.0.1": + return loadConfigV0(json, forService) + } + return nil, fmt.Errorf("unsupported config file version %q", ver.Version) +} + +func loadConfigV0(json []byte, forService string) (*ServicesConfigFile, error) { + var scf ServicesConfigFile + if svcName := tailcfg.AsServiceName(forService); svcName != "" { + var sdf ServiceDetailsFile + err := jsonv2.Unmarshal(json, &sdf, jsonv2.RejectUnknownMembers(true)) + if err != nil { + return nil, err + } + mak.Set(&scf.Services, svcName, &sdf) + + } else { + err := jsonv2.Unmarshal(json, &scf, jsonv2.RejectUnknownMembers(true)) + if err != nil { + return nil, err + } + } + for svcName, svc := range scf.Services { + if forService == "" && svc.Version != "" { + return nil, errors.New("services cannot be versioned separately from config file") + } + if err := svcName.Validate(); err != nil { + return nil, err + } + if svc.Endpoints == nil { + return nil, fmt.Errorf("service %q: missing \"endpoints\" field", svcName) + } + var sourcePorts []tailcfg.PortRange + foundTUN := false + foundNonTUN := false + for ppr, target := range svc.Endpoints { + if target.Protocol == "TUN" { + if ppr.Proto != 0 || ppr.Ports != tailcfg.PortRangeAny { + return nil, fmt.Errorf("service %q: destination \"TUN\" can only be used with source \"*\"", svcName) + } + foundTUN = true + } else { + if ppr.Ports.Last-ppr.Ports.First != target.DestinationPorts.Last-target.DestinationPorts.First { 
+ return nil, fmt.Errorf("service %q: source and destination port ranges must be of equal size", svcName.String()) + } + foundNonTUN = true + } + if foundTUN && foundNonTUN { + return nil, fmt.Errorf("service %q: cannot mix TUN mode with non-TUN mode", svcName) + } + if pr := findOverlappingRange(sourcePorts, ppr.Ports); pr != nil { + return nil, fmt.Errorf("service %q: source port ranges %q and %q overlap", svcName, pr.String(), ppr.Ports.String()) + } + sourcePorts = append(sourcePorts, ppr.Ports) + } + } + return &scf, nil +} + +// findOverlappingRange finds and returns a reference to a [tailcfg.PortRange] +// in haystack that overlaps with needle. It returns nil if it doesn't find one. +func findOverlappingRange(haystack []tailcfg.PortRange, needle tailcfg.PortRange) *tailcfg.PortRange { + for _, pr := range haystack { + if pr.Contains(needle.First) || pr.Contains(needle.Last) || needle.Contains(pr.First) || needle.Contains(pr.Last) { + return &pr + } + } + return nil +} diff --git a/ipn/ipn_view.go b/ipn/ipn_view.go index 170dc409b2095..1c7639f6ff932 100644 --- a/ipn/ipn_view.go +++ b/ipn/ipn_view.go @@ -421,6 +421,8 @@ func (v PrefsView) PostureChecking() bool { return v.ж.PostureChecking } // NetfilterKind specifies what netfilter implementation to use. // +// It can be "iptables", "nftables", or "" to auto-detect. +// // Linux-only. func (v PrefsView) NetfilterKind() string { return v.ж.NetfilterKind } diff --git a/ipn/ipnauth/ipnauth.go b/ipn/ipnauth/ipnauth.go index e6560570cd755..497f30f8c198e 100644 --- a/ipn/ipnauth/ipnauth.go +++ b/ipn/ipnauth/ipnauth.go @@ -14,8 +14,8 @@ import ( "runtime" "strconv" - "github.com/tailscale/peercred" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/safesocket" "tailscale.com/types/logger" @@ -63,8 +63,8 @@ type ConnIdentity struct { notWindows bool // runtime.GOOS != "windows" // Fields used when NotWindows: - isUnixSock bool // Conn is a *net.UnixConn - creds *peercred.Creds // or nil + isUnixSock bool // Conn is a *net.UnixConn + creds PeerCreds // or nil if peercred.Get was not implemented on this OS // Used on Windows: // TODO(bradfitz): merge these into the peercreds package and @@ -78,6 +78,13 @@ type ConnIdentity struct { // It's suitable for passing to LookupUserFromID (os/user.LookupId) on any // operating system. func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID { + if !buildfeatures.HasDebug && runtime.GOOS != "windows" { + // This function is only implemented on non-Windows for simulating + // Windows in tests. But that test (per comments below) is broken + // anyway. So disable this testing path in non-debug builds + // and just do the thing that optimizes away. + return "" + } if envknob.GOOS() != "windows" { return "" } @@ -97,9 +104,18 @@ func (ci *ConnIdentity) WindowsUserID() ipn.WindowsUserID { return "" } -func (ci *ConnIdentity) Pid() int { return ci.pid } -func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock } -func (ci *ConnIdentity) Creds() *peercred.Creds { return ci.creds } +func (ci *ConnIdentity) Pid() int { return ci.pid } +func (ci *ConnIdentity) IsUnixSock() bool { return ci.isUnixSock } +func (ci *ConnIdentity) Creds() PeerCreds { return ci.creds } + +// PeerCreds is the interface for a github.com/tailscale/peercred.Creds, +// if linked into the binary. +// +// (It's not used on some platforms, or if ts_omit_unixsocketidentity is set.) 
+type PeerCreds interface { + UserID() (uid string, ok bool) + PID() (pid int, ok bool) +} var metricIssue869Workaround = clientmetric.NewCounter("issue_869_workaround") diff --git a/ipn/ipnauth/ipnauth_notwindows.go b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go similarity index 72% rename from ipn/ipnauth/ipnauth_notwindows.go rename to ipn/ipnauth/ipnauth_omit_unixsocketidentity.go index d9d11bd0a17a1..defe7d89c409b 100644 --- a/ipn/ipnauth/ipnauth_notwindows.go +++ b/ipn/ipnauth/ipnauth_omit_unixsocketidentity.go @@ -1,14 +1,13 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !windows +//go:build !windows && ts_omit_unixsocketidentity package ipnauth import ( "net" - "github.com/tailscale/peercred" "tailscale.com/types/logger" ) @@ -16,12 +15,7 @@ import ( // based on the user who owns the other end of the connection. // and couldn't. The returned connIdentity has NotWindows set to true. func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { - ci = &ConnIdentity{conn: c, notWindows: true} - _, ci.isUnixSock = c.(*net.UnixConn) - if ci.creds, _ = peercred.Get(c); ci.creds != nil { - ci.pid, _ = ci.creds.PID() - } - return ci, nil + return &ConnIdentity{conn: c, notWindows: true}, nil } // WindowsToken is unsupported when GOOS != windows and always returns diff --git a/ipn/ipnauth/ipnauth_unix_creds.go b/ipn/ipnauth/ipnauth_unix_creds.go new file mode 100644 index 0000000000000..89a9ceaa99388 --- /dev/null +++ b/ipn/ipnauth/ipnauth_unix_creds.go @@ -0,0 +1,37 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !windows && !ts_omit_unixsocketidentity + +package ipnauth + +import ( + "net" + + "github.com/tailscale/peercred" + "tailscale.com/types/logger" +) + +// GetConnIdentity extracts the identity information from the connection +// based on the user who owns the other end of the connection. +// and couldn't. The returned connIdentity has NotWindows set to true. +func GetConnIdentity(_ logger.Logf, c net.Conn) (ci *ConnIdentity, err error) { + ci = &ConnIdentity{conn: c, notWindows: true} + _, ci.isUnixSock = c.(*net.UnixConn) + if creds, err := peercred.Get(c); err == nil { + ci.creds = creds + ci.pid, _ = ci.creds.PID() + } else if err == peercred.ErrNotImplemented { + // peercred.Get is not implemented on this OS (such as OpenBSD) + // Just leave creds as nil, as documented. + } else { + return nil, err + } + return ci, nil +} + +// WindowsToken is unsupported when GOOS != windows and always returns +// ErrNotImplemented. +func (ci *ConnIdentity) WindowsToken() (WindowsToken, error) { + return nil, ErrNotImplemented +} diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index 42366dbd94990..eeee324352387 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -8,6 +8,7 @@ import ( "fmt" "tailscale.com/client/tailscale/apitype" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/syspolicy/pkey" @@ -51,6 +52,9 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. 
func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { + if !buildfeatures.HasSystemPolicy { + return nil + } if alwaysOn, _ := policyclient.Get().GetBoolean(pkey.AlwaysOn, false); !alwaysOn { return nil } diff --git a/ipn/ipnext/ipnext.go b/ipn/ipnext/ipnext.go index 066763ba4d2fa..4ff37dc8e3775 100644 --- a/ipn/ipnext/ipnext.go +++ b/ipn/ipnext/ipnext.go @@ -372,6 +372,10 @@ type Hooks struct { // SetPeerStatus is called to mutate PeerStatus. // Callers must only use NodeBackend to read data. SetPeerStatus feature.Hooks[func(*ipnstate.PeerStatus, tailcfg.NodeView, NodeBackend)] + + // ShouldUploadServices reports whether this node should include services + // in Hostinfo from the portlist extension. + ShouldUploadServices feature.Hook[func() bool] } // NodeBackend is an interface to query the current node and its peers. @@ -398,4 +402,9 @@ type NodeBackend interface { // It effectively just reports whether PeerAPIBase(node) is non-empty, but // potentially more efficiently. PeerHasPeerAPI(tailcfg.NodeView) bool + + // CollectServices reports whether the control plane is telling this + // node that the portlist service collection is desirable, should it + // choose to report them. + CollectServices() bool } diff --git a/ipn/ipnlocal/autoupdate.go b/ipn/ipnlocal/autoupdate.go deleted file mode 100644 index b7d217a10b5b0..0000000000000 --- a/ipn/ipnlocal/autoupdate.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux || windows - -package ipnlocal - -import ( - "context" - "time" - - "tailscale.com/clientupdate" - "tailscale.com/ipn" - "tailscale.com/version" -) - -func (b *LocalBackend) stopOfflineAutoUpdate() { - if b.offlineAutoUpdateCancel != nil { - b.logf("offline auto-update: stopping update checks") - b.offlineAutoUpdateCancel() - b.offlineAutoUpdateCancel = nil - } -} - -func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) { - if !prefs.AutoUpdate().Apply.EqualBool(true) { - return - } - // AutoUpdate.Apply field in prefs can only be true for platforms that - // support auto-updates. But check it here again, just in case. - if !clientupdate.CanAutoUpdate() { - return - } - // On macsys, auto-updates are managed by Sparkle. - if version.IsMacSysExt() { - return - } - - if b.offlineAutoUpdateCancel != nil { - // Already running. - return - } - ctx, cancel := context.WithCancel(context.Background()) - b.offlineAutoUpdateCancel = cancel - - b.logf("offline auto-update: starting update checks") - go b.offlineAutoUpdate(ctx) -} - -const offlineAutoUpdateCheckPeriod = time.Hour - -func (b *LocalBackend) offlineAutoUpdate(ctx context.Context) { - t := time.NewTicker(offlineAutoUpdateCheckPeriod) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - } - if err := b.startAutoUpdate("offline auto-update"); err != nil { - b.logf("offline auto-update: failed: %v", err) - } - } -} diff --git a/ipn/ipnlocal/autoupdate_disabled.go b/ipn/ipnlocal/autoupdate_disabled.go deleted file mode 100644 index 88ed68c95fd48..0000000000000 --- a/ipn/ipnlocal/autoupdate_disabled.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !(linux || windows) - -package ipnlocal - -import ( - "tailscale.com/ipn" -) - -func (b *LocalBackend) stopOfflineAutoUpdate() { - // Not supported on this platform. 
-} - -func (b *LocalBackend) maybeStartOfflineAutoUpdate(prefs ipn.PrefsView) { - // Not supported on this platform. -} diff --git a/ipn/ipnlocal/c2n.go b/ipn/ipnlocal/c2n.go index 2c13f06198455..0c228060faf63 100644 --- a/ipn/ipnlocal/c2n.go +++ b/ipn/ipnlocal/c2n.go @@ -4,77 +4,71 @@ package ipnlocal import ( - "crypto/x509" "encoding/json" - "encoding/pem" - "errors" "fmt" "io" "net/http" - "os" - "os/exec" "path" - "path/filepath" + "reflect" "runtime" "strconv" "strings" "time" - "tailscale.com/clientupdate" - "tailscale.com/envknob" + "tailscale.com/control/controlclient" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" + "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/net/sockstats" - "tailscale.com/posture" "tailscale.com/tailcfg" + "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" "tailscale.com/util/goroutines" + "tailscale.com/util/httpm" "tailscale.com/util/set" - "tailscale.com/util/syspolicy/pkey" - "tailscale.com/util/syspolicy/ptype" "tailscale.com/version" - "tailscale.com/version/distro" ) // c2nHandlers maps an HTTP method and URI path (without query parameters) to // its handler. The exact method+path match is preferred, but if no entry // exists for that, a map entry with an empty method is used as a fallback. -var c2nHandlers = map[methodAndPath]c2nHandler{ - // Debug. - req("/echo"): handleC2NEcho, - req("/debug/goroutines"): handleC2NDebugGoroutines, - req("/debug/prefs"): handleC2NDebugPrefs, - req("/debug/metrics"): handleC2NDebugMetrics, - req("/debug/component-logging"): handleC2NDebugComponentLogging, - req("/debug/logheap"): handleC2NDebugLogHeap, +var c2nHandlers map[methodAndPath]c2nHandler - // PPROF - We only expose a subset of typical pprof endpoints for security. - req("/debug/pprof/heap"): handleC2NPprof, - req("/debug/pprof/allocs"): handleC2NPprof, - - req("POST /logtail/flush"): handleC2NLogtailFlush, - req("POST /sockstats"): handleC2NSockStats, - - // Check TLS certificate status. - req("GET /tls-cert-status"): handleC2NTLSCertStatus, - - // SSH - req("/ssh/usernames"): handleC2NSSHUsernames, - - // Auto-updates. - req("GET /update"): handleC2NUpdateGet, - req("POST /update"): handleC2NUpdatePost, - - // Device posture. - req("GET /posture/identity"): handleC2NPostureIdentityGet, - - // App Connectors. - req("GET /appconnector/routes"): handleC2NAppConnectorDomainRoutesGet, - - // Linux netfilter. - req("POST /netfilter-kind"): handleC2NSetNetfilterKind, - - // VIP services. - req("GET /vip-services"): handleC2NVIPServicesGet, +func init() { + c2nHandlers = map[methodAndPath]c2nHandler{} + if buildfeatures.HasC2N { + // Echo is the basic "ping" handler as used by the control plane to probe + // whether a node is reachable. In particular, it's important for + // high-availability subnet routers for the control plane to probe which of + // several candidate nodes is reachable and actually alive. + RegisterC2N("/echo", handleC2NEcho) + } + if buildfeatures.HasSSH { + RegisterC2N("/ssh/usernames", handleC2NSSHUsernames) + } + if buildfeatures.HasLogTail { + RegisterC2N("POST /logtail/flush", handleC2NLogtailFlush) + } + if buildfeatures.HasDebug { + RegisterC2N("POST /sockstats", handleC2NSockStats) + + // pprof: + // we only expose a subset of typical pprof endpoints for security. 
+ RegisterC2N("/debug/pprof/heap", handleC2NPprof) + RegisterC2N("/debug/pprof/allocs", handleC2NPprof) + + RegisterC2N("/debug/goroutines", handleC2NDebugGoroutines) + RegisterC2N("/debug/prefs", handleC2NDebugPrefs) + RegisterC2N("/debug/metrics", handleC2NDebugMetrics) + RegisterC2N("/debug/component-logging", handleC2NDebugComponentLogging) + RegisterC2N("/debug/logheap", handleC2NDebugLogHeap) + RegisterC2N("/debug/netmap", handleC2NDebugNetMap) + RegisterC2N("/debug/health", handleC2NDebugHealth) + } + if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { + RegisterC2N("POST /netfilter-kind", handleC2NSetNetfilterKind) + } } // RegisterC2N registers a new c2n handler for the given pattern. @@ -82,6 +76,9 @@ var c2nHandlers = map[methodAndPath]c2nHandler{ // A pattern is like "GET /foo" (specific to an HTTP method) or "/foo" (all // methods). It panics if the pattern is already registered. func RegisterC2N(pattern string, h func(*LocalBackend, http.ResponseWriter, *http.Request)) { + if !buildfeatures.HasC2N { + return + } k := req(pattern) if _, ok := c2nHandlers[k]; ok { panic(fmt.Sprintf("c2n: duplicate handler for %q", pattern)) @@ -150,21 +147,109 @@ func handleC2NLogtailFlush(b *LocalBackend, w http.ResponseWriter, r *http.Reque } } +func handleC2NDebugHealth(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + var st *health.State + if buildfeatures.HasDebug && b.health != nil { + st = b.health.CurrentState() + } + writeJSON(w, st) +} + +func handleC2NDebugNetMap(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + ctx := r.Context() + if r.Method != httpm.POST && r.Method != httpm.GET { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + b.logf("c2n: %s /debug/netmap received", r.Method) + + // redactAndMarshal redacts private keys from the given netmap, clears fields + // that should be omitted, and marshals it to JSON. 
+ redactAndMarshal := func(nm *netmap.NetworkMap, omitFields []string) (json.RawMessage, error) { + for _, f := range omitFields { + field := reflect.ValueOf(nm).Elem().FieldByName(f) + if !field.IsValid() { + b.logf("c2n: /debug/netmap: unknown field %q in omitFields", f) + continue + } + field.SetZero() + } + nm, _ = redactNetmapPrivateKeys(nm) + return json.Marshal(nm) + } + + var omitFields []string + resp := &tailcfg.C2NDebugNetmapResponse{} + + if r.Method == httpm.POST { + var req tailcfg.C2NDebugNetmapRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, fmt.Sprintf("failed to decode request body: %v", err), http.StatusBadRequest) + return + } + omitFields = req.OmitFields + + if req.Candidate != nil { + cand, err := controlclient.NetmapFromMapResponseForDebug(ctx, b.unsanitizedPersist(), req.Candidate) + if err != nil { + http.Error(w, fmt.Sprintf("failed to convert candidate MapResponse: %v", err), http.StatusBadRequest) + return + } + candJSON, err := redactAndMarshal(cand, omitFields) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal candidate netmap: %v", err), http.StatusInternalServerError) + return + } + resp.Candidate = candJSON + } + } + + var err error + resp.Current, err = redactAndMarshal(b.currentNode().netMapWithPeers(), omitFields) + if err != nil { + http.Error(w, fmt.Sprintf("failed to marshal current netmap: %v", err), http.StatusInternalServerError) + return + } + + writeJSON(w, resp) +} + func handleC2NDebugGoroutines(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } w.Header().Set("Content-Type", "text/plain") w.Write(goroutines.ScrubbedGoroutineDump(true)) } func handleC2NDebugPrefs(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } writeJSON(w, b.Prefs()) } func handleC2NDebugMetrics(_ *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } w.Header().Set("Content-Type", "text/plain") clientmetric.WritePrometheusExpositionFormat(w) } func handleC2NDebugComponentLogging(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } component := r.FormValue("component") secs, _ := strconv.Atoi(r.FormValue("secs")) if secs == 0 { @@ -207,6 +292,10 @@ func handleC2NPprof(b *LocalBackend, w http.ResponseWriter, r *http.Request) { } func handleC2NSSHUsernames(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasSSH { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } var req tailcfg.C2NSSHUsernamesRequest if r.Method == "POST" { if err := json.NewDecoder(r.Body).Decode(&req); err != nil { @@ -233,27 +322,6 @@ func handleC2NSockStats(b *LocalBackend, w http.ResponseWriter, r *http.Request) fmt.Fprintf(w, "debug info: %v\n", sockstats.DebugInfo()) } -// handleC2NAppConnectorDomainRoutesGet handles returning the domains -// that the app connector is responsible for, as well as the resolved -// IP addresses for each domain. If the node is not configured as -// an app connector, an empty map is returned. 
-func handleC2NAppConnectorDomainRoutesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /appconnector/routes received") - - var res tailcfg.C2NAppConnectorDomainRoutesResponse - appConnector := b.AppConnector() - if appConnector == nil { - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) - return - } - - res.Domains = appConnector.DomainRoutes() - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.Request) { b.logf("c2n: POST /netfilter-kind received") @@ -279,285 +347,3 @@ func handleC2NSetNetfilterKind(b *LocalBackend, w http.ResponseWriter, r *http.R w.WriteHeader(http.StatusNoContent) } - -func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /vip-services received") - var res tailcfg.C2NVIPServicesResponse - res.VIPServices = b.VIPServices() - res.ServicesHash = b.vipServiceHash(res.VIPServices) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func handleC2NUpdateGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /update received") - - res := b.newC2NUpdateResponse() - res.Started = b.c2nUpdateStarted() - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func handleC2NUpdatePost(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: POST /update received") - res := b.newC2NUpdateResponse() - defer func() { - if res.Err != "" { - b.logf("c2n: POST /update failed: %s", res.Err) - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) - }() - - if !res.Enabled { - res.Err = "not enabled" - return - } - if !res.Supported { - res.Err = "not supported" - return - } - - // Do not update if we have active inbound SSH connections. Control can set - // force=true query parameter to override this. - if r.FormValue("force") != "true" && b.sshServer != nil && b.sshServer.NumActiveConns() > 0 { - res.Err = "not updating due to active SSH connections" - return - } - - if err := b.startAutoUpdate("c2n"); err != nil { - res.Err = err.Error() - return - } - res.Started = true -} - -func handleC2NPostureIdentityGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - b.logf("c2n: GET /posture/identity received") - - res := tailcfg.C2NPostureIdentityResponse{} - - // Only collect posture identity if enabled on the client, - // this will first check syspolicy, MDM settings like Registry - // on Windows or defaults on macOS. If they are not set, it falls - // back to the cli-flag, `--posture-checking`. - choice, err := b.polc.GetPreferenceOption(pkey.PostureChecking, ptype.ShowChoiceByPolicy) - if err != nil { - b.logf( - "c2n: failed to read PostureChecking from syspolicy, returning default from CLI: %s; got error: %s", - b.Prefs().PostureChecking(), - err, - ) - } - - if choice.ShouldEnable(b.Prefs().PostureChecking()) { - res.SerialNumbers, err = posture.GetSerialNumbers(b.polc, b.logf) - if err != nil { - b.logf("c2n: GetSerialNumbers returned error: %v", err) - } - - // TODO(tailscale/corp#21371, 2024-07-10): once this has landed in a stable release - // and looks good in client metrics, remove this parameter and always report MAC - // addresses. 
- if r.FormValue("hwaddrs") == "true" { - res.IfaceHardwareAddrs, err = b.getHardwareAddrs() - if err != nil { - b.logf("c2n: GetHardwareAddrs returned error: %v", err) - } - } - } else { - res.PostureDisabled = true - } - - b.logf("c2n: posture identity disabled=%v reported %d serials %d hwaddrs", res.PostureDisabled, len(res.SerialNumbers), len(res.IfaceHardwareAddrs)) - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func (b *LocalBackend) newC2NUpdateResponse() tailcfg.C2NUpdateResponse { - // If NewUpdater does not return an error, we can update the installation. - // - // Note that we create the Updater solely to check for errors; we do not - // invoke it here. For this purpose, it is ok to pass it a zero Arguments. - prefs := b.Prefs().AutoUpdate() - return tailcfg.C2NUpdateResponse{ - Enabled: envknob.AllowsRemoteUpdate() || prefs.Apply.EqualBool(true), - Supported: clientupdate.CanAutoUpdate() && !version.IsMacSysExt(), - } -} - -func (b *LocalBackend) c2nUpdateStarted() bool { - b.mu.Lock() - defer b.mu.Unlock() - return b.c2nUpdateStatus.started -} - -func (b *LocalBackend) setC2NUpdateStarted(v bool) { - b.mu.Lock() - defer b.mu.Unlock() - b.c2nUpdateStatus.started = v -} - -func (b *LocalBackend) trySetC2NUpdateStarted() bool { - b.mu.Lock() - defer b.mu.Unlock() - if b.c2nUpdateStatus.started { - return false - } - b.c2nUpdateStatus.started = true - return true -} - -// findCmdTailscale looks for the cmd/tailscale that corresponds to the -// currently running cmd/tailscaled. It's up to the caller to verify that the -// two match, but this function does its best to find the right one. Notably, it -// doesn't use $PATH for security reasons. -func findCmdTailscale() (string, error) { - self, err := os.Executable() - if err != nil { - return "", err - } - var ts string - switch runtime.GOOS { - case "linux": - if self == "/usr/sbin/tailscaled" || self == "/usr/bin/tailscaled" { - ts = "/usr/bin/tailscale" - } - if self == "/usr/local/sbin/tailscaled" || self == "/usr/local/bin/tailscaled" { - ts = "/usr/local/bin/tailscale" - } - switch distro.Get() { - case distro.QNAP: - // The volume under /share/ where qpkg are installed is not - // predictable. But the rest of the path is. - ok, err := filepath.Match("/share/*/.qpkg/Tailscale/tailscaled", self) - if err == nil && ok { - ts = filepath.Join(filepath.Dir(self), "tailscale") - } - case distro.Unraid: - if self == "/usr/local/emhttp/plugins/tailscale/bin/tailscaled" { - ts = "/usr/local/emhttp/plugins/tailscale/bin/tailscale" - } - } - case "windows": - ts = filepath.Join(filepath.Dir(self), "tailscale.exe") - case "freebsd", "openbsd": - if self == "/usr/local/bin/tailscaled" { - ts = "/usr/local/bin/tailscale" - } - default: - return "", fmt.Errorf("unsupported OS %v", runtime.GOOS) - } - if ts != "" && regularFileExists(ts) { - return ts, nil - } - return "", errors.New("tailscale executable not found in expected place") -} - -func tailscaleUpdateCmd(cmdTS string) *exec.Cmd { - defaultCmd := exec.Command(cmdTS, "update", "--yes") - if runtime.GOOS != "linux" { - return defaultCmd - } - if _, err := exec.LookPath("systemd-run"); err != nil { - return defaultCmd - } - - // When systemd-run is available, use it to run the update command. This - // creates a new temporary unit separate from the tailscaled unit. When - // tailscaled is restarted during the update, systemd won't kill this - // temporary update unit, which could cause unexpected breakage. 
- // - // We want to use a few optional flags: - // * --wait, to block the update command until completion (added in systemd 232) - // * --pipe, to collect stdout/stderr (added in systemd 235) - // * --collect, to clean up failed runs from memory (added in systemd 236) - // - // We need to check the version of systemd to figure out if those flags are - // available. - // - // The output will look like: - // - // systemd 255 (255.7-1-arch) - // +PAM +AUDIT ... other feature flags ... - systemdVerOut, err := exec.Command("systemd-run", "--version").Output() - if err != nil { - return defaultCmd - } - parts := strings.Fields(string(systemdVerOut)) - if len(parts) < 2 || parts[0] != "systemd" { - return defaultCmd - } - systemdVer, err := strconv.Atoi(parts[1]) - if err != nil { - return defaultCmd - } - if systemdVer >= 236 { - return exec.Command("systemd-run", "--wait", "--pipe", "--collect", cmdTS, "update", "--yes") - } else if systemdVer >= 235 { - return exec.Command("systemd-run", "--wait", "--pipe", cmdTS, "update", "--yes") - } else if systemdVer >= 232 { - return exec.Command("systemd-run", "--wait", cmdTS, "update", "--yes") - } else { - return exec.Command("systemd-run", cmdTS, "update", "--yes") - } -} - -func regularFileExists(path string) bool { - fi, err := os.Stat(path) - return err == nil && fi.Mode().IsRegular() -} - -// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the -// provided domain. This can be called by the controlplane to clean up DNS TXT -// records when they're no longer needed by LetsEncrypt. -// -// It does not kick off a cert fetch or async refresh. It only reports anything -// that's already sitting on disk, and only reports metadata about the public -// cert (stuff that'd be the in CT logs anyway). 
-func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) { - cs, err := b.getCertStore() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - domain := r.FormValue("domain") - if domain == "" { - http.Error(w, "no 'domain'", http.StatusBadRequest) - return - } - - ret := &tailcfg.C2NTLSCertInfo{} - pair, err := getCertPEMCached(cs, domain, b.clock.Now()) - ret.Valid = err == nil - if err != nil { - ret.Error = err.Error() - if errors.Is(err, errCertExpired) { - ret.Expired = true - } else if errors.Is(err, ipn.ErrStateNotExist) { - ret.Missing = true - ret.Error = "no certificate" - } - } else { - block, _ := pem.Decode(pair.CertPEM) - if block == nil { - ret.Error = "invalid PEM" - ret.Valid = false - } else { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - ret.Error = fmt.Sprintf("invalid certificate: %v", err) - ret.Valid = false - } else { - ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339) - ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339) - } - } - } - - writeJSON(w, ret) -} diff --git a/ipn/ipnlocal/c2n_pprof.go b/ipn/ipnlocal/c2n_pprof.go index b4bc35790973a..13237cc4fad2f 100644 --- a/ipn/ipnlocal/c2n_pprof.go +++ b/ipn/ipnlocal/c2n_pprof.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js && !wasm +//go:build !js && !wasm && !ts_omit_debug package ipnlocal diff --git a/ipn/ipnlocal/c2n_test.go b/ipn/ipnlocal/c2n_test.go index cc31e284af8a1..95cd5fa6995bc 100644 --- a/ipn/ipnlocal/c2n_test.go +++ b/ipn/ipnlocal/c2n_test.go @@ -4,6 +4,7 @@ package ipnlocal import ( + "bytes" "cmp" "crypto/x509" "encoding/json" @@ -12,14 +13,24 @@ import ( "os" "path/filepath" "reflect" + "strings" "testing" "time" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/types/ipproto" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/netmap" + "tailscale.com/types/views" "tailscale.com/util/must" + "tailscale.com/util/set" + "tailscale.com/wgengine/filter/filtertype" + + gcmp "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" ) func TestHandleC2NTLSCertStatus(t *testing.T) { @@ -132,3 +143,425 @@ func TestHandleC2NTLSCertStatus(t *testing.T) { } } + +// eachStructField calls cb for each struct field in struct type tp, recursively. +func eachStructField(tp reflect.Type, cb func(reflect.Type, reflect.StructField)) { + if !strings.HasPrefix(tp.PkgPath(), "tailscale.com/") { + // Stop traversing when we reach a non-tailscale type. + return + } + + for i := range tp.NumField() { + cb(tp, tp.Field(i)) + + switch tp.Field(i).Type.Kind() { + case reflect.Struct: + eachStructField(tp.Field(i).Type, cb) + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: + if tp.Field(i).Type.Elem().Kind() == reflect.Struct { + eachStructField(tp.Field(i).Type.Elem(), cb) + } + } + } +} + +// eachStructValue calls cb for each struct field in the struct value v, recursively. 
+func eachStructValue(v reflect.Value, cb func(reflect.Type, reflect.StructField, reflect.Value)) { + if v.IsZero() { + return + } + + for i := range v.NumField() { + cb(v.Type(), v.Type().Field(i), v.Field(i)) + + switch v.Type().Field(i).Type.Kind() { + case reflect.Struct: + eachStructValue(v.Field(i), cb) + case reflect.Slice, reflect.Array, reflect.Ptr, reflect.Map: + if v.Field(i).Type().Elem().Kind() == reflect.Struct { + eachStructValue(v.Field(i).Addr().Elem(), cb) + } + } + } +} + +// TestRedactNetmapPrivateKeys tests that redactNetmapPrivateKeys redacts all private keys +// and other private fields from a netmap.NetworkMap, and only those fields. +func TestRedactNetmapPrivateKeys(t *testing.T) { + type field struct { + t reflect.Type + f string + } + f := func(t any, f string) field { + return field{reflect.TypeOf(t), f} + } + // fields is a map of all struct fields in netmap.NetworkMap and its + // sub-structs, marking each field as private (true) or public (false). + // If you add a new field to netmap.NetworkMap or its sub-structs, + // you must add it to this list, marking it as private or public. + fields := map[field]bool{ + // Private fields to be redacted. + f(netmap.NetworkMap{}, "PrivateKey"): true, + + // All other fields are public. + f(netmap.NetworkMap{}, "AllCaps"): false, + f(netmap.NetworkMap{}, "CollectServices"): false, + f(netmap.NetworkMap{}, "DERPMap"): false, + f(netmap.NetworkMap{}, "DNS"): false, + f(netmap.NetworkMap{}, "DisplayMessages"): false, + f(netmap.NetworkMap{}, "Domain"): false, + f(netmap.NetworkMap{}, "DomainAuditLogID"): false, + f(netmap.NetworkMap{}, "Expiry"): false, + f(netmap.NetworkMap{}, "MachineKey"): false, + f(netmap.NetworkMap{}, "Name"): false, + f(netmap.NetworkMap{}, "NodeKey"): false, + f(netmap.NetworkMap{}, "PacketFilter"): false, + f(netmap.NetworkMap{}, "PacketFilterRules"): false, + f(netmap.NetworkMap{}, "Peers"): false, + f(netmap.NetworkMap{}, "SSHPolicy"): false, + f(netmap.NetworkMap{}, "SelfNode"): false, + f(netmap.NetworkMap{}, "TKAEnabled"): false, + f(netmap.NetworkMap{}, "TKAHead"): false, + f(netmap.NetworkMap{}, "UserProfiles"): false, + f(filtertype.CapMatch{}, "Cap"): false, + f(filtertype.CapMatch{}, "Dst"): false, + f(filtertype.CapMatch{}, "Values"): false, + f(filtertype.Match{}, "Caps"): false, + f(filtertype.Match{}, "Dsts"): false, + f(filtertype.Match{}, "IPProto"): false, + f(filtertype.Match{}, "SrcCaps"): false, + f(filtertype.Match{}, "Srcs"): false, + f(filtertype.Match{}, "SrcsContains"): false, + f(filtertype.NetPortRange{}, "Net"): false, + f(filtertype.NetPortRange{}, "Ports"): false, + f(filtertype.PortRange{}, "First"): false, + f(filtertype.PortRange{}, "Last"): false, + f(key.DiscoPublic{}, "k"): false, + f(key.MachinePublic{}, "k"): false, + f(key.NodePrivate{}, "_"): false, + f(key.NodePrivate{}, "k"): false, + f(key.NodePublic{}, "k"): false, + f(tailcfg.CapGrant{}, "CapMap"): false, + f(tailcfg.CapGrant{}, "Caps"): false, + f(tailcfg.CapGrant{}, "Dsts"): false, + f(tailcfg.DERPHomeParams{}, "RegionScore"): false, + f(tailcfg.DERPMap{}, "HomeParams"): false, + f(tailcfg.DERPMap{}, "OmitDefaultRegions"): false, + f(tailcfg.DERPMap{}, "Regions"): false, + f(tailcfg.DNSConfig{}, "CertDomains"): false, + f(tailcfg.DNSConfig{}, "Domains"): false, + f(tailcfg.DNSConfig{}, "ExitNodeFilteredSet"): false, + f(tailcfg.DNSConfig{}, "ExtraRecords"): false, + f(tailcfg.DNSConfig{}, "FallbackResolvers"): false, + f(tailcfg.DNSConfig{}, "Nameservers"): false, + f(tailcfg.DNSConfig{}, "Proxied"): 
false, + f(tailcfg.DNSConfig{}, "Resolvers"): false, + f(tailcfg.DNSConfig{}, "Routes"): false, + f(tailcfg.DNSConfig{}, "TempCorpIssue13969"): false, + f(tailcfg.DNSRecord{}, "Name"): false, + f(tailcfg.DNSRecord{}, "Type"): false, + f(tailcfg.DNSRecord{}, "Value"): false, + f(tailcfg.DisplayMessageAction{}, "Label"): false, + f(tailcfg.DisplayMessageAction{}, "URL"): false, + f(tailcfg.DisplayMessage{}, "ImpactsConnectivity"): false, + f(tailcfg.DisplayMessage{}, "PrimaryAction"): false, + f(tailcfg.DisplayMessage{}, "Severity"): false, + f(tailcfg.DisplayMessage{}, "Text"): false, + f(tailcfg.DisplayMessage{}, "Title"): false, + f(tailcfg.FilterRule{}, "CapGrant"): false, + f(tailcfg.FilterRule{}, "DstPorts"): false, + f(tailcfg.FilterRule{}, "IPProto"): false, + f(tailcfg.FilterRule{}, "SrcBits"): false, + f(tailcfg.FilterRule{}, "SrcIPs"): false, + f(tailcfg.HostinfoView{}, "ж"): false, + f(tailcfg.Hostinfo{}, "AllowsUpdate"): false, + f(tailcfg.Hostinfo{}, "App"): false, + f(tailcfg.Hostinfo{}, "AppConnector"): false, + f(tailcfg.Hostinfo{}, "BackendLogID"): false, + f(tailcfg.Hostinfo{}, "Cloud"): false, + f(tailcfg.Hostinfo{}, "Container"): false, + f(tailcfg.Hostinfo{}, "Desktop"): false, + f(tailcfg.Hostinfo{}, "DeviceModel"): false, + f(tailcfg.Hostinfo{}, "Distro"): false, + f(tailcfg.Hostinfo{}, "DistroCodeName"): false, + f(tailcfg.Hostinfo{}, "DistroVersion"): false, + f(tailcfg.Hostinfo{}, "Env"): false, + f(tailcfg.Hostinfo{}, "ExitNodeID"): false, + f(tailcfg.Hostinfo{}, "FrontendLogID"): false, + f(tailcfg.Hostinfo{}, "GoArch"): false, + f(tailcfg.Hostinfo{}, "GoArchVar"): false, + f(tailcfg.Hostinfo{}, "GoVersion"): false, + f(tailcfg.Hostinfo{}, "Hostname"): false, + f(tailcfg.Hostinfo{}, "IPNVersion"): false, + f(tailcfg.Hostinfo{}, "IngressEnabled"): false, + f(tailcfg.Hostinfo{}, "Location"): false, + f(tailcfg.Hostinfo{}, "Machine"): false, + f(tailcfg.Hostinfo{}, "NetInfo"): false, + f(tailcfg.Hostinfo{}, "NoLogsNoSupport"): false, + f(tailcfg.Hostinfo{}, "OS"): false, + f(tailcfg.Hostinfo{}, "OSVersion"): false, + f(tailcfg.Hostinfo{}, "Package"): false, + f(tailcfg.Hostinfo{}, "PushDeviceToken"): false, + f(tailcfg.Hostinfo{}, "RequestTags"): false, + f(tailcfg.Hostinfo{}, "RoutableIPs"): false, + f(tailcfg.Hostinfo{}, "SSH_HostKeys"): false, + f(tailcfg.Hostinfo{}, "Services"): false, + f(tailcfg.Hostinfo{}, "ServicesHash"): false, + f(tailcfg.Hostinfo{}, "ShareeNode"): false, + f(tailcfg.Hostinfo{}, "ShieldsUp"): false, + f(tailcfg.Hostinfo{}, "StateEncrypted"): false, + f(tailcfg.Hostinfo{}, "TPM"): false, + f(tailcfg.Hostinfo{}, "Userspace"): false, + f(tailcfg.Hostinfo{}, "UserspaceRouter"): false, + f(tailcfg.Hostinfo{}, "WireIngress"): false, + f(tailcfg.Hostinfo{}, "WoLMACs"): false, + f(tailcfg.Location{}, "City"): false, + f(tailcfg.Location{}, "CityCode"): false, + f(tailcfg.Location{}, "Country"): false, + f(tailcfg.Location{}, "CountryCode"): false, + f(tailcfg.Location{}, "Latitude"): false, + f(tailcfg.Location{}, "Longitude"): false, + f(tailcfg.Location{}, "Priority"): false, + f(tailcfg.NetInfo{}, "DERPLatency"): false, + f(tailcfg.NetInfo{}, "FirewallMode"): false, + f(tailcfg.NetInfo{}, "HairPinning"): false, + f(tailcfg.NetInfo{}, "HavePortMap"): false, + f(tailcfg.NetInfo{}, "LinkType"): false, + f(tailcfg.NetInfo{}, "MappingVariesByDestIP"): false, + f(tailcfg.NetInfo{}, "OSHasIPv6"): false, + f(tailcfg.NetInfo{}, "PCP"): false, + f(tailcfg.NetInfo{}, "PMP"): false, + f(tailcfg.NetInfo{}, "PreferredDERP"): false, + f(tailcfg.NetInfo{}, 
"UPnP"): false, + f(tailcfg.NetInfo{}, "WorkingICMPv4"): false, + f(tailcfg.NetInfo{}, "WorkingIPv6"): false, + f(tailcfg.NetInfo{}, "WorkingUDP"): false, + f(tailcfg.NetPortRange{}, "Bits"): false, + f(tailcfg.NetPortRange{}, "IP"): false, + f(tailcfg.NetPortRange{}, "Ports"): false, + f(tailcfg.NetPortRange{}, "_"): false, + f(tailcfg.NodeView{}, "ж"): false, + f(tailcfg.Node{}, "Addresses"): false, + f(tailcfg.Node{}, "AllowedIPs"): false, + f(tailcfg.Node{}, "Cap"): false, + f(tailcfg.Node{}, "CapMap"): false, + f(tailcfg.Node{}, "Capabilities"): false, + f(tailcfg.Node{}, "ComputedName"): false, + f(tailcfg.Node{}, "ComputedNameWithHost"): false, + f(tailcfg.Node{}, "Created"): false, + f(tailcfg.Node{}, "DataPlaneAuditLogID"): false, + f(tailcfg.Node{}, "DiscoKey"): false, + f(tailcfg.Node{}, "Endpoints"): false, + f(tailcfg.Node{}, "ExitNodeDNSResolvers"): false, + f(tailcfg.Node{}, "Expired"): false, + f(tailcfg.Node{}, "HomeDERP"): false, + f(tailcfg.Node{}, "Hostinfo"): false, + f(tailcfg.Node{}, "ID"): false, + f(tailcfg.Node{}, "IsJailed"): false, + f(tailcfg.Node{}, "IsWireGuardOnly"): false, + f(tailcfg.Node{}, "Key"): false, + f(tailcfg.Node{}, "KeyExpiry"): false, + f(tailcfg.Node{}, "KeySignature"): false, + f(tailcfg.Node{}, "LastSeen"): false, + f(tailcfg.Node{}, "LegacyDERPString"): false, + f(tailcfg.Node{}, "Machine"): false, + f(tailcfg.Node{}, "MachineAuthorized"): false, + f(tailcfg.Node{}, "Name"): false, + f(tailcfg.Node{}, "Online"): false, + f(tailcfg.Node{}, "PrimaryRoutes"): false, + f(tailcfg.Node{}, "SelfNodeV4MasqAddrForThisPeer"): false, + f(tailcfg.Node{}, "SelfNodeV6MasqAddrForThisPeer"): false, + f(tailcfg.Node{}, "Sharer"): false, + f(tailcfg.Node{}, "StableID"): false, + f(tailcfg.Node{}, "Tags"): false, + f(tailcfg.Node{}, "UnsignedPeerAPIOnly"): false, + f(tailcfg.Node{}, "User"): false, + f(tailcfg.Node{}, "computedHostIfDifferent"): false, + f(tailcfg.PortRange{}, "First"): false, + f(tailcfg.PortRange{}, "Last"): false, + f(tailcfg.SSHPolicy{}, "Rules"): false, + f(tailcfg.Service{}, "Description"): false, + f(tailcfg.Service{}, "Port"): false, + f(tailcfg.Service{}, "Proto"): false, + f(tailcfg.Service{}, "_"): false, + f(tailcfg.TPMInfo{}, "FamilyIndicator"): false, + f(tailcfg.TPMInfo{}, "FirmwareVersion"): false, + f(tailcfg.TPMInfo{}, "Manufacturer"): false, + f(tailcfg.TPMInfo{}, "Model"): false, + f(tailcfg.TPMInfo{}, "SpecRevision"): false, + f(tailcfg.TPMInfo{}, "Vendor"): false, + f(tailcfg.UserProfileView{}, "ж"): false, + f(tailcfg.UserProfile{}, "DisplayName"): false, + f(tailcfg.UserProfile{}, "ID"): false, + f(tailcfg.UserProfile{}, "LoginName"): false, + f(tailcfg.UserProfile{}, "ProfilePicURL"): false, + f(views.Slice[ipproto.Proto]{}, "ж"): false, + f(views.Slice[tailcfg.FilterRule]{}, "ж"): false, + } + + t.Run("field_list_is_complete", func(t *testing.T) { + seen := set.Set[field]{} + eachStructField(reflect.TypeOf(netmap.NetworkMap{}), func(rt reflect.Type, sf reflect.StructField) { + f := field{rt, sf.Name} + seen.Add(f) + if _, ok := fields[f]; !ok { + // Fail the test if netmap has a field not in the list. If you see this test + // failure, please add the new field to the fields map above, marking it as private or public. + t.Errorf("netmap field has not been declared as private or public: %v.%v", rt, sf.Name) + } + }) + + for want := range fields { + if !seen.Contains(want) { + // Fail the test if the list has a field not in netmap. If you see this test + // failure, please remove the field from the fields map above. 
+ t.Errorf("field declared that has not been found in netmap: %v.%v", want.t, want.f) + } + } + }) + + // tests is a list of test cases, each with a non-redacted netmap and the expected redacted netmap. + // If you add a new private field to netmap.NetworkMap or its sub-structs, please add a test case + // here that has that field set in nm, and the expected redacted value in wantRedacted. + tests := []struct { + name string + nm *netmap.NetworkMap + wantRedacted *netmap.NetworkMap + }{ + { + name: "redact_private_key", + nm: &netmap.NetworkMap{ + PrivateKey: key.NewNode(), + }, + wantRedacted: &netmap.NetworkMap{}, + }, + } + + // confirmedRedacted is a set of all private fields that have been covered by the tests above. + confirmedRedacted := set.Set[field]{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + // Record which of the private fields are set in the non-redacted netmap. + eachStructValue(reflect.ValueOf(tt.nm).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { + f := field{tt, sf.Name} + if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { + confirmedRedacted.Add(f) + } + }) + + got, _ := redactNetmapPrivateKeys(tt.nm) + if !reflect.DeepEqual(got, tt.wantRedacted) { + t.Errorf("unexpected redacted netmap: %+v", got) + } + + // Check that all private fields in the redacted netmap are zero. + eachStructValue(reflect.ValueOf(got).Elem(), func(tt reflect.Type, sf reflect.StructField, v reflect.Value) { + f := field{tt, sf.Name} + if shouldRedact := fields[f]; shouldRedact && !v.IsZero() { + t.Errorf("field not redacted: %v.%v", tt, sf.Name) + } + }) + }) + } + + // Check that all private fields in netmap.NetworkMap and its sub-structs + // are covered by the tests above. If you see a test failure here, + // please add a test case above that has that field set in nm. 
+ for f, shouldRedact := range fields { + if shouldRedact { + if !confirmedRedacted.Contains(f) { + t.Errorf("field not covered by tests: %v.%v", f.t, f.f) + } + } + } +} + +func TestHandleC2NDebugNetmap(t *testing.T) { + nm := &netmap.NetworkMap{ + Name: "myhost", + SelfNode: (&tailcfg.Node{ + ID: 100, + Name: "myhost", + StableID: "deadbeef", + Key: key.NewNode().Public(), + Hostinfo: (&tailcfg.Hostinfo{Hostname: "myhost"}).View(), + }).View(), + Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + ID: 101, + Name: "peer1", + StableID: "deadbeef", + Key: key.NewNode().Public(), + Hostinfo: (&tailcfg.Hostinfo{Hostname: "peer1"}).View(), + }).View(), + }, + PrivateKey: key.NewNode(), + } + withoutPrivateKey := *nm + withoutPrivateKey.PrivateKey = key.NodePrivate{} + + for _, tt := range []struct { + name string + req *tailcfg.C2NDebugNetmapRequest + want *netmap.NetworkMap + }{ + { + name: "simple_get", + want: &withoutPrivateKey, + }, + { + name: "post_no_omit", + req: &tailcfg.C2NDebugNetmapRequest{}, + want: &withoutPrivateKey, + }, + { + name: "post_omit_peers_and_name", + req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"Peers", "Name"}}, + want: &netmap.NetworkMap{ + SelfNode: nm.SelfNode, + }, + }, + { + name: "post_omit_nonexistent_field", + req: &tailcfg.C2NDebugNetmapRequest{OmitFields: []string{"ThisFieldDoesNotExist"}}, + want: &withoutPrivateKey, + }, + } { + t.Run(tt.name, func(t *testing.T) { + b := newTestLocalBackend(t) + b.currentNode().SetNetMap(nm) + + rec := httptest.NewRecorder() + req := httptest.NewRequest("GET", "/debug/netmap", nil) + if tt.req != nil { + b, err := json.Marshal(tt.req) + if err != nil { + t.Fatalf("json.Marshal: %v", err) + } + req = httptest.NewRequest("POST", "/debug/netmap", bytes.NewReader(b)) + } + handleC2NDebugNetMap(b, rec, req) + res := rec.Result() + wantStatus := 200 + if res.StatusCode != wantStatus { + t.Fatalf("status code = %v; want %v. Body: %s", res.Status, wantStatus, rec.Body.Bytes()) + } + var resp tailcfg.C2NDebugNetmapResponse + if err := json.Unmarshal(rec.Body.Bytes(), &resp); err != nil { + t.Fatalf("bad JSON: %v", err) + } + got := &netmap.NetworkMap{} + if err := json.Unmarshal(resp.Current, got); err != nil { + t.Fatalf("bad JSON: %v", err) + } + + if diff := gcmp.Diff(tt.want, got, + gcmp.AllowUnexported(netmap.NetworkMap{}, key.NodePublic{}, views.Slice[tailcfg.FilterRule]{}), + cmpopts.EquateComparable(key.MachinePublic{}), + ); diff != "" { + t.Errorf("netmap mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/ipn/ipnlocal/captiveportal.go b/ipn/ipnlocal/captiveportal.go new file mode 100644 index 0000000000000..14f8b799eb6dd --- /dev/null +++ b/ipn/ipnlocal/captiveportal.go @@ -0,0 +1,186 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_captiveportal + +package ipnlocal + +import ( + "context" + "time" + + "tailscale.com/health" + "tailscale.com/net/captivedetection" + "tailscale.com/util/clientmetric" +) + +func init() { + hookCaptivePortalHealthChange.Set(captivePortalHealthChange) + hookCheckCaptivePortalLoop.Set(checkCaptivePortalLoop) +} + +var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") + +// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken +// before running captive portal detection. 
+const captivePortalDetectionInterval = 2 * time.Second + +func captivePortalHealthChange(b *LocalBackend, state *health.State) { + isConnectivityImpacted := false + for _, w := range state.Warnings { + // Ignore the captive portal warnable itself. + if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { + isConnectivityImpacted = true + break + } + } + + // captiveCtx can be changed, and is protected with 'mu'; grab that + // before we start our select, below. + // + // It is guaranteed to be non-nil. + b.mu.Lock() + ctx := b.captiveCtx + b.mu.Unlock() + + // If the context is canceled, we don't need to do anything. + if ctx.Err() != nil { + return + } + + if isConnectivityImpacted { + b.logf("health: connectivity impacted; triggering captive portal detection") + + // Ensure that we select on captiveCtx so that we can time out + // triggering captive portal detection if the backend is shutdown. + select { + case b.needsCaptiveDetection <- true: + case <-ctx.Done(): + } + } else { + // If connectivity is not impacted, we know for sure we're not behind a captive portal, + // so drop any warning, and signal that we don't need captive portal detection. + b.health.SetHealthy(captivePortalWarnable) + select { + case b.needsCaptiveDetection <- false: + case <-ctx.Done(): + } + } +} + +// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. +var captivePortalWarnable = health.Register(&health.Warnable{ + Code: "captive-portal-detected", + Title: "Captive portal detected", + // High severity, because captive portals block all traffic and require user intervention. + Severity: health.SeverityHigh, + Text: health.StaticMessage("This network requires you to log in using your web browser."), + ImpactsConnectivity: true, +}) + +func checkCaptivePortalLoop(b *LocalBackend, ctx context.Context) { + var tmr *time.Timer + + maybeStartTimer := func() { + // If there's an existing timer, nothing to do; just continue + // waiting for it to expire. Otherwise, create a new timer. + if tmr == nil { + tmr = time.NewTimer(captivePortalDetectionInterval) + } + } + maybeStopTimer := func() { + if tmr == nil { + return + } + if !tmr.Stop() { + <-tmr.C + } + tmr = nil + } + + for { + if ctx.Err() != nil { + maybeStopTimer() + return + } + + // First, see if we have a signal on our "healthy" channel, which + // takes priority over an existing timer. Because a select is + // nondeterministic, we explicitly check this channel before + // entering the main select below, so that we're guaranteed to + // stop the timer before starting captive portal detection. + select { + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + maybeStopTimer() + } + default: + } + + var timerChan <-chan time.Time + if tmr != nil { + timerChan = tmr.C + } + select { + case <-ctx.Done(): + // All done; stop the timer and then exit. + maybeStopTimer() + return + case <-timerChan: + // Kick off captive portal check + b.performCaptiveDetection() + // nil out timer to force recreation + tmr = nil + case needsCaptiveDetection := <-b.needsCaptiveDetection: + if needsCaptiveDetection { + maybeStartTimer() + } else { + // Healthy; cancel any existing timer + maybeStopTimer() + } + } + } +} + +// shouldRunCaptivePortalDetection reports whether captive portal detection +// should be run. It is enabled by default, but can be disabled via a control +// knob. 
It is also only run when the user explicitly wants the backend to be +// running. +func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { + b.mu.Lock() + defer b.mu.Unlock() + return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() +} + +// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs +// the detection and updates the Warnable accordingly. +func (b *LocalBackend) performCaptiveDetection() { + if !b.shouldRunCaptivePortalDetection() { + return + } + + d := captivedetection.NewDetector(b.logf) + b.mu.Lock() // for b.hostinfo + cn := b.currentNode() + dm := cn.DERPMap() + preferredDERP := 0 + if b.hostinfo != nil { + if b.hostinfo.NetInfo != nil { + preferredDERP = b.hostinfo.NetInfo.PreferredDERP + } + } + ctx := b.ctx + netMon := b.NetMon() + b.mu.Unlock() + found := d.Detect(ctx, netMon, dm, preferredDERP) + if found { + if !b.health.IsUnhealthy(captivePortalWarnable) { + metricCaptivePortalDetected.Add(1) + } + b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) + } else { + b.health.SetHealthy(captivePortalWarnable) + } +} diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 86052eb8d5861..ab49976c8aeea 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !ts_omit_acme package ipnlocal @@ -24,6 +24,7 @@ import ( "log" randv2 "math/rand/v2" "net" + "net/http" "os" "path/filepath" "runtime" @@ -34,12 +35,14 @@ import ( "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/net/bakedroots" + "tailscale.com/tailcfg" "tailscale.com/tempfork/acme" "tailscale.com/types/logger" "tailscale.com/util/testenv" @@ -47,6 +50,10 @@ import ( "tailscale.com/version/distro" ) +func init() { + RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatus) +} + // Process-wide cache. (A new *Handler is created per connection, // effectively per request) var ( @@ -67,7 +74,7 @@ func (b *LocalBackend) certDir() (string, error) { // As a workaround for Synology DSM6 not having a "var" directory, use the // app's "etc" directory (on a small partition) to hold certs at least. // See https://github.com/tailscale/tailscale/issues/4060#issuecomment-1186592251 - if d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 { + if buildfeatures.HasSynology && d == "" && runtime.GOOS == "linux" && distro.Get() == distro.Synology && distro.DSMVersion() == 6 { d = "/var/packages/Tailscale/etc" // base; we append "certs" below } if d == "" { @@ -836,3 +843,54 @@ func checkCertDomain(st *ipnstate.Status, domain string) error { } return fmt.Errorf("invalid domain %q; must be one of %q", domain, st.CertDomains) } + +// handleC2NTLSCertStatus returns info about the last TLS certificate issued for the +// provided domain. This can be called by the controlplane to clean up DNS TXT +// records when they're no longer needed by LetsEncrypt. +// +// It does not kick off a cert fetch or async refresh. It only reports anything +// that's already sitting on disk, and only reports metadata about the public +// cert (stuff that'd be the in CT logs anyway). 
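The interesting part of the handler that follows is plain pem/x509 parsing of the cached public certificate. A self-contained sketch of that metadata extraction, assuming an invented certValidity helper and a throwaway self-signed certificate so the example runs on its own (the real handler reads CertPEM from the cert store instead):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

// certValidity reports the NotBefore/NotAfter window of the first certificate
// in certPEM, formatted as RFC 3339, which is the kind of public metadata a
// status endpoint can safely return.
func certValidity(certPEM []byte) (notBefore, notAfter string, err error) {
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return "", "", fmt.Errorf("invalid PEM")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return "", "", fmt.Errorf("invalid certificate: %w", err)
	}
	return cert.NotBefore.UTC().Format(time.RFC3339),
		cert.NotAfter.UTC().Format(time.RFC3339), nil
}

func main() {
	// Make a throwaway self-signed cert so the example is self-contained.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example.ts.net"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(90 * 24 * time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})

	nb, na, err := certValidity(certPEM)
	if err != nil {
		panic(err)
	}
	fmt.Println("NotBefore:", nb, "NotAfter:", na)
}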
+func handleC2NTLSCertStatus(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + cs, err := b.getCertStore() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + domain := r.FormValue("domain") + if domain == "" { + http.Error(w, "no 'domain'", http.StatusBadRequest) + return + } + + ret := &tailcfg.C2NTLSCertInfo{} + pair, err := getCertPEMCached(cs, domain, b.clock.Now()) + ret.Valid = err == nil + if err != nil { + ret.Error = err.Error() + if errors.Is(err, errCertExpired) { + ret.Expired = true + } else if errors.Is(err, ipn.ErrStateNotExist) { + ret.Missing = true + ret.Error = "no certificate" + } + } else { + block, _ := pem.Decode(pair.CertPEM) + if block == nil { + ret.Error = "invalid PEM" + ret.Valid = false + } else { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + ret.Error = fmt.Sprintf("invalid certificate: %v", err) + ret.Valid = false + } else { + ret.NotBefore = cert.NotBefore.UTC().Format(time.RFC3339) + ret.NotAfter = cert.NotAfter.UTC().Format(time.RFC3339) + } + } + } + + writeJSON(w, ret) +} diff --git a/ipn/ipnlocal/cert_js.go b/ipn/ipnlocal/cert_disabled.go similarity index 51% rename from ipn/ipnlocal/cert_js.go rename to ipn/ipnlocal/cert_disabled.go index 6acc57a60a0ac..17d446c11af39 100644 --- a/ipn/ipnlocal/cert_js.go +++ b/ipn/ipnlocal/cert_disabled.go @@ -1,20 +1,30 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build js || ts_omit_acme + package ipnlocal import ( "context" "errors" + "io" + "net/http" "time" ) +func init() { + RegisterC2N("GET /tls-cert-status", handleC2NTLSCertStatusDisabled) +} + +var errNoCerts = errors.New("cert support not compiled in this build") + type TLSCertKeyPair struct { CertPEM, KeyPEM []byte } func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertKeyPair, error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts } var errCertExpired = errors.New("cert expired") @@ -22,9 +32,14 @@ var errCertExpired = errors.New("cert expired") type certStore interface{} func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts } func (b *LocalBackend) getCertStore() (certStore, error) { - return nil, errors.New("not implemented for js/wasm") + return nil, errNoCerts +} + +func handleC2NTLSCertStatusDisabled(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{"Missing":true}`) // a minimal tailcfg.C2NTLSCertInfo } diff --git a/ipn/ipnlocal/drive.go b/ipn/ipnlocal/drive.go index d77481903fc09..7d6dc2427adae 100644 --- a/ipn/ipnlocal/drive.go +++ b/ipn/ipnlocal/drive.go @@ -1,38 +1,35 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_drive + package ipnlocal import ( + "errors" "fmt" + "io" + "net/http" + "net/netip" "os" "slices" "tailscale.com/drive" "tailscale.com/ipn" "tailscale.com/tailcfg" + "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/httpm" ) -const ( - // DriveLocalPort is the port on which the Taildrive listens for location - // connections on quad 100. - DriveLocalPort = 8080 -) - -// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is -// enabled. 
This is currently based on checking for the drive:share node -// attribute. -func (b *LocalBackend) DriveSharingEnabled() bool { - return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) +func init() { + hookSetNetMapLockedDrive.Set(setNetMapLockedDrive) } -// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes -// is enabled. This is currently based on checking for the drive:access node -// attribute. -func (b *LocalBackend) DriveAccessEnabled() bool { - return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) +func setNetMapLockedDrive(b *LocalBackend, nm *netmap.NetworkMap) { + b.updateDrivePeersLocked(nm) + b.driveNotifyCurrentSharesLocked() } // DriveSetServerAddr tells Taildrive to use the given address for connecting @@ -363,3 +360,137 @@ func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Rem } return driveRemotes } + +// responseBodyWrapper wraps an io.ReadCloser and stores +// the number of bytesRead. +type responseBodyWrapper struct { + io.ReadCloser + logVerbose bool + bytesRx int64 + bytesTx int64 + log logger.Logf + method string + statusCode int + contentType string + fileExtension string + shareNodeKey string + selfNodeKey string + contentLength int64 +} + +// logAccess logs the taildrive: access: log line. If the logger is nil, +// the log will not be written. +func (rbw *responseBodyWrapper) logAccess(err string) { + if rbw.log == nil { + return + } + + // Some operating systems create and copy lots of 0 length hidden files for + // tracking various states. Omit these to keep logs from being too verbose. + if rbw.logVerbose || rbw.contentLength > 0 { + levelPrefix := "" + if rbw.logVerbose { + levelPrefix = "[v1] " + } + rbw.log( + "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", + levelPrefix, + rbw.method, + rbw.selfNodeKey, + rbw.shareNodeKey, + rbw.statusCode, + rbw.fileExtension, + rbw.contentType, + roundTraffic(rbw.contentLength), + roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) + } +} + +// Read implements the io.Reader interface. +func (rbw *responseBodyWrapper) Read(b []byte) (int, error) { + n, err := rbw.ReadCloser.Read(b) + rbw.bytesRx += int64(n) + if err != nil && !errors.Is(err, io.EOF) { + rbw.logAccess(err.Error()) + } + + return n, err +} + +// Close implements the io.Close interface. +func (rbw *responseBodyWrapper) Close() error { + err := rbw.ReadCloser.Close() + var errStr string + if err != nil { + errStr = err.Error() + } + rbw.logAccess(errStr) + + return err +} + +// driveTransport is an http.RoundTripper that wraps +// b.Dialer().PeerAPITransport() with metrics tracking. +type driveTransport struct { + b *LocalBackend + tr *http.Transport +} + +func (b *LocalBackend) newDriveTransport() *driveTransport { + return &driveTransport{ + b: b, + tr: b.Dialer().PeerAPITransport(), + } +} + +func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + // Some WebDAV clients include origin and refer headers, which peerapi does + // not like. Remove them. 
+ req.Header.Del("origin") + req.Header.Del("referer") + + bw := &requestBodyWrapper{} + if req.Body != nil { + bw.ReadCloser = req.Body + req.Body = bw + } + + defer func() { + contentType := "unknown" + if ct := req.Header.Get("Content-Type"); ct != "" { + contentType = ct + } + + dt.b.mu.Lock() + selfNodeKey := dt.b.currentNode().Self().Key().ShortString() + dt.b.mu.Unlock() + n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) + shareNodeKey := "unknown" + if ok { + shareNodeKey = string(n.Key().ShortString()) + } + + rbw := responseBodyWrapper{ + log: dt.b.logf, + logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level + method: req.Method, + bytesTx: int64(bw.bytesRead), + selfNodeKey: selfNodeKey, + shareNodeKey: shareNodeKey, + contentType: contentType, + contentLength: resp.ContentLength, + fileExtension: parseDriveFileExtensionForLog(req.URL.Path), + statusCode: resp.StatusCode, + ReadCloser: resp.Body, + } + + if resp.StatusCode >= 400 { + // in case of error response, just log immediately + rbw.logAccess("") + } else { + resp.Body = &rbw + } + }() + + return dt.tr.RoundTrip(req) +} diff --git a/ipn/ipnlocal/drive_tomove.go b/ipn/ipnlocal/drive_tomove.go new file mode 100644 index 0000000000000..290fe097022fd --- /dev/null +++ b/ipn/ipnlocal/drive_tomove.go @@ -0,0 +1,30 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// This is the Taildrive stuff that should ideally be registered in init only when +// the ts_omit_drive is not set, but for transition reasons is currently (2025-09-08) +// always defined, as we work to pull it out of LocalBackend. + +package ipnlocal + +import "tailscale.com/tailcfg" + +const ( + // DriveLocalPort is the port on which the Taildrive listens for location + // connections on quad 100. + DriveLocalPort = 8080 +) + +// DriveSharingEnabled reports whether sharing to remote nodes via Taildrive is +// enabled. This is currently based on checking for the drive:share node +// attribute. +func (b *LocalBackend) DriveSharingEnabled() bool { + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveShare) +} + +// DriveAccessEnabled reports whether accessing Taildrive shares on remote nodes +// is enabled. This is currently based on checking for the drive:access node +// attribute. 
+func (b *LocalBackend) DriveAccessEnabled() bool { + return b.currentNode().SelfHasCap(tailcfg.NodeAttrsTaildriveAccess) +} diff --git a/ipn/ipnlocal/expiry.go b/ipn/ipnlocal/expiry.go index d1119981594da..8ea63d21a4fb0 100644 --- a/ipn/ipnlocal/expiry.go +++ b/ipn/ipnlocal/expiry.go @@ -6,12 +6,14 @@ package ipnlocal import ( "time" + "tailscale.com/control/controlclient" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus" ) // For extra defense-in-depth, when we're testing expired nodes we check @@ -40,14 +42,22 @@ type expiryManager struct { logf logger.Logf clock tstime.Clock + + eventClient *eventbus.Client } -func newExpiryManager(logf logger.Logf) *expiryManager { - return &expiryManager{ +func newExpiryManager(logf logger.Logf, bus *eventbus.Bus) *expiryManager { + em := &expiryManager{ previouslyExpired: map[tailcfg.StableNodeID]bool{}, logf: logf, clock: tstime.StdClock{}, } + + em.eventClient = bus.Client("ipnlocal.expiryManager") + eventbus.SubscribeFunc(em.eventClient, func(ct controlclient.ControlTime) { + em.onControlTime(ct.Value) + }) + return em } // onControlTime is called whenever we receive a new timestamp from the control @@ -218,6 +228,8 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim return nextExpiry } +func (em *expiryManager) close() { em.eventClient.Close() } + // ControlNow estimates the current time on the control server, calculated as // localNow + the delta between local and control server clocks as recorded // when the LocalBackend last received a time message from the control server. diff --git a/ipn/ipnlocal/expiry_test.go b/ipn/ipnlocal/expiry_test.go index a2b10fe325b8a..2c646ca724efd 100644 --- a/ipn/ipnlocal/expiry_test.go +++ b/ipn/ipnlocal/expiry_test.go @@ -14,6 +14,7 @@ import ( "tailscale.com/tstest" "tailscale.com/types/key" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" ) func TestFlagExpiredPeers(t *testing.T) { @@ -110,7 +111,8 @@ func TestFlagExpiredPeers(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) if tt.controlTime != nil { em.onControlTime(*tt.controlTime) @@ -240,7 +242,8 @@ func TestNextPeerExpiry(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) got := em.nextPeerExpiry(tt.netmap, now) if !got.Equal(tt.want) { @@ -253,7 +256,8 @@ func TestNextPeerExpiry(t *testing.T) { t.Run("ClockSkew", func(t *testing.T) { t.Logf("local time: %q", now.Format(time.RFC3339)) - em := newExpiryManager(t.Logf) + bus := eventbustest.NewBus(t) + em := newExpiryManager(t.Logf, bus) em.clock = tstest.NewClock(tstest.ClockOpts{Start: now}) // The local clock is "running fast"; our clock skew is -2h diff --git a/ipn/ipnlocal/extension_host_test.go b/ipn/ipnlocal/extension_host_test.go index 509833ff6de46..f5c081a5bdb3e 100644 --- a/ipn/ipnlocal/extension_host_test.go +++ b/ipn/ipnlocal/extension_host_test.go @@ -32,6 +32,7 @@ import ( "tailscale.com/types/lazy" "tailscale.com/types/logger" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) 
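The expiryManager change above replaces a direct onControlTime callback with an event-bus subscription: the control client publishes a time event, and the manager consumes it without either side holding a reference to the other. Without restating the real tailscale.com/util/eventbus API, the general shape can be sketched with channels and generics (topic, controlTime, and expiryTracker below are invented for illustration):

package main

import (
	"fmt"
	"sync"
	"time"
)

// topic is a toy single-event-type publish/subscribe fan-out, standing in for
// a real event bus: publishers and subscribers share only the topic, never
// each other.
type topic[T any] struct {
	mu   sync.Mutex
	subs []chan T
}

// subscribe returns a channel on which future events will be delivered.
func (t *topic[T]) subscribe() <-chan T {
	ch := make(chan T, 16)
	t.mu.Lock()
	t.subs = append(t.subs, ch)
	t.mu.Unlock()
	return ch
}

// publish delivers ev to every subscriber.
func (t *topic[T]) publish(ev T) {
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, ch := range t.subs {
		ch <- ev
	}
}

// controlTime is a stand-in for the "time according to the control server" event.
type controlTime struct{ Value time.Time }

// expiryTracker is a stand-in for a component that previously needed a direct
// callback from the publisher and now just owns a subscription.
type expiryTracker struct{ events <-chan controlTime }

func (e *expiryTracker) run() {
	for ev := range e.events {
		fmt.Println("control time:", ev.Value.UTC().Format(time.RFC3339))
	}
}

func main() {
	var controlTimes topic[controlTime]
	et := &expiryTracker{events: controlTimes.subscribe()}
	done := make(chan struct{})
	go func() { et.run(); close(done) }()

	controlTimes.publish(controlTime{Value: time.Now()})
	// A real bus would own subscriber lifetimes; here we just stop after one event.
	close(controlTimes.subs[0])
	<-done
}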
@@ -847,7 +848,7 @@ func TestBackgroundProfileResolver(t *testing.T) { // Create a new profile manager and add the profiles to it. // We expose the profile manager to the extensions via the read-only [ipnext.ProfileStore] interface. - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) for i, p := range tt.profiles { // Generate a unique ID and key for each profile, // unless the profile already has them set diff --git a/ipn/ipnlocal/hwattest.go b/ipn/ipnlocal/hwattest.go new file mode 100644 index 0000000000000..2c93cad4c97ff --- /dev/null +++ b/ipn/ipnlocal/hwattest.go @@ -0,0 +1,48 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tpm + +package ipnlocal + +import ( + "errors" + + "tailscale.com/feature" + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/persist" +) + +func init() { + feature.HookGenerateAttestationKeyIfEmpty.Set(generateAttestationKeyIfEmpty) +} + +// generateAttestationKeyIfEmpty generates a new hardware attestation key if +// none exists. It returns true if a new key was generated and stored in +// p.AttestationKey. +func generateAttestationKeyIfEmpty(p *persist.Persist, logf logger.Logf) (bool, error) { + // attempt to generate a new hardware attestation key if none exists + var ak key.HardwareAttestationKey + if p != nil { + ak = p.AttestationKey + } + + if ak == nil || ak.IsZero() { + var err error + ak, err = key.NewHardwareAttestationKey() + if err != nil { + if !errors.Is(err, key.ErrUnsupported) { + logf("failed to create hardware attestation key: %v", err) + } + } else if ak != nil { + logf("using new hardware attestation key: %v", ak.Public()) + if p == nil { + p = &persist.Persist{} + } + p.AttestationKey = ak + return true, nil + } + } + return false, nil +} diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 9a3f08472bc8a..a1fefedc2d1e3 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6,11 +6,9 @@ package ipnlocal import ( - "bytes" "cmp" "context" "crypto/sha256" - "encoding/base64" "encoding/binary" "encoding/hex" "encoding/json" @@ -18,7 +16,6 @@ import ( "fmt" "io" "log" - "maps" "math" "math/rand/v2" "net" @@ -26,7 +23,6 @@ import ( "net/netip" "net/url" "os" - "os/exec" "reflect" "runtime" "slices" @@ -39,19 +35,15 @@ import ( "go4.org/mem" "go4.org/netipx" "golang.org/x/net/dns/dnsmessage" - "gvisor.dev/gvisor/pkg/tcpip" "tailscale.com/appc" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/control/controlknobs" - "tailscale.com/doctor" - "tailscale.com/doctor/ethtool" - "tailscale.com/doctor/permissions" - "tailscale.com/doctor/routetable" "tailscale.com/drive" "tailscale.com/envknob" "tailscale.com/envknob/featureknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" @@ -60,10 +52,8 @@ import ( "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnext" "tailscale.com/ipn/ipnstate" - "tailscale.com/ipn/policy" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" - "tailscale.com/net/captivedetection" "tailscale.com/net/dns" "tailscale.com/net/dnscache" "tailscale.com/net/dnsfallback" @@ -77,11 +67,8 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/paths" - "tailscale.com/portlist" - 
"tailscale.com/posture" "tailscale.com/syncs" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/tsd" "tailscale.com/tstime" "tailscale.com/types/appctype" @@ -96,13 +83,12 @@ import ( "tailscale.com/types/preftype" "tailscale.com/types/ptr" "tailscale.com/types/views" + "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" - "tailscale.com/util/deephash" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/goroutines" - "tailscale.com/util/httpm" "tailscale.com/util/mak" - "tailscale.com/util/multierr" "tailscale.com/util/osuser" "tailscale.com/util/rands" "tailscale.com/util/set" @@ -110,7 +96,6 @@ import ( "tailscale.com/util/syspolicy/pkey" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/util/syspolicy/ptype" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/version" @@ -168,8 +153,6 @@ type watchSession struct { cancel context.CancelFunc // to shut down the session } -var metricCaptivePortalDetected = clientmetric.NewCounter("captiveportal_detected") - var ( // errShutdown indicates that the [LocalBackend.Shutdown] was called. errShutdown = errors.New("shutting down") @@ -197,12 +180,14 @@ var ( // state machine generates events back out to zero or more components. type LocalBackend struct { // Elements that are thread-safe or constant after construction. - ctx context.Context // canceled by [LocalBackend.Shutdown] - ctxCancel context.CancelCauseFunc // cancels ctx - logf logger.Logf // general logging - keyLogf logger.Logf // for printing list of peers on change - statsLogf logger.Logf // for printing peers stats on change - sys *tsd.System + ctx context.Context // canceled by [LocalBackend.Shutdown] + ctxCancel context.CancelCauseFunc // cancels ctx + logf logger.Logf // general logging + keyLogf logger.Logf // for printing list of peers on change + statsLogf logger.Logf // for printing peers stats on change + sys *tsd.System + eventSubs eventbus.Monitor + health *health.Tracker // always non-nil polc policyclient.Client // always non-nil metrics metrics @@ -210,16 +195,12 @@ type LocalBackend struct { store ipn.StateStore // non-nil; TODO(bradfitz): remove; use sys dialer *tsdial.Dialer // non-nil; TODO(bradfitz): remove; use sys pushDeviceToken syncs.AtomicValue[string] - backendLogID logid.PublicID - unregisterNetMon func() - unregisterHealthWatch func() + backendLogID logid.PublicID // or zero value if logging not in use unregisterSysPolicyWatch func() - portpoll *portlist.Poller // may be nil - portpollOnce sync.Once // guards starting readPoller - varRoot string // or empty if SetVarRoot never called - logFlushFunc func() // or nil if SetLogFlusher wasn't called - em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend - sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend + varRoot string // or empty if SetVarRoot never called + logFlushFunc func() // or nil if SetLogFlusher wasn't called + em *expiryManager // non-nil; TODO(nickkhyl): move to nodeBackend + sshAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend // webClientAtomicBool controls whether the web client is running. This should // be true unless the disable-web-client node attribute has been set. webClientAtomicBool atomic.Bool // TODO(nickkhyl): move to nodeBackend @@ -281,13 +262,13 @@ type LocalBackend struct { // of [LocalBackend]'s own state that is not tied to the node context. 
currentNodeAtomic atomic.Pointer[nodeBackend] - conf *conffile.Config // latest parsed config, or nil if not in declarative mode - pm *profileManager // mu guards access - filterHash deephash.Sum // TODO(nickkhyl): move to nodeBackend - httpTestClient *http.Client // for controlclient. nil by default, used by tests. - ccGen clientGen // function for producing controlclient; lazily populated - sshServer SSHServer // or nil, initialized lazily. - appConnector *appc.AppConnector // or nil, initialized when configured. + conf *conffile.Config // latest parsed config, or nil if not in declarative mode + pm *profileManager // mu guards access + lastFilterInputs *filterInputs + httpTestClient *http.Client // for controlclient. nil by default, used by tests. + ccGen clientGen // function for producing controlclient; lazily populated + sshServer SSHServer // or nil, initialized lazily. + appConnector *appc.AppConnector // or nil, initialized when configured. // notifyCancel cancels notifications to the current SetNotifyCallback. notifyCancel context.CancelFunc cc controlclient.Client // TODO(nickkhyl): move to nodeBackend @@ -315,22 +296,11 @@ type LocalBackend struct { notifyWatchers map[string]*watchSession // by session ID lastStatusTime time.Time // status.AsOf value of the last processed status update componentLogUntil map[string]componentLogState - // c2nUpdateStatus is the status of c2n-triggered client update. - c2nUpdateStatus updateStatus - currentUser ipnauth.Actor + currentUser ipnauth.Actor - selfUpdateProgress []ipnstate.UpdateProgress - lastSelfUpdateState ipnstate.SelfUpdateStatus // capForcedNetfilter is the netfilter that control instructs Linux clients // to use, unless overridden locally. capForcedNetfilter string // TODO(nickkhyl): move to nodeBackend - // offlineAutoUpdateCancel stops offline auto-updates when called. It - // should be used via stopOfflineAutoUpdate and - // maybeStartOfflineAutoUpdate. It is nil when offline auto-updates are - // note running. - // - //lint:ignore U1000 only used in Linux and Windows builds in autoupdate.go - offlineAutoUpdateCancel func() // ServeConfig fields. (also guarded by mu) lastServeConfJSON mem.RO // last JSON that was parsed into serveConfig @@ -343,9 +313,8 @@ type LocalBackend struct { serveListeners map[netip.AddrPort]*localListener // listeners for local serve traffic serveProxyHandlers sync.Map // string (HTTPHandler.Proxy) => *reverseProxy - // statusLock must be held before calling statusChanged.Wait() or + // mu must be held before calling statusChanged.Wait() or // statusChanged.Broadcast(). - statusLock sync.Mutex statusChanged *sync.Cond // dialPlan is any dial plan that we've received from the control @@ -371,12 +340,6 @@ type LocalBackend struct { // notified about. lastNotifiedDriveShares *views.SliceView[*drive.Share, drive.ShareView] - // lastKnownHardwareAddrs is a list of the previous known hardware addrs. - // Previously known hwaddrs are kept to work around an issue on Windows - // where all addresses might disappear. - // http://go/corp/25168 - lastKnownHardwareAddrs syncs.AtomicValue[[]string] - // lastSuggestedExitNode stores the last suggested exit node suggestion to // avoid unnecessary churn between multiple equally-good options. lastSuggestedExitNode tailcfg.StableNodeID @@ -429,13 +392,31 @@ type LocalBackend struct { // // See tailscale/corp#29969. overrideExitNodePolicy bool + + // hardwareAttested is whether backend should use a hardware-backed key to + // bind the node identity to this device. 
+ hardwareAttested atomic.Bool } -// HealthTracker returns the health tracker for the backend. -func (b *LocalBackend) HealthTracker() *health.Tracker { - return b.health +// SetHardwareAttested enables hardware attestation key signatures in map +// requests, if supported on this platform. SetHardwareAttested should be called +// before Start. +func (b *LocalBackend) SetHardwareAttested() { + b.hardwareAttested.Store(true) +} + +// HardwareAttested reports whether hardware-backed attestation keys should be +// used to bind the node's identity to this device. +func (b *LocalBackend) HardwareAttested() bool { + return b.hardwareAttested.Load() } +// HealthTracker returns the health tracker for the backend. +func (b *LocalBackend) HealthTracker() *health.Tracker { return b.health } + +// Logger returns the logger for the backend. +func (b *LocalBackend) Logger() logger.Logf { return b.logf } + // UserMetricsRegistry returns the usermetrics registry for the backend func (b *LocalBackend) UserMetricsRegistry() *usermetric.Registry { return b.sys.UserMetricsRegistry() @@ -446,9 +427,8 @@ func (b *LocalBackend) NetMon() *netmon.Monitor { return b.sys.NetMon.Get() } -type updateStatus struct { - started bool -} +// PolicyClient returns the policy client for the backend. +func (b *LocalBackend) PolicyClient() policyclient.Client { return b.polc } type metrics struct { // advertisedRoutes is a metric that reports the number of network routes that are advertised by the local node. @@ -468,6 +448,8 @@ type clientGen func(controlclient.Options) (controlclient.Client, error) // but is not actually running. // // If dialer is nil, a new one is made. +// +// The logID may be the zero value if logging is not in use. func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, loginFlags controlclient.LoginFlags) (_ *LocalBackend, err error) { e := sys.Engine.Get() store := sys.StateStore.Get() @@ -484,7 +466,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo if loginFlags&controlclient.LocalBackendStartKeyOSNeutral != 0 { goos = "" } - pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker(), goos) + pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker.Get(), goos) if err != nil { return nil, err } @@ -517,7 +499,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo statsLogf: logger.LogOnChange(logf, 5*time.Minute, clock.Now), sys: sys, polc: sys.PolicyClientOrDefault(), - health: sys.HealthTracker(), + health: sys.HealthTracker.Get(), metrics: m, e: e, dialer: dialer, @@ -525,17 +507,15 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo pm: pm, backendLogID: logID, state: ipn.NoState, - portpoll: new(portlist.Poller), - em: newExpiryManager(logf), + em: newExpiryManager(logf, sys.Bus.Get()), loginFlags: loginFlags, clock: clock, - selfUpdateProgress: make([]ipnstate.UpdateProgress, 0), - lastSelfUpdateState: ipnstate.UpdateFinished, captiveCtx: captiveCtx, captiveCancel: nil, // so that we start checkCaptivePortalLoop when Running needsCaptiveDetection: make(chan bool), } - nb := newNodeBackend(ctx, b.sys.Bus.Get()) + + nb := newNodeBackend(ctx, b.logf, b.sys.Bus.Get()) b.currentNodeAtomic.Store(nb) nb.ready() @@ -562,7 +542,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo }() netMon := sys.NetMon.Get() - b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker()) + 
b.sockstatLogger, err = sockstatlog.NewLogger(logpolicy.LogsDir(logf), logf, logID, netMon, sys.HealthTracker.Get(), sys.Bus.Get()) if err != nil { log.Printf("error setting up sockstat logger: %v", err) } @@ -577,37 +557,110 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo b.e.SetJailedFilter(noneFilter) b.setTCPPortsIntercepted(nil) - b.setVIPServicesTCPPortsIntercepted(nil) - b.statusChanged = sync.NewCond(&b.statusLock) + b.statusChanged = sync.NewCond(&b.mu) b.e.SetStatusCallback(b.setWgengineStatus) b.prevIfState = netMon.InterfaceState() - // Call our linkChange code once with the current state, and - // then also whenever it changes: + // Call our linkChange code once with the current state. + // Following changes are triggered via the eventbus. b.linkChange(&netmon.ChangeDelta{New: netMon.InterfaceState()}) - b.unregisterNetMon = netMon.RegisterChangeCallback(b.linkChange) - b.unregisterHealthWatch = b.health.RegisterWatcher(b.onHealthChange) - - if tunWrap, ok := b.sys.Tun.GetOK(); ok { - tunWrap.PeerAPIPort = b.GetPeerAPIPort - } else { - b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) + if buildfeatures.HasPeerAPIServer { + if tunWrap, ok := b.sys.Tun.GetOK(); ok { + tunWrap.PeerAPIPort = b.GetPeerAPIPort + } else { + b.logf("[unexpected] failed to wire up PeerAPI port for engine %T", e) + } } - for _, component := range ipn.DebuggableComponents { - key := componentStateKey(component) - if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil { - if until := time.Unix(ut, 0); until.After(b.clock.Now()) { - // conditional to avoid log spam at start when off - b.SetComponentDebugLogging(component, until) + if buildfeatures.HasDebug { + for _, component := range ipn.DebuggableComponents { + key := componentStateKey(component) + if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil { + if until := time.Unix(ut, 0); until.After(b.clock.Now()) { + // conditional to avoid log spam at start when off + b.SetComponentDebugLogging(component, until) + } } } } + + // Start the event bus late, once all the assignments above are done. + // (See previous race in tailscale/tailscale#17252) + ec := b.Sys().Bus.Get().Client("ipnlocal.LocalBackend") + b.eventSubs = ec.Monitor(b.consumeEventbusTopics(ec)) + return b, nil } +// consumeEventbusTopics consumes events from all relevant +// [eventbus.Subscriber]'s and passes them to their related handler. Events are +// always handled in the order they are received, i.e. the next event is not +// read until the previous event's handler has returned. It returns when the +// [eventbus.Client] is closed. 
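One idiom worth noting in the consumption loop below: subscriptions for features that are compiled out are left as nil channels, and a receive from a nil channel blocks forever, so those select cases simply never fire. A small standalone sketch of that idiom (healthChange, linkChange, and consume are invented stand-ins, not the real event types):

package main

import (
	"context"
	"fmt"
	"time"
)

type healthChange struct{ Msg string }
type linkChange struct{ Up bool }

// consume selects over several event channels until ctx is done. Channels for
// disabled features are left nil; a receive from a nil channel never proceeds,
// so that case is effectively removed from the select.
func consume(ctx context.Context, health <-chan healthChange, link <-chan linkChange) {
	for {
		select {
		case <-ctx.Done():
			return
		case h := <-health: // never fires if health == nil
			fmt.Println("health:", h.Msg)
		case l := <-link:
			fmt.Println("link up:", l.Up)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	link := make(chan linkChange, 1)
	link <- linkChange{Up: true}

	// Pretend the health feature is compiled out: pass a nil channel.
	var health chan healthChange
	consume(ctx, health, link)
}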
+func (b *LocalBackend) consumeEventbusTopics(ec *eventbus.Client) func(*eventbus.Client) { + clientVersionSub := eventbus.Subscribe[tailcfg.ClientVersion](ec) + autoUpdateSub := eventbus.Subscribe[controlclient.AutoUpdate](ec) + + var healthChange <-chan health.Change + if buildfeatures.HasHealth { + healthChangeSub := eventbus.Subscribe[health.Change](ec) + healthChange = healthChangeSub.Events() + } + changeDeltaSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + routeUpdateSub := eventbus.Subscribe[appctype.RouteUpdate](ec) + storeRoutesSub := eventbus.Subscribe[appctype.RouteInfo](ec) + + var portlist <-chan PortlistServices + if buildfeatures.HasPortList { + portlistSub := eventbus.Subscribe[PortlistServices](ec) + portlist = portlistSub.Events() + } + + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case clientVersion := <-clientVersionSub.Events(): + b.onClientVersion(&clientVersion) + case au := <-autoUpdateSub.Events(): + b.onTailnetDefaultAutoUpdate(au.Value) + case change := <-healthChange: + b.onHealthChange(change) + case changeDelta := <-changeDeltaSub.Events(): + b.linkChange(&changeDelta) + + case pl := <-portlist: + if buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans + b.setPortlistServices(pl) + } + case ru := <-routeUpdateSub.Events(): + // TODO(creachadair, 2025-10-02): It is currently possible for updates produced under + // one profile to arrive and be applied after a switch to another profile. + // We need to find a way to ensure that changes to the backend state are applied + // consistently in the presnce of profile changes, which currently may not happen in + // a single atomic step. See: https://github.com/tailscale/tailscale/issues/17414 + if err := b.AdvertiseRoute(ru.Advertise...); err != nil { + b.logf("appc: failed to advertise routes: %v: %v", ru.Advertise, err) + } + if err := b.UnadvertiseRoute(ru.Unadvertise...); err != nil { + b.logf("appc: failed to unadvertise routes: %v: %v", ru.Unadvertise, err) + } + case ri := <-storeRoutesSub.Events(): + // Whether or not routes should be stored can change over time. + shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if shouldStoreRoutes { + if err := b.storeRouteInfo(ri); err != nil { + b.logf("appc: failed to store route info: %v", err) + } + } + } + } + } +} + func (b *LocalBackend) Clock() tstime.Clock { return b.clock } func (b *LocalBackend) Sys() *tsd.System { return b.sys } @@ -620,7 +673,7 @@ func (b *LocalBackend) currentNode() *nodeBackend { if v := b.currentNodeAtomic.Load(); v != nil || !testenv.InTest() { return v } - v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.sys.Bus.Get()) + v := newNodeBackend(cmp.Or(b.ctx, context.Background()), b.logf, b.sys.Bus.Get()) if b.currentNodeAtomic.CompareAndSwap(nil, v) { v.ready() } @@ -660,6 +713,9 @@ func componentStateKey(component string) ipn.StateKey { // - magicsock // - sockstats func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Time) error { + if !buildfeatures.HasDebug { + return feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() @@ -723,6 +779,9 @@ func (b *LocalBackend) SetComponentDebugLogging(component string, until time.Tim // GetDNSOSConfig returns the base OS DNS configuration, as seen by the DNS manager. 
func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { + if !buildfeatures.HasDNS { + panic("unreachable") + } manager, ok := b.sys.DNSManager.GetOK() if !ok { return dns.OSConfig{}, errors.New("DNS manager not available") @@ -734,6 +793,9 @@ func (b *LocalBackend) GetDNSOSConfig() (dns.OSConfig, error) { // the raw DNS response and the resolvers that are were able to handle the query (the internal forwarder // may race multiple resolvers). func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []byte, resolvers []*dnstype.Resolver, err error) { + if !buildfeatures.HasDNS { + return nil, nil, feature.ErrUnavailable + } manager, ok := b.sys.DNSManager.GetOK() if !ok { return nil, nil, errors.New("DNS manager not available") @@ -778,6 +840,9 @@ func (b *LocalBackend) QueryDNS(name string, queryType dnsmessage.Type) (res []b // enabled until, or the zero time if component's time is not currently // enabled. func (b *LocalBackend) GetComponentDebugLogging(component string) time.Time { + if !buildfeatures.HasDebug { + return time.Time{} + } b.mu.Lock() defer b.mu.Unlock() @@ -911,10 +976,6 @@ func (b *LocalBackend) DisconnectControl() { cc.Shutdown() } -// captivePortalDetectionInterval is the duration to wait in an unhealthy state with connectivity broken -// before running captive portal detection. -const captivePortalDetectionInterval = 2 * time.Second - // linkChange is our network monitor callback, called whenever the network changes. func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.mu.Lock() @@ -957,20 +1018,31 @@ func (b *LocalBackend) linkChange(delta *netmon.ChangeDelta) { b.updateFilterLocked(prefs) updateExitNodeUsageWarning(prefs, delta.New, b.health) - cn := b.currentNode() - nm := cn.NetMap() - if peerAPIListenAsync && nm != nil && b.state == ipn.Running { - want := nm.GetAddresses().Len() - have := len(b.peerAPIListeners) - b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) - if have < want { - b.logf("linkChange: peerAPIListeners too low; trying again") - b.goTracker.Go(b.initPeerAPIListener) + if buildfeatures.HasPeerAPIServer { + cn := b.currentNode() + nm := cn.NetMap() + if peerAPIListenAsync && nm != nil && b.state == ipn.Running { + want := nm.GetAddresses().Len() + have := len(b.peerAPIListeners) + b.logf("[v1] linkChange: have %d peerAPIListeners, want %d", have, want) + if have < want { + b.logf("linkChange: peerAPIListeners too low; trying again") + b.goTracker.Go(b.initPeerAPIListener) + } } } } +// Captive portal detection hooks. +var ( + hookCaptivePortalHealthChange feature.Hook[func(*LocalBackend, *health.State)] + hookCheckCaptivePortalLoop feature.Hook[func(*LocalBackend, context.Context)] +) + func (b *LocalBackend) onHealthChange(change health.Change) { + if !buildfeatures.HasHealth { + return + } if change.WarnableChanged { w := change.Warnable us := change.UnhealthyState @@ -987,51 +1059,17 @@ func (b *LocalBackend) onHealthChange(change health.Change) { Health: state, }) - isConnectivityImpacted := false - for _, w := range state.Warnings { - // Ignore the captive portal warnable itself. - if w.ImpactsConnectivity && w.WarnableCode != captivePortalWarnable.Code { - isConnectivityImpacted = true - break - } - } - - // captiveCtx can be changed, and is protected with 'mu'; grab that - // before we start our select, below. - // - // It is guaranteed to be non-nil. - b.mu.Lock() - ctx := b.captiveCtx - b.mu.Unlock() - - // If the context is canceled, we don't need to do anything. 
- if ctx.Err() != nil { - return - } - - if isConnectivityImpacted { - b.logf("health: connectivity impacted; triggering captive portal detection") - - // Ensure that we select on captiveCtx so that we can time out - // triggering captive portal detection if the backend is shutdown. - select { - case b.needsCaptiveDetection <- true: - case <-ctx.Done(): - } - } else { - // If connectivity is not impacted, we know for sure we're not behind a captive portal, - // so drop any warning, and signal that we don't need captive portal detection. - b.health.SetHealthy(captivePortalWarnable) - select { - case b.needsCaptiveDetection <- false: - case <-ctx.Done(): - } + if f, ok := hookCaptivePortalHealthChange.GetOk(); ok { + f(b, state) } } // GetOrSetCaptureSink returns the current packet capture sink, creating it // with the provided newSink function if it does not already exist. func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) packet.CaptureSink { + if !buildfeatures.HasCapture { + return nil + } b.mu.Lock() defer b.mu.Unlock() @@ -1045,6 +1083,9 @@ func (b *LocalBackend) GetOrSetCaptureSink(newSink func() packet.CaptureSink) pa } func (b *LocalBackend) ClearCaptureSink() { + if !buildfeatures.HasCapture { + return + } // Shut down & uninstall the sink if there are no longer // any outputs on it. b.mu.Lock() @@ -1066,6 +1107,16 @@ func (b *LocalBackend) ClearCaptureSink() { // Shutdown halts the backend and all its sub-components. The backend // can no longer be used after Shutdown returns. func (b *LocalBackend) Shutdown() { + // Close the [eventbus.Client] and wait for LocalBackend.consumeEventbusTopics + // to return. Do this before acquiring b.mu: + // 1. LocalBackend.consumeEventbusTopics event handlers also acquire b.mu, + // they can deadlock with c.Shutdown(). + // 2. LocalBackend.consumeEventbusTopics event handlers may not guard against + // undesirable post/in-progress LocalBackend.Shutdown() behaviors. 
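The ordering constraint spelled out above generalizes: if a background consumer's handlers take the same mutex that Shutdown needs, Shutdown has to stop and wait for the consumer before acquiring that mutex, or the two can deadlock. A tiny standalone sketch of the safe ordering (worker and its fields are invented, not the real LocalBackend types):

package main

import (
	"fmt"
	"sync"
)

type worker struct {
	mu     sync.Mutex
	events chan string
	done   chan struct{}
	state  []string
}

func newWorker() *worker {
	w := &worker{events: make(chan string), done: make(chan struct{})}
	go w.loop()
	return w
}

// loop is the background consumer; its handler takes w.mu, just like event
// handlers that mutate backend state.
func (w *worker) loop() {
	defer close(w.done)
	for ev := range w.events {
		w.mu.Lock()
		w.state = append(w.state, ev)
		w.mu.Unlock()
	}
}

// Shutdown stops the consumer *before* taking w.mu. Reversing the order —
// locking w.mu and then waiting for the loop to exit — can deadlock if the
// loop is blocked trying to lock w.mu for an in-flight event.
func (w *worker) Shutdown() {
	close(w.events) // stop feeding the loop
	<-w.done        // wait for any in-flight handler to finish

	w.mu.Lock()
	defer w.mu.Unlock()
	fmt.Println("shut down after handling", len(w.state), "events")
}

func main() {
	w := newWorker()
	w.events <- "link-change"
	w.events <- "health-change"
	w.Shutdown()
}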
+ b.eventSubs.Close() + + b.em.close() + b.mu.Lock() if b.shutdownCalled { b.mu.Unlock() @@ -1073,7 +1124,7 @@ func (b *LocalBackend) Shutdown() { } b.shutdownCalled = true - if b.captiveCancel != nil { + if buildfeatures.HasCaptivePortal && b.captiveCancel != nil { b.logf("canceling captive portal context") b.captiveCancel() } @@ -1108,8 +1159,7 @@ func (b *LocalBackend) Shutdown() { if b.notifyCancel != nil { b.notifyCancel() } - extHost := b.extHost - b.extHost = nil + b.appConnector.Close() b.mu.Unlock() b.webClientShutdown() @@ -1118,24 +1168,21 @@ func (b *LocalBackend) Shutdown() { defer cancel() b.sockstatLogger.Shutdown(ctx) } - b.stopOfflineAutoUpdate() - b.unregisterNetMon() - b.unregisterHealthWatch() b.unregisterSysPolicyWatch() if cc != nil { cc.Shutdown() } b.ctxCancel(errShutdown) b.currentNode().shutdown(errShutdown) - extHost.Shutdown() + b.extHost.Shutdown() b.e.Close() <-b.e.Done() b.awaitNoGoroutinesInTest() } func (b *LocalBackend) awaitNoGoroutinesInTest() { - if !testenv.InTest() { + if !buildfeatures.HasDebug || !testenv.InTest() { return } ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second) @@ -1169,6 +1216,7 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView { p2.Persist.PrivateNodeKey = key.NodePrivate{} p2.Persist.OldPrivateNodeKey = key.NodePrivate{} p2.Persist.NetworkLockKey = key.NLPrivate{} + p2.Persist.AttestationKey = nil return p2.View() } @@ -1183,6 +1231,13 @@ func (b *LocalBackend) sanitizedPrefsLocked() ipn.PrefsView { return stripKeysFromPrefs(b.pm.CurrentPrefs()) } +// unsanitizedPersist returns the current PersistView, including any private keys. +func (b *LocalBackend) unsanitizedPersist() persist.PersistView { + b.mu.Lock() + defer b.mu.Unlock() + return b.pm.CurrentPrefs().Persist() +} + // Status returns the latest status of the backend and its // sub-components. 
func (b *LocalBackend) Status() *ipnstate.Status { @@ -1365,7 +1420,7 @@ func peerStatusFromNode(ps *ipnstate.PeerStatus, n tailcfg.NodeView) { ps.PublicKey = n.Key() ps.ID = n.StableID() ps.Created = n.Created() - ps.ExitNodeOption = tsaddr.ContainsExitRoutes(n.AllowedIPs()) + ps.ExitNodeOption = buildfeatures.HasUseExitNode && tsaddr.ContainsExitRoutes(n.AllowedIPs()) if n.Tags().Len() != 0 { v := n.Tags() ps.Tags = &v @@ -1440,7 +1495,7 @@ func (b *LocalBackend) WhoIs(proto string, ipp netip.AddrPort) (n tailcfg.NodeVi cn := b.currentNode() nid, ok := cn.NodeByAddr(ipp.Addr()) - if !ok { + if !ok && buildfeatures.HasNetstack { var ip netip.Addr if ipp.Port() != 0 { var protos []string @@ -1488,9 +1543,7 @@ func (b *LocalBackend) PeerCaps(src netip.Addr) tailcfg.PeerCapMap { } func (b *LocalBackend) GetFilterForTest() *filter.Filter { - if !testenv.InTest() { - panic("GetFilterForTest called outside of test") - } + testenv.AssertInTest() nb := b.currentNode() return nb.filterAtomic.Load() } @@ -1565,6 +1618,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control } wasBlocked := b.blocked + authWasInProgress := b.authURL != "" keyExpiryExtended := false if st.NetMap != nil { wasExpired := b.keyExpired @@ -1582,7 +1636,7 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.blockEngineUpdates(false) } - if st.LoginFinished() && (wasBlocked || b.seamlessRenewalEnabled()) { + if st.LoginFinished() && (wasBlocked || authWasInProgress) { if wasBlocked { // Auth completed, unblock the engine b.blockEngineUpdates(false) @@ -1831,6 +1885,9 @@ var preferencePolicies = []preferencePolicyInfo{ // // b.mu must be held. func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { + if !buildfeatures.HasSystemPolicy { + return false + } if controlURL, err := b.polc.GetString(pkey.ControlURL, prefs.ControlURL); err == nil && prefs.ControlURL != controlURL { prefs.ControlURL = controlURL anyChange = true @@ -1896,6 +1953,9 @@ func (b *LocalBackend) applySysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { // // b.mu must be held. func (b *LocalBackend) applyExitNodeSysPolicyLocked(prefs *ipn.Prefs) (anyChange bool) { + if !buildfeatures.HasUseExitNode { + return false + } if exitNodeIDStr, _ := b.polc.GetString(pkey.ExitNodeID, ""); exitNodeIDStr != "" { exitNodeID := tailcfg.StableNodeID(exitNodeIDStr) @@ -2001,7 +2061,7 @@ func (b *LocalBackend) sysPolicyChanged(policy policyclient.PolicyChange) { b.mu.Unlock() } - if policy.HasChanged(pkey.AllowedSuggestedExitNodes) { + if buildfeatures.HasUseExitNode && policy.HasChanged(pkey.AllowedSuggestedExitNodes) { b.refreshAllowedSuggestions() // Re-evaluate exit node suggestion now that the policy setting has changed. if _, err := b.SuggestExitNode(); err != nil && !errors.Is(err, ErrNoPreferredDERP) { @@ -2072,6 +2132,9 @@ func (b *LocalBackend) UpdateNetmapDelta(muts []netmap.NodeMutation) (handled bo // mustationsAreWorthyOfRecalculatingSuggestedExitNode reports whether any mutation type in muts is // worthy of recalculating the suggested exit node. func mutationsAreWorthyOfRecalculatingSuggestedExitNode(muts []netmap.NodeMutation, cn *nodeBackend, sid tailcfg.StableNodeID) bool { + if !buildfeatures.HasUseExitNode { + return false + } for _, m := range muts { n, ok := cn.NodeByID(m.NodeIDBeingMutated()) if !ok { @@ -2125,6 +2188,9 @@ func mutationsAreWorthyOfTellingIPNBus(muts []netmap.NodeMutation) bool { // // b.mu must be held. 
func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !buildfeatures.HasUseExitNode { + return false + } // As of 2025-07-08, the only supported auto exit node expression is [ipn.AnyExitNode]. // // However, to maintain forward compatibility with future auto exit node expressions, @@ -2169,6 +2235,9 @@ func (b *LocalBackend) resolveAutoExitNodeLocked(prefs *ipn.Prefs) (prefsChanged // // b.mu must be held. func (b *LocalBackend) resolveExitNodeIPLocked(prefs *ipn.Prefs) (prefsChanged bool) { + if !buildfeatures.HasUseExitNode { + return false + } // If we have a desired IP on file, try to find the corresponding node. if !prefs.ExitNodeIP.IsValid() { return false @@ -2237,14 +2306,15 @@ func (b *LocalBackend) setWgengineStatus(s *wgengine.Status, err error) { b.send(ipn.Notify{Engine: &es}) } +// broadcastStatusChanged must not be called with b.mu held. func (b *LocalBackend) broadcastStatusChanged() { // The sync.Cond docs say: "It is allowed but not required for the caller to hold c.L during the call." - // In this particular case, we must acquire b.statusLock. Otherwise we might broadcast before + // In this particular case, we must acquire b.mu. Otherwise we might broadcast before // the waiter (in requestEngineStatusAndWait) starts to wait, in which case // the waiter can get stuck indefinitely. See PR 2865. - b.statusLock.Lock() + b.mu.Lock() b.statusChanged.Broadcast() - b.statusLock.Unlock() + b.mu.Unlock() } // SetNotifyCallback sets the function to call when the backend has something to @@ -2289,17 +2359,10 @@ func (b *LocalBackend) SetControlClientGetterForTesting(newControlClient func(co b.ccGen = newControlClient } -// DisablePortMapperForTest disables the portmapper for tests. -// It must be called before Start. -func (b *LocalBackend) DisablePortMapperForTest() { - b.mu.Lock() - defer b.mu.Unlock() - b.portpoll = nil -} - // PeersForTest returns all the current peers, sorted by Node.ID, // for integration tests in another repo. func (b *LocalBackend) PeersForTest() []tailcfg.NodeView { + testenv.AssertInTest() return b.currentNode().PeersForTest() } @@ -2410,10 +2473,23 @@ func (b *LocalBackend) Start(opts ipn.Options) error { if b.reconcilePrefsLocked(newPrefs) { prefsChanged = true } + + // neither UpdatePrefs or reconciliation should change Persist + newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() + + if buildfeatures.HasTPM { + if genKey, ok := feature.HookGenerateAttestationKeyIfEmpty.GetOk(); ok { + newKey, err := genKey(newPrefs.Persist, b.logf) + if err != nil { + b.logf("failed to populate attestation key from TPM: %v", err) + } + if newKey { + prefsChanged = true + } + } + } + if prefsChanged { - // Neither opts.UpdatePrefs nor prefs reconciliation - // is allowed to modify Persist; retain the old value. 
- newPrefs.Persist = b.pm.CurrentPrefs().Persist().AsStruct() if err := b.pm.SetPrefs(newPrefs.View(), cn.NetworkProfile()); err != nil { b.logf("failed to save updated and reconciled prefs: %v", err) } @@ -2444,16 +2520,8 @@ func (b *LocalBackend) Start(opts ipn.Options) error { persistv = new(persist.Persist) } - if b.portpoll != nil { - b.portpollOnce.Do(func() { - b.goTracker.Go(b.readPoller) - }) - } - discoPublic := b.MagicConn().DiscoPublicKey() - var err error - isNetstack := b.sys.IsNetstackRouter() debugFlags := controlDebugFlags if isNetstack { @@ -2466,33 +2534,37 @@ func (b *LocalBackend) Start(opts ipn.Options) error { cb() } } + + var c2nHandler http.Handler + if buildfeatures.HasC2N { + c2nHandler = http.HandlerFunc(b.handleC2N) + } + // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. EditPrefs allows you to overwrite ServerURL, // but it won't take effect until the next Start. cc, err := b.getNewControlClientFuncLocked()(controlclient.Options{ - GetMachinePrivateKey: b.createGetMachinePrivateKeyFunc(), - Logf: logger.WithPrefix(b.logf, "control: "), - Persist: *persistv, - ServerURL: serverURL, - AuthKey: opts.AuthKey, - Hostinfo: hostinfo, - HTTPTestClient: httpTestClient, - DiscoPublicKey: discoPublic, - DebugFlags: debugFlags, - HealthTracker: b.health, - PolicyClient: b.sys.PolicyClientOrDefault(), - Pinger: b, - PopBrowserURL: b.tellClientToBrowseToURL, - OnClientVersion: b.onClientVersion, - OnTailnetDefaultAutoUpdate: b.onTailnetDefaultAutoUpdate, - OnControlTime: b.em.onControlTime, - Dialer: b.Dialer(), - Observer: b, - C2NHandler: http.HandlerFunc(b.handleC2N), - DialPlan: &b.dialPlan, // pointer because it can't be copied - ControlKnobs: b.sys.ControlKnobs(), - Shutdown: ccShutdown, + GetMachinePrivateKey: b.createGetMachinePrivateKeyFunc(), + Logf: logger.WithPrefix(b.logf, "control: "), + Persist: *persistv, + ServerURL: serverURL, + AuthKey: opts.AuthKey, + Hostinfo: hostinfo, + HTTPTestClient: httpTestClient, + DiscoPublicKey: discoPublic, + DebugFlags: debugFlags, + HealthTracker: b.health, + PolicyClient: b.sys.PolicyClientOrDefault(), + Pinger: b, + PopBrowserURL: b.tellClientToBrowseToURL, + Dialer: b.Dialer(), + Observer: b, + C2NHandler: c2nHandler, + DialPlan: &b.dialPlan, // pointer because it can't be copied + ControlKnobs: b.sys.ControlKnobs(), + Shutdown: ccShutdown, + Bus: b.sys.Bus.Get(), // Don't warn about broken Linux IP forwarding when // netstack is being used. @@ -2584,6 +2656,36 @@ var invalidPacketFilterWarnable = health.Register(&health.Warnable{ Text: health.StaticMessage("The coordination server sent an invalid packet filter permitting traffic to unlocked nodes; rejecting all packets for safety"), }) +// filterInputs holds the inputs to the packet filter. +// +// Any field changes or additions here should be accompanied by a change to +// [filterInputs.Equal] and [filterInputs.Clone] if necessary. (e.g. 
non-view +// and non-value fields) +type filterInputs struct { + HaveNetmap bool + Addrs views.Slice[netip.Prefix] + FilterMatch views.Slice[filter.Match] + LocalNets views.Slice[netipx.IPRange] + LogNets views.Slice[netipx.IPRange] + ShieldsUp bool + SSHPolicy tailcfg.SSHPolicyView +} + +func (fi *filterInputs) Equal(o *filterInputs) bool { + if fi == nil || o == nil { + return fi == o + } + return reflect.DeepEqual(fi, o) +} + +func (fi *filterInputs) Clone() *filterInputs { + if fi == nil { + return nil + } + v := *fi // all fields are shallow copyable + return &v +} + // updateFilterLocked updates the packet filter in wgengine based on the // given netMap and user preferences. // @@ -2641,31 +2743,33 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } } if prefs.Valid() { - for _, r := range prefs.AdvertiseRoutes().All() { - if r.Bits() == 0 { - // When offering a default route to the world, we - // filter out locally reachable LANs, so that the - // default route effectively appears to be a "guest - // wifi": you get internet access, but to additionally - // get LAN access the LAN(s) need to be offered - // explicitly as well. - localInterfaceRoutes, hostIPs, err := interfaceRoutes() - if err != nil { - b.logf("getting local interface routes: %v", err) - continue - } - s, err := shrinkDefaultRoute(r, localInterfaceRoutes, hostIPs) - if err != nil { - b.logf("computing default route filter: %v", err) - continue + if buildfeatures.HasAdvertiseRoutes { + for _, r := range prefs.AdvertiseRoutes().All() { + if r.Bits() == 0 { + // When offering a default route to the world, we + // filter out locally reachable LANs, so that the + // default route effectively appears to be a "guest + // wifi": you get internet access, but to additionally + // get LAN access the LAN(s) need to be offered + // explicitly as well. + localInterfaceRoutes, hostIPs, err := interfaceRoutes() + if err != nil { + b.logf("getting local interface routes: %v", err) + continue + } + s, err := shrinkDefaultRoute(r, localInterfaceRoutes, hostIPs) + if err != nil { + b.logf("computing default route filter: %v", err) + continue + } + localNetsB.AddSet(s) + } else { + localNetsB.AddPrefix(r) + // When advertising a non-default route, we assume + // this is a corporate subnet that should be present + // in the audit logs. + logNetsB.AddPrefix(r) } - localNetsB.AddSet(s) - } else { - localNetsB.AddPrefix(r) - // When advertising a non-default route, we assume - // this is a corporate subnet that should be present - // in the audit logs. - logNetsB.AddPrefix(r) } } @@ -2676,27 +2780,27 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { // The correct filter rules are synthesized by the coordination server // and sent down, but the address needs to be part of the 'local net' for the // filter package to even bother checking the filter rules, so we set them here. 
- if prefs.AppConnector().Advertise { + if buildfeatures.HasAppConnectors && prefs.AppConnector().Advertise { localNetsB.Add(netip.MustParseAddr("0.0.0.0")) localNetsB.Add(netip.MustParseAddr("::0")) } } localNets, _ := localNetsB.IPSet() logNets, _ := logNetsB.IPSet() - var sshPol tailcfg.SSHPolicy - if haveNetmap && netMap.SSHPolicy != nil { - sshPol = *netMap.SSHPolicy - } - - changed := deephash.Update(&b.filterHash, &struct { - HaveNetmap bool - Addrs views.Slice[netip.Prefix] - FilterMatch []filter.Match - LocalNets []netipx.IPRange - LogNets []netipx.IPRange - ShieldsUp bool - SSHPolicy tailcfg.SSHPolicy - }{haveNetmap, addrs, packetFilter, localNets.Ranges(), logNets.Ranges(), shieldsUp, sshPol}) + var sshPol tailcfg.SSHPolicyView + if buildfeatures.HasSSH && haveNetmap && netMap.SSHPolicy != nil { + sshPol = netMap.SSHPolicy.View() + } + + changed := checkchange.Update(&b.lastFilterInputs, &filterInputs{ + HaveNetmap: haveNetmap, + Addrs: addrs, + FilterMatch: views.SliceOf(packetFilter), + LocalNets: views.SliceOf(localNets.Ranges()), + LogNets: views.SliceOf(logNets.Ranges()), + ShieldsUp: shieldsUp, + SSHPolicy: sshPol, + }) if !changed { return } @@ -2726,123 +2830,6 @@ func (b *LocalBackend) updateFilterLocked(prefs ipn.PrefsView) { } } -// captivePortalWarnable is a Warnable which is set to an unhealthy state when a captive portal is detected. -var captivePortalWarnable = health.Register(&health.Warnable{ - Code: "captive-portal-detected", - Title: "Captive portal detected", - // High severity, because captive portals block all traffic and require user intervention. - Severity: health.SeverityHigh, - Text: health.StaticMessage("This network requires you to log in using your web browser."), - ImpactsConnectivity: true, -}) - -func (b *LocalBackend) checkCaptivePortalLoop(ctx context.Context) { - var tmr *time.Timer - - maybeStartTimer := func() { - // If there's an existing timer, nothing to do; just continue - // waiting for it to expire. Otherwise, create a new timer. - if tmr == nil { - tmr = time.NewTimer(captivePortalDetectionInterval) - } - } - maybeStopTimer := func() { - if tmr == nil { - return - } - if !tmr.Stop() { - <-tmr.C - } - tmr = nil - } - - for { - if ctx.Err() != nil { - maybeStopTimer() - return - } - - // First, see if we have a signal on our "healthy" channel, which - // takes priority over an existing timer. Because a select is - // nondeterministic, we explicitly check this channel before - // entering the main select below, so that we're guaranteed to - // stop the timer before starting captive portal detection. - select { - case needsCaptiveDetection := <-b.needsCaptiveDetection: - if needsCaptiveDetection { - maybeStartTimer() - } else { - maybeStopTimer() - } - default: - } - - var timerChan <-chan time.Time - if tmr != nil { - timerChan = tmr.C - } - select { - case <-ctx.Done(): - // All done; stop the timer and then exit. - maybeStopTimer() - return - case <-timerChan: - // Kick off captive portal check - b.performCaptiveDetection() - // nil out timer to force recreation - tmr = nil - case needsCaptiveDetection := <-b.needsCaptiveDetection: - if needsCaptiveDetection { - maybeStartTimer() - } else { - // Healthy; cancel any existing timer - maybeStopTimer() - } - } - } -} - -// performCaptiveDetection checks if captive portal detection is enabled via controlknob. If so, it runs -// the detection and updates the Warnable accordingly. 
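The hunk above replaces deephash-based change detection with a typed filterInputs snapshot compared via checkchange.Update: store the new inputs and report whether they differ from the last stored value. Below is a minimal, self-contained sketch of that compare-and-store pattern; the types and generic helper are illustrative stand-ins, not the real checkchange package, whose API may differ.

package main

import "fmt"

// Equaler is anything that can compare itself to another value of the
// same type, like filterInputs.Equal in the diff above.
type Equaler[T any] interface {
	Equal(T) bool
}

// update stores newVal in *last and reports whether it differed from the
// previously stored value (the role checkchange.Update plays above).
func update[T Equaler[T]](last *T, newVal T) (changed bool) {
	changed = !(*last).Equal(newVal)
	*last = newVal
	return changed
}

// inputs is a toy stand-in for filterInputs.
type inputs struct {
	ShieldsUp bool
	Ports     []uint16
}

func (a inputs) Equal(b inputs) bool {
	if a.ShieldsUp != b.ShieldsUp || len(a.Ports) != len(b.Ports) {
		return false
	}
	for i := range a.Ports {
		if a.Ports[i] != b.Ports[i] {
			return false
		}
	}
	return true
}

func main() {
	var last inputs
	fmt.Println(update(&last, inputs{ShieldsUp: true})) // true: inputs changed, reconfigure
	fmt.Println(update(&last, inputs{ShieldsUp: true})) // false: same inputs, skip work
}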
-func (b *LocalBackend) performCaptiveDetection() { - if !b.shouldRunCaptivePortalDetection() { - return - } - - d := captivedetection.NewDetector(b.logf) - b.mu.Lock() // for b.hostinfo - cn := b.currentNode() - dm := cn.DERPMap() - preferredDERP := 0 - if b.hostinfo != nil { - if b.hostinfo.NetInfo != nil { - preferredDERP = b.hostinfo.NetInfo.PreferredDERP - } - } - ctx := b.ctx - netMon := b.NetMon() - b.mu.Unlock() - found := d.Detect(ctx, netMon, dm, preferredDERP) - if found { - if !b.health.IsUnhealthy(captivePortalWarnable) { - metricCaptivePortalDetected.Add(1) - } - b.health.SetUnhealthy(captivePortalWarnable, health.Args{}) - } else { - b.health.SetHealthy(captivePortalWarnable) - } -} - -// shouldRunCaptivePortalDetection reports whether captive portal detection -// should be run. It is enabled by default, but can be disabled via a control -// knob. It is also only run when the user explicitly wants the backend to be -// running. -func (b *LocalBackend) shouldRunCaptivePortalDetection() bool { - b.mu.Lock() - defer b.mu.Unlock() - return !b.ControlKnobs().DisableCaptivePortalDetection.Load() && b.pm.prefs.WantRunning() -} - // packetFilterPermitsUnlockedNodes reports any peer in peers with the // UnsignedPeerAPIOnly bool set true has any of its allowed IPs in the packet // filter. @@ -3016,57 +3003,6 @@ func shrinkDefaultRoute(route netip.Prefix, localInterfaceRoutes *netipx.IPSet, return b.IPSet() } -// readPoller is a goroutine that receives service lists from -// b.portpoll and propagates them into the controlclient's HostInfo. -func (b *LocalBackend) readPoller() { - if !envknob.BoolDefaultTrue("TS_PORTLIST") { - return - } - - ticker, tickerChannel := b.clock.NewTicker(portlist.PollInterval()) - defer ticker.Stop() - for { - select { - case <-tickerChannel: - case <-b.ctx.Done(): - return - } - - if !b.shouldUploadServices() { - continue - } - - ports, changed, err := b.portpoll.Poll() - if err != nil { - b.logf("error polling for open ports: %v", err) - return - } - if !changed { - continue - } - sl := []tailcfg.Service{} - for _, p := range ports { - s := tailcfg.Service{ - Proto: tailcfg.ServiceProto(p.Proto), - Port: p.Port, - Description: p.Process, - } - if policy.IsInterestingService(s, version.OS()) { - sl = append(sl, s) - } - } - - b.mu.Lock() - if b.hostinfo == nil { - b.hostinfo = new(tailcfg.Hostinfo) - } - b.hostinfo.Services = sl - b.mu.Unlock() - - b.doSetHostinfoFilterServices() - } -} - // GetPushDeviceToken returns the push notification device token. func (b *LocalBackend) GetPushDeviceToken() string { return b.pushDeviceToken.Load() @@ -3223,21 +3159,34 @@ func (b *LocalBackend) WatchNotificationsAs(ctx context.Context, actor ipnauth.A // listener. func filterPrivateKeys(fn func(roNotify *ipn.Notify) (keepGoing bool)) func(*ipn.Notify) bool { return func(n *ipn.Notify) bool { - if n.NetMap == nil || n.NetMap.PrivateKey.IsZero() { + redacted, changed := redactNetmapPrivateKeys(n.NetMap) + if !changed { return fn(n) } // The netmap in n is shared across all watchers, so to mutate it for a // single watcher we have to clone the notify and the netmap. We can // make shallow clones, at least. - nm2 := *n.NetMap n2 := *n - n2.NetMap = &nm2 - n2.NetMap.PrivateKey = key.NodePrivate{} + n2.NetMap = redacted return fn(&n2) } } +// redactNetmapPrivateKeys returns a copy of nm with private keys zeroed out. +// If no change was needed, it returns nm unmodified. 
+func redactNetmapPrivateKeys(nm *netmap.NetworkMap) (redacted *netmap.NetworkMap, changed bool) { + if nm == nil || nm.PrivateKey.IsZero() { + return nm, false + } + + // The netmap might be shared across watchers, so make at least a shallow + // clone before mutating it. + nm2 := *nm + nm2.PrivateKey = key.NodePrivate{} + return &nm2, true +} + // appendHealthActions returns an IPN listener func that wraps the supplied IPN // listener func and transforms health messages passed to the wrapped listener. // If health messages with PrimaryActions are present, it appends the label & @@ -3482,11 +3431,12 @@ func (b *LocalBackend) popBrowserAuthNow(url string, keyExpired bool, recipient if !b.seamlessRenewalEnabled() || keyExpired { b.blockEngineUpdates(true) b.stopEngineAndWait() + + if b.State() == ipn.Running { + b.enterState(ipn.Starting) + } } b.tellRecipientToBrowseToURL(url, toNotificationTarget(recipient)) - if b.State() == ipn.Running { - b.enterState(ipn.Starting) - } } // validPopBrowserURL reports whether urlStr is a valid value for a @@ -3574,7 +3524,7 @@ func (b *LocalBackend) onTailnetDefaultAutoUpdate(au bool) { // can still manually enable auto-updates on this node. return } - if clientupdate.CanAutoUpdate() { + if buildfeatures.HasClientUpdate && feature.CanAutoUpdate() { b.logf("using tailnet default auto-update setting: %v", au) prefsClone := prefs.AsStruct() prefsClone.AutoUpdate.Apply = opt.NewBool(au) @@ -3723,46 +3673,6 @@ func generateInterceptVIPServicesTCPPortFunc(svcAddrPorts map[netip.Addr]func(ui } } -// setVIPServicesTCPPortsIntercepted populates b.shouldInterceptVIPServicesTCPPortAtomic with an -// efficient func for ShouldInterceptTCPPort to use, which is called on every incoming packet. -func (b *LocalBackend) setVIPServicesTCPPortsIntercepted(svcPorts map[tailcfg.ServiceName][]uint16) { - b.mu.Lock() - defer b.mu.Unlock() - b.setVIPServicesTCPPortsInterceptedLocked(svcPorts) -} - -func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { - if len(svcPorts) == 0 { - b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) - return - } - nm := b.currentNode().NetMap() - if nm == nil { - b.logf("can't set intercept function for Service TCP Ports, netMap is nil") - return - } - vipServiceIPMap := nm.GetVIPServiceIPMap() - if len(vipServiceIPMap) == 0 { - // No approved VIP Services - return - } - - svcAddrPorts := make(map[netip.Addr]func(uint16) bool) - // Only set the intercept function if the service has been assigned a VIP. - for svcName, ports := range svcPorts { - addrs, ok := vipServiceIPMap[svcName] - if !ok { - continue - } - interceptFn := generateInterceptTCPPortFunc(ports) - for _, addr := range addrs { - svcAddrPorts[addr] = interceptFn - } - } - - b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) -} - // setAtomicValuesFromPrefsLocked populates sshAtomicBool, containsViaIPFuncAtomic, // shouldInterceptTCPPortAtomic, and exposeRemoteWebClientAtomicBool from the prefs p, // which may be !Valid(). 
@@ -3773,7 +3683,9 @@ func (b *LocalBackend) setAtomicValuesFromPrefsLocked(p ipn.PrefsView) { if !p.Valid() { b.containsViaIPFuncAtomic.Store(ipset.FalseContainsIPFunc()) b.setTCPPortsIntercepted(nil) - b.setVIPServicesTCPPortsInterceptedLocked(nil) + if f, ok := hookServeClearVIPServicesTCPPortsInterceptedLocked.GetOk(); ok { + f(b) + } b.lastServeConfJSON = mem.B(nil) b.serveConfig = ipn.ServeConfigView{} } else { @@ -3923,6 +3835,9 @@ func (b *LocalBackend) Ping(ctx context.Context, ip netip.Addr, pingType tailcfg } func (b *LocalBackend) pingPeerAPI(ctx context.Context, ip netip.Addr) (peer tailcfg.NodeView, peerBase string, err error) { + if !buildfeatures.HasPeerAPIClient { + return peer, peerBase, feature.ErrUnavailable + } var zero tailcfg.NodeView ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -3988,23 +3903,6 @@ func (b *LocalBackend) parseWgStatusLocked(s *wgengine.Status) (ret ipn.EngineSt return ret } -// shouldUploadServices reports whether this node should include services -// in Hostinfo. When the user preferences currently request "shields up" -// mode, all inbound connections are refused, so services are not reported. -// Otherwise, shouldUploadServices respects NetMap.CollectServices. -// TODO(nickkhyl): move this into [nodeBackend]? -func (b *LocalBackend) shouldUploadServices() bool { - b.mu.Lock() - defer b.mu.Unlock() - - p := b.pm.CurrentPrefs() - nm := b.currentNode().NetMap() - if !p.Valid() || nm == nil { - return false // default to safest setting - } - return !p.ShieldsUp() && nm.CollectServices -} - // SetCurrentUser is used to implement support for multi-user systems (only // Windows 2022-11-25). On such systems, the actor is used to determine which // user's state should be used. The current user is maintained by active @@ -4163,6 +4061,7 @@ func (b *LocalBackend) resolveBestProfileLocked() (_ ipn.LoginProfileView, isBac // It is used for testing only, and will be removed along with the rest of the // "current user" functionality as we progress on the multi-user improvements (tailscale/corp#18342). func (b *LocalBackend) CurrentUserForTest() (ipn.WindowsUserID, ipnauth.Actor) { + testenv.AssertInTest() b.mu.Lock() defer b.mu.Unlock() return b.pm.CurrentUserID(), b.currentUser @@ -4207,7 +4106,7 @@ func (b *LocalBackend) checkPrefsLocked(p *ipn.Prefs) error { if err := b.checkAutoUpdatePrefsLocked(p); err != nil { errs = append(errs, err) } - return multierr.New(errs...) + return errors.Join(errs...) } func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error { @@ -4278,6 +4177,9 @@ var exitNodeMisconfigurationWarnable = health.Register(&health.Warnable{ // updateExitNodeUsageWarning updates a warnable meant to notify users of // configuration issues that could break exit node usage. 
func updateExitNodeUsageWarning(p ipn.PrefsView, state *netmon.State, healthTracker *health.Tracker) { + if !buildfeatures.HasUseExitNode { + return + } var msg string if p.ExitNodeIP().IsValid() || p.ExitNodeID() != "" { warn, _ := netutil.CheckReversePathFiltering(state) @@ -4297,6 +4199,9 @@ func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error { if !tryingToUseExitNode { return nil } + if !buildfeatures.HasUseExitNode { + return feature.ErrUnavailable + } if err := featureknob.CanUseExitNode(); err != nil { return err @@ -4316,7 +4221,12 @@ func (b *LocalBackend) checkFunnelEnabledLocked(p *ipn.Prefs) error { } func (b *LocalBackend) checkAutoUpdatePrefsLocked(p *ipn.Prefs) error { - if p.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() { + if !buildfeatures.HasClientUpdate { + if p.AutoUpdate.Apply.EqualBool(true) { + return errors.New("Auto-update support is disabled in this build") + } + } + if p.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() { return errors.New("Auto-updates are not supported on this platform.") } return nil @@ -4332,6 +4242,9 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P defer unlock() p0 := b.pm.CurrentPrefs() + if !buildfeatures.HasUseExitNode { + return p0, nil + } if v && p0.ExitNodeID() != "" { // Already on. return p0, nil @@ -4376,6 +4289,9 @@ func (b *LocalBackend) SetUseExitNodeEnabled(actor ipnauth.Actor, v bool) (ipn.P // MaybeClearAppConnector clears the routes from any AppConnector if // AdvertiseRoutes has been set in the MaskedPrefs. func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { + if !buildfeatures.HasAppConnectors { + return nil + } var err error if ac := b.AppConnector(); ac != nil && mp.AdvertiseRoutesSet { err = ac.ClearRoutes() @@ -4443,7 +4359,7 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn } } - return multierr.New(errs...) + return errors.Join(errs...) } // changeDisablesExitNodeLocked reports whether applying the change @@ -4459,6 +4375,9 @@ func (b *LocalBackend) checkEditPrefsAccessLocked(actor ipnauth.Actor, prefs ipn // // b.mu must be held. func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change *ipn.MaskedPrefs) bool { + if !buildfeatures.HasUseExitNode { + return false + } if !change.AutoExitNodeSet && !change.ExitNodeIDSet && !change.ExitNodeIPSet { // The change does not affect exit node usage. return false @@ -4488,7 +4407,6 @@ func (b *LocalBackend) changeDisablesExitNodeLocked(prefs ipn.PrefsView, change // but wasn't empty before, then the change disables // exit node usage. return tmpPrefs.ExitNodeID == "" - } // adjustEditPrefsLocked applies additional changes to mp if necessary, @@ -4703,32 +4621,6 @@ func (b *LocalBackend) checkProfileNameLocked(p *ipn.Prefs) error { return nil } -// wantIngressLocked reports whether this node has ingress configured. This bool -// is sent to the coordination server (in Hostinfo.WireIngress) as an -// optimization hint to know primarily which nodes are NOT using ingress, to -// avoid doing work for regular nodes. -// -// Even if the user's ServeConfig.AllowFunnel map was manually edited in raw -// mode and contains map entries with false values, sending true (from Len > 0) -// is still fine. This is only an optimization hint for the control plane and -// doesn't affect security or correctness. And we also don't expect people to -// modify their ServeConfig in raw mode. 
-func (b *LocalBackend) wantIngressLocked() bool { - return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() -} - -// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. This bool is sent to control (in -// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. -func (b *LocalBackend) hasIngressEnabledLocked() bool { - return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() -} - -// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it -// seems that it is intended to be used with funnel. -func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { - return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() -} - // setPrefsLockedOnEntry requires b.mu be held to call it, but it // unlocks b.mu when done. newp ownership passes to this function. // It returns a read-only copy of the new prefs. @@ -4763,7 +4655,7 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.updateFilterLocked(newp.View()) - if oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { + if buildfeatures.HasSSH && oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() { if b.sshServer != nil { b.goTracker.Go(b.sshServer.Shutdown) b.sshServer = nil @@ -4795,14 +4687,6 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) b.resetAlwaysOnOverrideLocked() } - if newp.AutoUpdate.Apply.EqualBool(true) { - if b.state != ipn.Running { - b.maybeStartOfflineAutoUpdate(newp.View()) - } - } else { - b.stopOfflineAutoUpdate() - } - unlock.UnlockEarly() if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged { @@ -4831,6 +4715,9 @@ func (b *LocalBackend) setPrefsLockedOnEntry(newp *ipn.Prefs, unlock unlockOnce) // GetPeerAPIPort returns the port number for the peerapi server // running on the provided IP. func (b *LocalBackend) GetPeerAPIPort(ip netip.Addr) (port uint16, ok bool) { + if !buildfeatures.HasPeerAPIServer { + return 0, false + } b.mu.Lock() defer b.mu.Unlock() for _, pln := range b.peerAPIListeners { @@ -4872,62 +4759,15 @@ var ( magicDNSIPv6 = tsaddr.TailscaleServiceIPv6() ) -// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if -// no handler is needed. It also returns a list of TCP socket options to -// apply to the socket before calling the handler. -// TCPHandlerForDst is called both for connections to our node's local IP -// as well as to the service IP (quad 100). -func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) { - // First handle internal connections to the service IP - hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6 - if hittingServiceIP { - switch dst.Port() { - case 80: - // TODO(mpminardi): do we want to show an error message if the web client - // has been disabled instead of the more "basic" web UI? - if b.ShouldRunWebClient() { - return b.handleWebClientConn, opts - } - return b.HandleQuad100Port80Conn, opts - case DriveLocalPort: - return b.handleDriveConn, opts - } - } - - // TODO(tailscale/corp#26001): Get handler for VIP services and Local IPs using - // the same function. - if handler := b.tcpHandlerForVIPService(dst, src); handler != nil { - return handler, opts - } - // Then handle external connections to the local IP. 
- if !b.isLocalIP(dst.Addr()) { - return nil, nil - } - if dst.Port() == 22 && b.ShouldRunSSH() { - // Use a higher keepalive idle time for SSH connections, as they are - // typically long lived and idle connections are more likely to be - // intentional. Ideally we would turn this off entirely, but we can't - // tell the difference between a long lived connection that is idle - // vs a connection that is dead because the peer has gone away. - // We pick 72h as that is typically sufficient for a long weekend. - opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour))) - return b.handleSSHConn, opts - } - // TODO(will,sonia): allow customizing web client port ? - if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() { - return b.handleWebClientConn, opts - } - if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port { - return func(c net.Conn) error { - b.handlePeerAPIConn(src, dst, c) - return nil - }, opts - } - if handler := b.tcpHandlerForServe(dst.Port(), src, nil); handler != nil { - return handler, opts - } - return nil, nil -} +// Hook exclusively for serve. +var ( + hookServeTCPHandlerForVIPService feature.Hook[func(b *LocalBackend, dst netip.AddrPort, src netip.AddrPort) (handler func(c net.Conn) error)] + hookTCPHandlerForServe feature.Hook[func(b *LocalBackend, dport uint16, srcAddr netip.AddrPort, f *funnelFlow) (handler func(net.Conn) error)] + hookServeUpdateServeTCPPortNetMapAddrListenersLocked feature.Hook[func(b *LocalBackend, ports []uint16)] + + hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked feature.Hook[func(b *LocalBackend, prefs ipn.PrefsView) (handlePorts []uint16)] + hookServeClearVIPServicesTCPPortsInterceptedLocked feature.Hook[func(*LocalBackend)] +) func (b *LocalBackend) handleDriveConn(conn net.Conn) error { fs, ok := b.sys.DriveForLocal.GetOK() @@ -4961,6 +4801,25 @@ func (b *LocalBackend) peerAPIServicesLocked() (ret []tailcfg.Service) { return ret } +// PortlistServices is an eventbus topic for the portlist extension +// to advertise the running services on the host. +type PortlistServices []tailcfg.Service + +func (b *LocalBackend) setPortlistServices(sl []tailcfg.Service) { + if !buildfeatures.HasPortList { // redundant, but explicit for linker deadcode and humans + return + } + + b.mu.Lock() + if b.hostinfo == nil { + b.hostinfo = new(tailcfg.Hostinfo) + } + b.hostinfo.Services = sl + b.mu.Unlock() + + b.doSetHostinfoFilterServices() +} + // doSetHostinfoFilterServices calls SetHostinfo on the controlclient, // possibly after mangling the given hostinfo. // @@ -4986,13 +4845,15 @@ func (b *LocalBackend) doSetHostinfoFilterServices() { // TODO(maisem,bradfitz): store hostinfo as a view, not as a mutable struct. hi := *b.hostinfo // shallow copy - unlock.UnlockEarly() // Make a shallow copy of hostinfo so we can mutate // at the Service field. - if !b.shouldUploadServices() { + if f, ok := b.extHost.Hooks().ShouldUploadServices.GetOk(); !ok || !f() { hi.Services = []tailcfg.Service{} } + + unlock.UnlockEarly() + // Don't mutate hi.Service's underlying array. Append to // the slice with no free capacity. c := len(hi.Services) @@ -5053,6 +4914,9 @@ func (b *LocalBackend) blockEngineUpdates(block bool) { // current network map and preferences. // b.mu must be held. 
func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs ipn.PrefsView) { + if !buildfeatures.HasAppConnectors { + return + } const appConnectorCapName = "tailscale.com/app-connectors" defer func() { if b.hostinfo != nil { @@ -5060,27 +4924,28 @@ func (b *LocalBackend) reconfigAppConnectorLocked(nm *netmap.NetworkMap, prefs i } }() + // App connectors have been disabled. if !prefs.AppConnector().Advertise { + b.appConnector.Close() // clean up a previous connector (safe on nil) b.appConnector = nil return } - shouldAppCStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() - if b.appConnector == nil || b.appConnector.ShouldStoreRoutes() != shouldAppCStoreRoutes { - var ri *appc.RouteInfo - var storeFunc func(*appc.RouteInfo) error - if shouldAppCStoreRoutes { - var err error - ri, err = b.readRouteInfoLocked() - if err != nil { - ri = &appc.RouteInfo{} - if err != ipn.ErrStateNotExist { - b.logf("Unsuccessful Read RouteInfo: ", err) - } - } - storeFunc = b.storeRouteInfo - } - b.appConnector = appc.NewAppConnector(b.logf, b, ri, storeFunc) + // We don't (yet) have an app connector configured, or the configured + // connector has a different route persistence setting. + shouldStoreRoutes := b.ControlKnobs().AppCStoreRoutes.Load() + if b.appConnector == nil || (shouldStoreRoutes != b.appConnector.ShouldStoreRoutes()) { + ri, err := b.readRouteInfoLocked() + if err != nil && err != ipn.ErrStateNotExist { + b.logf("Unsuccessful Read RouteInfo: %v", err) + } + b.appConnector.Close() // clean up a previous connector (safe on nil) + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: b.logf, + EventBus: b.sys.Bus.Get(), + RouteInfo: ri, + HasStoredRoutes: shouldStoreRoutes, + }) } if nm == nil { return @@ -5166,7 +5031,7 @@ func (b *LocalBackend) authReconfig() { hasPAC := b.prevIfState.HasPAC() disableSubnetsIfPAC := cn.SelfHasCap(tailcfg.NodeAttrDisableSubnetsIfPAC) dohURL, dohURLOK := cn.exitNodeCanProxyDNS(prefs.ExitNodeID()) - dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, b.logf, version.OS()) + dcfg := cn.dnsConfigForNetmap(prefs, b.keyExpired, version.OS()) // If the current node is an app connector, ensure the app connector machine is started b.reconfigAppConnectorLocked(nm, prefs) closing := b.shutdownCalled @@ -5204,10 +5069,12 @@ func (b *LocalBackend) authReconfig() { // Keep the dialer updated about whether we're supposed to use // an exit node's DNS server (so SOCKS5/HTTP outgoing dials // can use it for name resolution) - if dohURLOK { - b.dialer.SetExitDNSDoH(dohURL) - } else { - b.dialer.SetExitDNSDoH("") + if buildfeatures.HasUseExitNode { + if dohURLOK { + b.dialer.SetExitDNSDoH(dohURL) + } else { + b.dialer.SetExitDNSDoH("") + } } cfg, err := nmcfg.WGCfg(nm, b.logf, flags, prefs.ExitNodeID()) @@ -5226,7 +5093,9 @@ func (b *LocalBackend) authReconfig() { b.logf("[v1] authReconfig: ra=%v dns=%v 0x%02x: %v", prefs.RouteAll(), prefs.CorpDNS(), flags, err) b.initPeerAPIListener() - b.readvertiseAppConnectorRoutes() + if buildfeatures.HasAppConnectors { + b.readvertiseAppConnectorRoutes() + } } // shouldUseOneCGNATRoute reports whether we should prefer to make one big @@ -5285,6 +5154,9 @@ func (b *LocalBackend) SetVarRoot(dir string) { // // It should only be called before the LocalBackend is used. func (b *LocalBackend) SetLogFlusher(flushFunc func()) { + if !buildfeatures.HasLogTail { + return + } b.logFlushFunc = flushFunc } @@ -5293,7 +5165,7 @@ func (b *LocalBackend) SetLogFlusher(flushFunc func()) { // // TryFlushLogs should not block. 
func (b *LocalBackend) TryFlushLogs() bool { - if b.logFlushFunc == nil { + if !buildfeatures.HasLogTail || b.logFlushFunc == nil { return false } b.logFlushFunc() @@ -5327,6 +5199,9 @@ func (b *LocalBackend) TailscaleVarRoot() string { // // b.mu must be held. func (b *LocalBackend) closePeerAPIListenersLocked() { + if !buildfeatures.HasPeerAPIServer { + return + } b.peerAPIServer = nil for _, pln := range b.peerAPIListeners { pln.Close() @@ -5342,6 +5217,9 @@ func (b *LocalBackend) closePeerAPIListenersLocked() { const peerAPIListenAsync = runtime.GOOS == "windows" || runtime.GOOS == "android" func (b *LocalBackend) initPeerAPIListener() { + if !buildfeatures.HasPeerAPIServer { + return + } b.logf("[v1] initPeerAPIListener: entered") b.mu.Lock() defer b.mu.Unlock() @@ -5542,7 +5420,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC NetfilterKind: netfilterKind, } - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { // Issue 1995: we don't use iptables on Synology. rs.NetfilterMode = preftype.NetfilterOff } @@ -5553,7 +5431,7 @@ func (b *LocalBackend) routerConfig(cfg *wgcfg.Config, prefs ipn.PrefsView, oneC // likely to break some functionality, but if the user expressed a // preference for routing remotely, we want to avoid leaking // traffic at the expense of functionality. - if prefs.ExitNodeID() != "" || prefs.ExitNodeIP().IsValid() { + if buildfeatures.HasUseExitNode && (prefs.ExitNodeID() != "" || prefs.ExitNodeIP().IsValid()) { var default4, default6 bool for _, route := range rs.Routes { switch route { @@ -5625,12 +5503,14 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip hi.RoutableIPs = prefs.AdvertiseRoutes().AsSlice() hi.RequestTags = prefs.AdvertiseTags().AsSlice() hi.ShieldsUp = prefs.ShieldsUp() - hi.AllowsUpdate = envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true) + hi.AllowsUpdate = buildfeatures.HasClientUpdate && (envknob.AllowsRemoteUpdate() || prefs.AutoUpdate().Apply.EqualBool(true)) - b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) + if buildfeatures.HasAdvertiseRoutes { + b.metrics.advertisedRoutes.Set(float64(tsaddr.WithoutExitRoute(prefs.AdvertiseRoutes()).Len())) + } var sshHostKeys []string - if prefs.RunSSH() && envknob.CanSSHD() { + if buildfeatures.HasSSH && prefs.RunSSH() && envknob.CanSSHD() { // TODO(bradfitz): this is called with b.mu held. Not ideal. // If the filesystem gets wedged or something we could block for // a long time. But probably fine. @@ -5656,7 +5536,10 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // DNS records are needed, so we can save bandwidth and not send // WireIngress. hi.WireIngress = b.shouldWireInactiveIngressLocked() - hi.AppConnector.Set(prefs.AppConnector().Advertise) + + if buildfeatures.HasAppConnectors { + hi.AppConnector.Set(prefs.AppConnector().Advertise) + } // The [tailcfg.Hostinfo.ExitNodeID] field tells control which exit node // was selected, if any. @@ -5672,8 +5555,10 @@ func (b *LocalBackend) applyPrefsToHostinfoLocked(hi *tailcfg.Hostinfo, prefs ip // ExitNodeID here; [LocalBackend.ResolveExitNode] will be called once // the netmap and/or net report have been received to both pick the exit // node and notify control of the change. 
- if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { - hi.ExitNodeID = prefs.ExitNodeID() + if buildfeatures.HasUseExitNode { + if sid := prefs.ExitNodeID(); sid != unresolvedExitNodeID { + hi.ExitNodeID = prefs.ExitNodeID() + } } } @@ -5709,21 +5594,30 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock activeLogin := b.activeLogin authURL := b.authURL if newState == ipn.Running { - b.resetAuthURLLocked() + // TODO(zofrex): Is this needed? As of 2025-10-03 it doesn't seem to be + // necessary when logging in or authenticating. When do we need to reset it + // here, rather than the other places it is reset? We should test if it is + // necessary and add unit tests to cover those cases, or remove it. + if oldState != ipn.Running { + b.resetAuthURLLocked() + } // Start a captive portal detection loop if none has been // started. Create a new context if none is present, since it // can be shut down if we transition away from Running. - if b.captiveCancel == nil { - b.captiveCtx, b.captiveCancel = context.WithCancel(b.ctx) - b.goTracker.Go(func() { b.checkCaptivePortalLoop(b.captiveCtx) }) + if buildfeatures.HasCaptivePortal { + if b.captiveCancel == nil { + captiveCtx, captiveCancel := context.WithCancel(b.ctx) + b.captiveCtx, b.captiveCancel = captiveCtx, captiveCancel + b.goTracker.Go(func() { hookCheckCaptivePortalLoop.Get()(b, captiveCtx) }) + } } } else if oldState == ipn.Running { // Transitioning away from running. b.closePeerAPIListenersLocked() // Stop any existing captive portal detection loop. - if b.captiveCancel != nil { + if buildfeatures.HasCaptivePortal && b.captiveCancel != nil { b.captiveCancel() b.captiveCancel = nil @@ -5734,12 +5628,6 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } b.pauseOrResumeControlClientLocked() - if newState == ipn.Running { - b.stopOfflineAutoUpdate() - } else { - b.maybeStartOfflineAutoUpdate(prefs) - } - unlock.UnlockEarly() // prefs may change irrespective of state; WantRunning should be explicitly @@ -5754,10 +5642,10 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock switch newState { case ipn.NeedsLogin: - systemd.Status("Needs login: %s", authURL) - if b.seamlessRenewalEnabled() { - break - } + feature.SystemdStatus("Needs login: %s", authURL) + // always block updates on NeedsLogin even if seamless renewal is enabled, + // to prevent calls to authReconfig from reconfiguring the engine when our + // key has expired and we're waiting to authenticate to use the new key. 
b.blockEngineUpdates(true) fallthrough case ipn.Stopped, ipn.NoState: @@ -5769,7 +5657,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock } if newState == ipn.Stopped && authURL == "" { - systemd.Status("Stopped; run 'tailscale up' to log in") + feature.SystemdStatus("Stopped; run 'tailscale up' to log in") } case ipn.Starting, ipn.NeedsMachineAuth: b.authReconfig() @@ -5781,7 +5669,7 @@ func (b *LocalBackend) enterStateLockedOnEntry(newState ipn.State, unlock unlock for _, p := range addrs.All() { addrStrs = append(addrStrs, p.Addr().String()) } - systemd.Status("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) + feature.SystemdStatus("Connected; %s; %s", activeLogin, strings.Join(addrStrs, " ")) default: b.logf("[unexpected] unknown newState %#v", newState) } @@ -5950,29 +5838,38 @@ func (u unlockOnce) UnlockEarly() { } // stopEngineAndWait deconfigures the local network data plane, and -// waits for it to deliver a status update before returning. -// -// TODO(danderson): this may be racy. We could unblock upon receiving -// a status update that predates the "I've shut down" update. +// waits for it to deliver a status update indicating it has stopped +// before returning. func (b *LocalBackend) stopEngineAndWait() { b.logf("stopEngineAndWait...") b.e.Reconfig(&wgcfg.Config{}, &router.Config{}, &dns.Config{}) - b.requestEngineStatusAndWait() + b.requestEngineStatusAndWaitForStopped() b.logf("stopEngineAndWait: done.") } -// Requests the wgengine status, and does not return until the status -// was delivered (to the usual callback). -func (b *LocalBackend) requestEngineStatusAndWait() { - b.logf("requestEngineStatusAndWait") +// Requests the wgengine status, and does not return until a status was +// delivered (to the usual callback) that indicates the engine is stopped. +func (b *LocalBackend) requestEngineStatusAndWaitForStopped() { + b.logf("requestEngineStatusAndWaitForStopped") - b.statusLock.Lock() - defer b.statusLock.Unlock() + b.mu.Lock() + defer b.mu.Unlock() b.goTracker.Go(b.e.RequestStatus) - b.logf("requestEngineStatusAndWait: waiting...") - b.statusChanged.Wait() // temporarily releases lock while waiting - b.logf("requestEngineStatusAndWait: got status update.") + b.logf("requestEngineStatusAndWaitForStopped: waiting...") + for { + b.statusChanged.Wait() // temporarily releases lock while waiting + + if !b.blocked { + b.logf("requestEngineStatusAndWaitForStopped: engine is no longer blocked, must have stopped and started again, not safe to wait.") + break + } + if b.engineStatus.NumLive == 0 && b.engineStatus.LiveDERPs == 0 { + b.logf("requestEngineStatusAndWaitForStopped: engine is stopped.") + break + } + b.logf("requestEngineStatusAndWaitForStopped: engine is still running. Waiting...") + } } // setControlClientLocked sets the control client to cc, @@ -6055,6 +5952,9 @@ func (b *LocalBackend) setWebClientAtomicBoolLocked(nm *netmap.NetworkMap) { // // b.mu must be held. func (b *LocalBackend) setExposeRemoteWebClientAtomicBoolLocked(prefs ipn.PrefsView) { + if !buildfeatures.HasWebClient { + return + } shouldExpose := prefs.Valid() && prefs.RunWebClient() b.exposeRemoteWebClientAtomicBool.Store(shouldExpose) } @@ -6170,6 +6070,9 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) { // RefreshExitNode determines which exit node to use based on the current // prefs and netmap and switches to it if needed. 
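The rewritten requestEngineStatusAndWaitForStopped above no longer returns after a single status update; it re-checks its predicate (engineStatus.NumLive, LiveDERPs, and b.blocked) after every wakeup. That is the standard condition-variable wait loop. A standalone sketch of the same shape follows; the engineState type and its fields are invented for illustration and are not the real wgengine types.

package main

import (
	"fmt"
	"sync"
)

// engineState is a stand-in for the status fields LocalBackend consults.
type engineState struct {
	mu        sync.Mutex
	cond      *sync.Cond
	livePeers int
	liveDERPs int
}

func newEngineState() *engineState {
	s := &engineState{}
	s.cond = sync.NewCond(&s.mu)
	return s
}

// setCounts updates the live-connection counts and wakes any waiters,
// mirroring how a status callback would signal statusChanged.
func (s *engineState) setCounts(peers, derps int) {
	s.mu.Lock()
	s.livePeers, s.liveDERPs = peers, derps
	s.mu.Unlock()
	s.cond.Broadcast()
}

// waitForStopped blocks until the engine reports no live peers or DERPs.
// Cond.Wait releases the lock while waiting and re-acquires it before
// returning, so the predicate must be re-checked in a loop.
func (s *engineState) waitForStopped() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for s.livePeers != 0 || s.liveDERPs != 0 {
		s.cond.Wait()
	}
}

func main() {
	s := newEngineState()
	s.setCounts(2, 1)
	go s.setCounts(0, 0) // simulate the engine winding down
	s.waitForStopped()
	fmt.Println("engine stopped")
}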
func (b *LocalBackend) RefreshExitNode() { + if !buildfeatures.HasUseExitNode { + return + } if b.resolveExitNode() { b.authReconfig() } @@ -6185,6 +6088,9 @@ func (b *LocalBackend) RefreshExitNode() { // // b.mu must not be held. func (b *LocalBackend) resolveExitNode() (changed bool) { + if !buildfeatures.HasUseExitNode { + return false + } b.mu.Lock() defer b.mu.Unlock() @@ -6227,10 +6133,10 @@ func (b *LocalBackend) resolveExitNode() (changed bool) { // // b.mu must be held. func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { - if b.applySysPolicyLocked(prefs) { + if buildfeatures.HasSystemPolicy && b.applySysPolicyLocked(prefs) { changed = true } - if b.resolveExitNodeInPrefsLocked(prefs) { + if buildfeatures.HasUseExitNode && b.resolveExitNodeInPrefsLocked(prefs) { changed = true } if changed { @@ -6245,6 +6151,9 @@ func (b *LocalBackend) reconcilePrefsLocked(prefs *ipn.Prefs) (changed bool) { // // b.mu must be held. func (b *LocalBackend) resolveExitNodeInPrefsLocked(prefs *ipn.Prefs) (changed bool) { + if !buildfeatures.HasUseExitNode { + return false + } if b.resolveAutoExitNodeLocked(prefs) { changed = true } @@ -6288,25 +6197,31 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { b.health.SetControlHealth(nil) } - if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { - b.capForcedNetfilter = "iptables" - } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { - b.capForcedNetfilter = "nftables" - } else { - b.capForcedNetfilter = "" // empty string means client can auto-detect + if runtime.GOOS == "linux" && buildfeatures.HasOSRouter { + if nm.HasCap(tailcfg.NodeAttrLinuxMustUseIPTables) { + b.capForcedNetfilter = "iptables" + } else if nm.HasCap(tailcfg.NodeAttrLinuxMustUseNfTables) { + b.capForcedNetfilter = "nftables" + } else { + b.capForcedNetfilter = "" // empty string means client can auto-detect + } } b.MagicConn().SetSilentDisco(b.ControlKnobs().SilentDisco.Load()) b.MagicConn().SetProbeUDPLifetime(b.ControlKnobs().ProbeUDPLifetime.Load()) - b.setDebugLogsByCapabilityLocked(nm) + if buildfeatures.HasDebug { + b.setDebugLogsByCapabilityLocked(nm) + } // See the netns package for documentation on what this capability does. netns.SetBindToInterfaceByRoute(nm.HasCap(tailcfg.CapabilityBindToInterfaceByRoute)) netns.SetDisableBindConnToInterface(nm.HasCap(tailcfg.CapabilityDebugDisableBindConnToInterface)) b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs()) - b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + if buildfeatures.HasServe { + b.ipVIPServiceMap = nm.GetIPVIPServiceMap() + } if !oldSelf.Equal(nm.SelfNodeOrZero()) { for _, f := range b.extHost.Hooks().OnSelfChange { @@ -6314,160 +6229,30 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) { } } - if nm == nil { - // If there is no netmap, the client is going into a "turned off" - // state so reset the metrics. - b.metrics.approvedRoutes.Set(0) - return - } - - if nm.SelfNode.Valid() { - var approved float64 - for _, route := range nm.SelfNode.AllowedIPs().All() { - if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { - approved++ + if buildfeatures.HasAdvertiseRoutes { + if nm == nil { + // If there is no netmap, the client is going into a "turned off" + // state so reset the metrics. 
+ b.metrics.approvedRoutes.Set(0) + } else if nm.SelfNode.Valid() { + var approved float64 + for _, route := range nm.SelfNode.AllowedIPs().All() { + if !views.SliceContains(nm.SelfNode.Addresses(), route) && !tsaddr.IsExitRoute(route) { + approved++ + } } + b.metrics.approvedRoutes.Set(approved) } - b.metrics.approvedRoutes.Set(approved) - } - - b.updateDrivePeersLocked(nm) - b.driveNotifyCurrentSharesLocked() -} - -// responseBodyWrapper wraps an io.ReadCloser and stores -// the number of bytesRead. -type responseBodyWrapper struct { - io.ReadCloser - logVerbose bool - bytesRx int64 - bytesTx int64 - log logger.Logf - method string - statusCode int - contentType string - fileExtension string - shareNodeKey string - selfNodeKey string - contentLength int64 -} - -// logAccess logs the taildrive: access: log line. If the logger is nil, -// the log will not be written. -func (rbw *responseBodyWrapper) logAccess(err string) { - if rbw.log == nil { - return } - // Some operating systems create and copy lots of 0 length hidden files for - // tracking various states. Omit these to keep logs from being too verbose. - if rbw.logVerbose || rbw.contentLength > 0 { - levelPrefix := "" - if rbw.logVerbose { - levelPrefix = "[v1] " + if buildfeatures.HasDrive && nm != nil { + if f, ok := hookSetNetMapLockedDrive.GetOk(); ok { + f(b, nm) } - rbw.log( - "%staildrive: access: %s from %s to %s: status-code=%d ext=%q content-type=%q content-length=%.f tx=%.f rx=%.f err=%q", - levelPrefix, - rbw.method, - rbw.selfNodeKey, - rbw.shareNodeKey, - rbw.statusCode, - rbw.fileExtension, - rbw.contentType, - roundTraffic(rbw.contentLength), - roundTraffic(rbw.bytesTx), roundTraffic(rbw.bytesRx), err) } } -// Read implements the io.Reader interface. -func (rbw *responseBodyWrapper) Read(b []byte) (int, error) { - n, err := rbw.ReadCloser.Read(b) - rbw.bytesRx += int64(n) - if err != nil && !errors.Is(err, io.EOF) { - rbw.logAccess(err.Error()) - } - - return n, err -} - -// Close implements the io.Close interface. -func (rbw *responseBodyWrapper) Close() error { - err := rbw.ReadCloser.Close() - var errStr string - if err != nil { - errStr = err.Error() - } - rbw.logAccess(errStr) - - return err -} - -// driveTransport is an http.RoundTripper that wraps -// b.Dialer().PeerAPITransport() with metrics tracking. -type driveTransport struct { - b *LocalBackend - tr *http.Transport -} - -func (b *LocalBackend) newDriveTransport() *driveTransport { - return &driveTransport{ - b: b, - tr: b.Dialer().PeerAPITransport(), - } -} - -func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - // Some WebDAV clients include origin and refer headers, which peerapi does - // not like. Remove them. 
- req.Header.Del("origin") - req.Header.Del("referer") - - bw := &requestBodyWrapper{} - if req.Body != nil { - bw.ReadCloser = req.Body - req.Body = bw - } - - defer func() { - contentType := "unknown" - if ct := req.Header.Get("Content-Type"); ct != "" { - contentType = ct - } - - dt.b.mu.Lock() - selfNodeKey := dt.b.currentNode().Self().Key().ShortString() - dt.b.mu.Unlock() - n, _, ok := dt.b.WhoIs("tcp", netip.MustParseAddrPort(req.URL.Host)) - shareNodeKey := "unknown" - if ok { - shareNodeKey = string(n.Key().ShortString()) - } - - rbw := responseBodyWrapper{ - log: dt.b.logf, - logVerbose: req.Method != httpm.GET && req.Method != httpm.PUT, // other requests like PROPFIND are quite chatty, so we log those at verbose level - method: req.Method, - bytesTx: int64(bw.bytesRead), - selfNodeKey: selfNodeKey, - shareNodeKey: shareNodeKey, - contentType: contentType, - contentLength: resp.ContentLength, - fileExtension: parseDriveFileExtensionForLog(req.URL.Path), - statusCode: resp.StatusCode, - ReadCloser: resp.Body, - } - - if resp.StatusCode >= 400 { - // in case of error response, just log immediately - rbw.logAccess("") - } else { - resp.Body = &rbw - } - }() - - return dt.tr.RoundTrip(req) -} +var hookSetNetMapLockedDrive feature.Hook[func(*LocalBackend, *netmap.NetworkMap)] // roundTraffic rounds bytes. This is used to preserve user privacy within logs. func roundTraffic(bytes int64) float64 { @@ -6507,55 +6292,12 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) { } } -// reloadServeConfigLocked reloads the serve config from the store or resets the -// serve config to nil if not logged in. The "changed" parameter, when false, instructs -// the method to only run the reset-logic and not reload the store from memory to ensure -// foreground sessions are not removed if they are not saved on disk. -func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { - if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { - // We're not logged in, so we don't have a profile. - // Don't try to load the serve config. - b.lastServeConfJSON = mem.B(nil) - b.serveConfig = ipn.ServeConfigView{} - return - } - - confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) - // TODO(maisem,bradfitz): prevent reading the config from disk - // if the profile has not changed. - confj, err := b.store.ReadState(confKey) - if err != nil { - b.lastServeConfJSON = mem.B(nil) - b.serveConfig = ipn.ServeConfigView{} - return - } - if b.lastServeConfJSON.Equal(mem.B(confj)) { - return - } - b.lastServeConfJSON = mem.B(confj) - var conf ipn.ServeConfig - if err := json.Unmarshal(confj, &conf); err != nil { - b.logf("invalid ServeConfig %q in StateStore: %v", confKey, err) - b.serveConfig = ipn.ServeConfigView{} - return - } - - // remove inactive sessions - maps.DeleteFunc(conf.Foreground, func(sessionID string, sc *ipn.ServeConfig) bool { - _, ok := b.notifyWatchers[sessionID] - return !ok - }) - - b.serveConfig = conf.View() -} - // setTCPPortsInterceptedFromNetmapAndPrefsLocked calls setTCPPortsIntercepted with // the ports that tailscaled should handle as a function of b.netMap and b.prefs. // // b.mu must be held. 
func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.PrefsView) { handlePorts := make([]uint16, 0, 4) - var vipServicesPorts map[tailcfg.ServiceName][]uint16 if prefs.Valid() && prefs.RunSSH() && envknob.CanSSHD() { handlePorts = append(handlePorts, 22) @@ -6569,42 +6311,14 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn. } } - b.reloadServeConfigLocked(prefs) - if b.serveConfig.Valid() { - servePorts := make([]uint16, 0, 3) - for port := range b.serveConfig.TCPs() { - if port > 0 { - servePorts = append(servePorts, uint16(port)) - } - } - handlePorts = append(handlePorts, servePorts...) - - for svc, cfg := range b.serveConfig.Services().All() { - servicePorts := make([]uint16, 0, 3) - for port := range cfg.TCP().All() { - if port > 0 { - servicePorts = append(servicePorts, uint16(port)) - } - } - if _, ok := vipServicesPorts[svc]; !ok { - mak.Set(&vipServicesPorts, svc, servicePorts) - } else { - mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) - } - } - - b.setServeProxyHandlersLocked() - - // don't listen on netmap addresses if we're in userspace mode - if !b.sys.IsNetstack() { - b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) - } + if f, ok := hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked.GetOk(); ok { + v := f(b, prefs) + handlePorts = append(handlePorts, v...) } // Update funnel and service hash info in hostinfo and kick off control update if needed. b.updateIngressAndServiceHashLocked(prefs) b.setTCPPortsIntercepted(handlePorts) - b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) } // updateIngressAndServiceHashLocked updates the hostinfo.ServicesHash, hostinfo.WireIngress and @@ -6637,51 +6351,6 @@ func (b *LocalBackend) updateIngressAndServiceHashLocked(prefs ipn.PrefsView) { } } -// setServeProxyHandlersLocked ensures there is an http proxy handler for each -// backend specified in serveConfig. It expects serveConfig to be valid and -// up-to-date, so should be called after reloadServeConfigLocked. -func (b *LocalBackend) setServeProxyHandlersLocked() { - if !b.serveConfig.Valid() { - return - } - var backends map[string]bool - for _, conf := range b.serveConfig.Webs() { - for _, h := range conf.Handlers().All() { - backend := h.Proxy() - if backend == "" { - // Only create proxy handlers for servers with a proxy backend. - continue - } - mak.Set(&backends, backend, true) - if _, ok := b.serveProxyHandlers.Load(backend); ok { - continue - } - - b.logf("serve: creating a new proxy handler for %s", backend) - p, err := b.proxyHandlerForBackend(backend) - if err != nil { - // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget - // in the CLI, so just log the error here. - b.logf("[unexpected] could not create proxy for %v: %s", backend, err) - continue - } - b.serveProxyHandlers.Store(backend, p) - } - } - - // Clean up handlers for proxy backends that are no longer present - // in configuration. - b.serveProxyHandlers.Range(func(key, value any) bool { - backend := key.(string) - if !backends[backend] { - b.logf("serve: closing idle connections to %s", backend) - b.serveProxyHandlers.Delete(backend) - value.(*reverseProxy).close() - } - return true - }) -} - // operatorUserName returns the current pref's OperatorUser's name, or the // empty string if none. 
func (b *LocalBackend) operatorUserName() string { @@ -6735,6 +6404,9 @@ func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeK // This is the low-level interface. Other layers will provide more // friendly options to get HTTPS certs. func (b *LocalBackend) SetDNS(ctx context.Context, name, value string) error { + if !buildfeatures.HasACME { + return feature.ErrUnavailable + } req := &tailcfg.SetDNSRequest{ Version: 1, // TODO(bradfitz,maisem): use tailcfg.CurrentCapabilityVersion when using the Noise transport Type: "TXT", @@ -6777,6 +6449,9 @@ func peerAPIPorts(peer tailcfg.NodeView) (p4, p6 uint16) { } func (b *LocalBackend) CheckIPForwarding() error { + if !buildfeatures.HasAdvertiseRoutes { + return nil + } if b.sys.IsNetstackRouter() { return nil } @@ -6892,6 +6567,9 @@ func (b *LocalBackend) OfferingExitNode() bool { // OfferingAppConnector reports whether b is currently offering app // connector services. func (b *LocalBackend) OfferingAppConnector() bool { + if !buildfeatures.HasAppConnectors { + return false + } b.mu.Lock() defer b.mu.Unlock() return b.appConnector != nil @@ -6901,6 +6579,9 @@ func (b *LocalBackend) OfferingAppConnector() bool { // // TODO(nickkhyl): move app connectors to [nodeBackend], or perhaps a feature package? func (b *LocalBackend) AppConnector() *appc.AppConnector { + if !buildfeatures.HasAppConnectors { + return nil + } b.mu.Lock() defer b.mu.Unlock() return b.appConnector @@ -6971,6 +6652,9 @@ func (b *LocalBackend) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpd // // If exitNodeID is the zero valid, it returns "", false. func exitNodeCanProxyDNS(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.NodeView, exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + if !buildfeatures.HasUseExitNode { + return "", false + } if exitNodeID.IsZero() { return "", false } @@ -7045,67 +6729,16 @@ func (b *LocalBackend) ControlKnobs() *controlknobs.Knobs { return b.sys.ControlKnobs() } +// EventBus returns the node's event bus. +func (b *LocalBackend) EventBus() *eventbus.Bus { + return b.sys.Bus.Get() +} + // MagicConn returns the backend's *magicsock.Conn. func (b *LocalBackend) MagicConn() *magicsock.Conn { return b.sys.MagicSock.Get() } -type keyProvingNoiseRoundTripper struct { - b *LocalBackend -} - -func (n keyProvingNoiseRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b := n.b - - var priv key.NodePrivate - - b.mu.Lock() - cc := b.ccAuto - if nm := b.NetMap(); nm != nil { - priv = nm.PrivateKey - } - b.mu.Unlock() - if cc == nil { - return nil, errors.New("no client") - } - if priv.IsZero() { - return nil, errors.New("no netmap or private key") - } - rt, ep, err := cc.GetSingleUseNoiseRoundTripper(req.Context()) - if err != nil { - return nil, err - } - if ep == nil || ep.NodeKeyChallenge.IsZero() { - go rt.RoundTrip(new(http.Request)) // return our reservation with a bogus request - return nil, errors.New("this coordination server does not support API calls over the Noise channel") - } - - // QueryEscape the node key since it has a colon in it. - nk := url.QueryEscape(priv.Public().String()) - req.SetBasicAuth(nk, "") - - // genNodeProofHeaderValue returns the Tailscale-Node-Proof header's value to prove - // to chalPub that we control claimedPrivate. - genNodeProofHeaderValue := func(claimedPrivate key.NodePrivate, chalPub key.ChallengePublic) string { - // TODO(bradfitz): cache this somewhere? 
- box := claimedPrivate.SealToChallenge(chalPub, []byte(chalPub.String())) - return claimedPrivate.Public().String() + " " + base64.StdEncoding.EncodeToString(box) - } - - // And prove we have the private key corresponding to the public key sent - // tin the basic auth username. - req.Header.Set("Tailscale-Node-Proof", genNodeProofHeaderValue(priv, ep.NodeKeyChallenge)) - - return rt.RoundTrip(req) -} - -// KeyProvingNoiseRoundTripper returns an http.RoundTripper that uses the LocalBackend's -// DoNoiseRequest method and mutates the request to add an authorization header -// to prove the client's nodekey. -func (b *LocalBackend) KeyProvingNoiseRoundTripper() http.RoundTripper { - return keyProvingNoiseRoundTripper{b} -} - // DoNoiseRequest sends a request to URL over the control plane // Noise connection. func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error) { @@ -7118,6 +6751,15 @@ func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error) return cc.DoNoiseRequest(req) } +// ActiveSSHConns returns the number of active SSH connections, +// or 0 if SSH is not linked into the binary or available on the platform. +func (b *LocalBackend) ActiveSSHConns() int { + if b.sshServer == nil { + return 0 + } + return b.sshServer.NumActiveConns() +} + func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) { b.mu.Lock() defer b.mu.Unlock() @@ -7210,56 +6852,8 @@ func (b *LocalBackend) handleQuad100Port80Conn(w http.ResponseWriter, r *http.Re io.WriteString(w, "\n") } -func (b *LocalBackend) Doctor(ctx context.Context, logf logger.Logf) { - // We can write logs too fast for logtail to handle, even when - // opting-out of rate limits. Limit ourselves to at most one message - // per 20ms and a burst of 60 log lines, which should be fast enough to - // not block for too long but slow enough that we can upload all lines. - logf = logger.SlowLoggerWithClock(ctx, logf, 20*time.Millisecond, 60, b.clock.Now) - - var checks []doctor.Check - checks = append(checks, - permissions.Check{}, - routetable.Check{}, - ethtool.Check{}, - ) - - // Print a log message if any of the global DNS resolvers are Tailscale - // IPs; this can interfere with our ability to connect to the Tailscale - // controlplane. - checks = append(checks, doctor.CheckFunc("dns-resolvers", func(_ context.Context, logf logger.Logf) error { - b.mu.Lock() - nm := b.NetMap() - b.mu.Unlock() - if nm == nil { - return nil - } - - for i, resolver := range nm.DNS.Resolvers { - ipp, ok := resolver.IPPort() - if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { - logf("resolver %d is a Tailscale address: %v", i, resolver) - } - } - for i, resolver := range nm.DNS.FallbackResolvers { - ipp, ok := resolver.IPPort() - if ok && tsaddr.IsTailscaleIP(ipp.Addr()) { - logf("fallback resolver %d is a Tailscale address: %v", i, resolver) - } - } - return nil - })) - - // TODO(andrew): more - - numChecks := len(checks) - checks = append(checks, doctor.CheckFunc("numchecks", func(_ context.Context, log logger.Logf) error { - log("%d checks", numChecks) - return nil - })) - - doctor.RunChecks(ctx, logf, checks...) -} +// HookDoctor is an optional hook for the "doctor" problem diagnosis feature. +var HookDoctor feature.Hook[func(context.Context, *LocalBackend, logger.Logf)] // SetDevStateStore updates the LocalBackend's state storage to the provided values. 
// @@ -7292,7 +6886,14 @@ func (b *LocalBackend) ShouldInterceptTCPPort(port uint16) bool { // ShouldInterceptVIPServiceTCPPort reports whether the given TCP port number // to a VIP service should be intercepted by Tailscaled and handled in-process. func (b *LocalBackend) ShouldInterceptVIPServiceTCPPort(ap netip.AddrPort) bool { - return b.shouldInterceptVIPServicesTCPPortAtomic.Load()(ap) + if !buildfeatures.HasServe { + return false + } + f := b.shouldInterceptVIPServicesTCPPortAtomic.Load() + if f == nil { + return false + } + return f(ap) } // SwitchProfile switches to the profile with the given id. @@ -7315,53 +6916,6 @@ func (b *LocalBackend) SwitchProfile(profile ipn.ProfileID) error { return b.resetForProfileChangeLockedOnEntry(unlock) } -func (b *LocalBackend) initTKALocked() error { - cp := b.pm.CurrentProfile() - if cp.ID() == "" { - b.tka = nil - return nil - } - if b.tka != nil { - if b.tka.profile == cp.ID() { - // Already initialized. - return nil - } - // As we're switching profiles, we need to reset the TKA to nil. - b.tka = nil - } - root := b.TailscaleVarRoot() - if root == "" { - b.tka = nil - b.logf("network-lock unavailable; no state directory") - return nil - } - - chonkDir := b.chonkPathLocked() - if _, err := os.Stat(chonkDir); err == nil { - // The directory exists, which means network-lock has been initialized. - storage, err := tka.ChonkDir(chonkDir) - if err != nil { - return fmt.Errorf("opening tailchonk: %v", err) - } - authority, err := tka.Open(storage) - if err != nil { - return fmt.Errorf("initializing tka: %v", err) - } - if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { - b.logf("tka compaction failed: %v", err) - } - - b.tka = &tkaState{ - profile: cp.ID(), - authority: authority, - storage: storage, - } - b.logf("tka initialized at head %x", authority.Head()) - } - - return nil -} - // resetDialPlan resets the dialPlan for this LocalBackend. It will log if // anything is reset. // @@ -7373,25 +6927,6 @@ func (b *LocalBackend) resetDialPlan() { } } -// getHardwareAddrs returns the hardware addresses for the machine. If the list -// of hardware addresses is empty, it will return the previously known hardware -// addresses. Both the current, and previously known hardware addresses might be -// empty. -func (b *LocalBackend) getHardwareAddrs() ([]string, error) { - addrs, err := posture.GetHardwareAddrs() - if err != nil { - return nil, err - } - - if len(addrs) == 0 { - b.logf("getHardwareAddrs: got empty list of hwaddrs, returning previous list") - return b.lastKnownHardwareAddrs.Load(), nil - } - - b.lastKnownHardwareAddrs.Store(addrs) - return addrs, nil -} - // resetForProfileChangeLockedOnEntry resets the backend for a profile change. // // b.mu must held on entry. It is released on exit. @@ -7404,7 +6939,7 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err // down, so no need to do any work. 
return nil } - newNode := newNodeBackend(b.ctx, b.sys.Bus.Get()) + newNode := newNodeBackend(b.ctx, b.logf, b.sys.Bus.Get()) if oldNode := b.currentNodeAtomic.Swap(newNode); oldNode != nil { oldNode.shutdown(errNodeContextChanged) } @@ -7536,57 +7071,12 @@ func (b *LocalBackend) DebugBreakDERPConns() error { return b.MagicConn().DebugBreakDERPConns() } -func (b *LocalBackend) pushSelfUpdateProgress(up ipnstate.UpdateProgress) { - b.mu.Lock() - defer b.mu.Unlock() - b.selfUpdateProgress = append(b.selfUpdateProgress, up) - b.lastSelfUpdateState = up.Status -} - -func (b *LocalBackend) clearSelfUpdateProgress() { - b.mu.Lock() - defer b.mu.Unlock() - b.selfUpdateProgress = make([]ipnstate.UpdateProgress, 0) - b.lastSelfUpdateState = ipnstate.UpdateFinished -} - -func (b *LocalBackend) GetSelfUpdateProgress() []ipnstate.UpdateProgress { - b.mu.Lock() - defer b.mu.Unlock() - res := make([]ipnstate.UpdateProgress, len(b.selfUpdateProgress)) - copy(res, b.selfUpdateProgress) - return res -} - -func (b *LocalBackend) DoSelfUpdate() { - b.mu.Lock() - updateState := b.lastSelfUpdateState - b.mu.Unlock() - // don't start an update if one is already in progress - if updateState == ipnstate.UpdateInProgress { - return - } - b.clearSelfUpdateProgress() - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, "")) - up, err := clientupdate.NewUpdater(clientupdate.Arguments{ - Logf: func(format string, args ...any) { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateInProgress, fmt.Sprintf(format, args...))) - }, - }) - if err != nil { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) - } - err = up.Update() - if err != nil { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFailed, err.Error())) - } else { - b.pushSelfUpdateProgress(ipnstate.NewUpdateProgress(ipnstate.UpdateFinished, "tailscaled did not restart; please restart Tailscale manually.")) - } -} - // ObserveDNSResponse passes a DNS response from the PeerAPI DNS server to the // App Connector to enable route discovery. func (b *LocalBackend) ObserveDNSResponse(res []byte) error { + if !buildfeatures.HasAppConnectors { + return nil + } var appConnector *appc.AppConnector b.mu.Lock() if b.appConnector == nil { @@ -7602,9 +7092,9 @@ func (b *LocalBackend) ObserveDNSResponse(res []byte) error { // ErrDisallowedAutoRoute is returned by AdvertiseRoute when a route that is not allowed is requested. var ErrDisallowedAutoRoute = errors.New("route is not allowed") -// AdvertiseRoute implements the appc.RouteAdvertiser interface. It sets a new -// route advertisement if one is not already present in the existing routes. -// If the route is disallowed, ErrDisallowedAutoRoute is returned. +// AdvertiseRoute implements the appctype.RouteAdvertiser interface. It sets a +// new route advertisement if one is not already present in the existing +// routes. If the route is disallowed, ErrDisallowedAutoRoute is returned. func (b *LocalBackend) AdvertiseRoute(ipps ...netip.Prefix) error { finalRoutes := b.Prefs().AdvertiseRoutes().AsSlice() var newRoutes []netip.Prefix @@ -7660,8 +7150,8 @@ func coveredRouteRangeNoDefault(finalRoutes []netip.Prefix, ipp netip.Prefix) bo return false } -// UnadvertiseRoute implements the appc.RouteAdvertiser interface. It removes -// a route advertisement if one is present in the existing routes. +// UnadvertiseRoute implements the appctype.RouteAdvertiser interface. 
It +// removes a route advertisement if one is present in the existing routes. func (b *LocalBackend) UnadvertiseRoute(toRemove ...netip.Prefix) error { currentRoutes := b.Prefs().AdvertiseRoutes().AsSlice() finalRoutes := currentRoutes[:0] @@ -7689,7 +7179,10 @@ func namespaceKeyForCurrentProfile(pm *profileManager, key ipn.StateKey) ipn.Sta const routeInfoStateStoreKey ipn.StateKey = "_routeInfo" -func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { +func (b *LocalBackend) storeRouteInfo(ri appctype.RouteInfo) error { + if !buildfeatures.HasAppConnectors { + return feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() if b.pm.CurrentProfile().ID() == "" { @@ -7703,13 +7196,16 @@ func (b *LocalBackend) storeRouteInfo(ri *appc.RouteInfo) error { return b.pm.WriteState(key, bs) } -func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { +func (b *LocalBackend) readRouteInfoLocked() (*appctype.RouteInfo, error) { + if !buildfeatures.HasAppConnectors { + return nil, feature.ErrUnavailable + } if b.pm.CurrentProfile().ID() == "" { - return &appc.RouteInfo{}, nil + return &appctype.RouteInfo{}, nil } key := namespaceKeyForCurrentProfile(b.pm, routeInfoStateStoreKey) bs, err := b.pm.Store().ReadState(key) - ri := &appc.RouteInfo{} + ri := &appctype.RouteInfo{} if err != nil { return nil, err } @@ -7719,10 +7215,19 @@ func (b *LocalBackend) readRouteInfoLocked() (*appc.RouteInfo, error) { return ri, nil } -// seamlessRenewalEnabled reports whether seamless key renewals are enabled -// (i.e. we saw our self node with the SeamlessKeyRenewal attr in a netmap). -// This enables beta functionality of renewing node keys without breaking -// connections. +// ReadRouteInfo returns the app connector route information that is +// stored in prefs to be consistent across restarts. It should be up +// to date with the RouteInfo in memory being used by appc. +func (b *LocalBackend) ReadRouteInfo() (*appctype.RouteInfo, error) { + b.mu.Lock() + defer b.mu.Unlock() + return b.readRouteInfoLocked() +} + +// seamlessRenewalEnabled reports whether seamless key renewals are enabled. +// +// As of 2025-09-11, this is the default behaviour unless nodes receive +// [tailcfg.NodeAttrDisableSeamlessKeyRenewal] in their netmap. func (b *LocalBackend) seamlessRenewalEnabled() bool { return b.ControlKnobs().SeamlessKeyRenewal.Load() } @@ -7766,6 +7271,9 @@ var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later") // // b.mu.lock() must be held. func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggestionResponse, err error) { + if !buildfeatures.HasUseExitNode { + return response, feature.ErrUnavailable + } lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx) prevSuggestion := b.lastSuggestedExitNode @@ -7783,6 +7291,9 @@ func (b *LocalBackend) suggestExitNodeLocked() (response apitype.ExitNodeSuggest } func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionResponse, err error) { + if !buildfeatures.HasUseExitNode { + return response, feature.ErrUnavailable + } b.mu.Lock() defer b.mu.Unlock() return b.suggestExitNodeLocked() @@ -7799,6 +7310,9 @@ func (b *LocalBackend) getAllowedSuggestions() set.Set[tailcfg.StableNodeID] { // refreshAllowedSuggestions rebuilds the set of permitted exit nodes // from the current [pkey.AllowedSuggestedExitNodes] value. 
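Note: several exit-node entry points in these hunks (suggestExitNodeLocked, SuggestExitNode, and refreshAllowedSuggestions below) now short-circuit when HasUseExitNode is compiled out and return feature.ErrUnavailable instead of doing any work. A hedged sketch of how a caller can tell "feature omitted from this build" apart from a genuine failure follows; SuggestExitNode and feature.ErrUnavailable are from this diff, the helper and its package are not.

	// Hypothetical caller-side handling of a feature-gated API.
	package example

	import (
		"errors"
		"log"

		"tailscale.com/feature"
		"tailscale.com/ipn/ipnlocal"
	)

	func logExitNodeSuggestion(b *ipnlocal.LocalBackend) {
		resp, err := b.SuggestExitNode()
		switch {
		case errors.Is(err, feature.ErrUnavailable):
			// Exit-node support was compiled out of this binary; nothing to
			// suggest, but not a runtime failure either.
			log.Printf("exit-node suggestions unavailable in this build")
		case err != nil:
			log.Printf("suggest exit node: %v", err)
		default:
			log.Printf("suggested exit node: %+v", resp)
		}
	}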
func (b *LocalBackend) refreshAllowedSuggestions() { + if !buildfeatures.HasUseExitNode { + return + } b.allowedSuggestedExitNodesMu.Lock() defer b.allowedSuggestedExitNodesMu.Unlock() b.allowedSuggestedExitNodes = fillAllowedSuggestions(b.polc) @@ -7855,6 +7369,10 @@ func suggestExitNode(report *netcheck.Report, nb *nodeBackend, prevSuggestion ta // the lowest latency to this device. For peers without a DERP home, we look for // geographic proximity to this device's DERP home. func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSuggestion tailcfg.StableNodeID, selectRegion selectRegionFunc, selectNode selectNodeFunc, allowList set.Set[tailcfg.StableNodeID]) (res apitype.ExitNodeSuggestionResponse, err error) { + // TODO(sfllaw): Context needs to be plumbed down here to support + // reachability testing. + ctx := context.TODO() + netMap := nb.NetMap() if report == nil || report.PreferredDERP == 0 || netMap == nil || netMap.DERPMap == nil { return res, ErrNoPreferredDERP @@ -7863,7 +7381,7 @@ func suggestExitNodeUsingDERP(report *netcheck.Report, nb *nodeBackend, prevSugg // since the netmap doesn't include delta updates (e.g., home DERP or Online // status changes) from the control plane since the last full update. candidates := nb.AppendMatchingPeers(nil, func(peer tailcfg.NodeView) bool { - if !peer.Valid() || !peer.Online().Get() { + if !peer.Valid() || !nb.PeerIsReachable(ctx, peer) { return false } if allowList != nil && !allowList.Contains(peer.StableID()) { @@ -7982,6 +7500,10 @@ var ErrNoNetMap = errors.New("no network map, try again later") // the node’s [tailcfg.Location]. To be eligible for consideration, the node // must have NodeAttrSuggestExitNode in its CapMap. func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcfg.StableNodeID]) (apitype.ExitNodeSuggestionResponse, error) { + // TODO(sfllaw): Context needs to be plumbed down here to support + // reachability testing. + ctx := context.TODO() + nm := nb.NetMap() if nm == nil { return apitype.ExitNodeSuggestionResponse{}, ErrNoNetMap @@ -7996,12 +7518,11 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf panic("missing traffic-steering capability") } - var force tailcfg.NodeView nodes := nb.AppendMatchingPeers(nil, func(p tailcfg.NodeView) bool { if !p.Valid() { return false } - if !p.Online().Get() { + if !nb.PeerIsReachable(ctx, p) { return false } if allowed != nil && !allowed.Contains(p.StableID()) { @@ -8015,9 +7536,6 @@ func suggestExitNodeUsingTrafficSteering(nb *nodeBackend, allowed set.Set[tailcf } return true }) - if force.Valid() { - nodes = append(nodes[:0], force) - } scores := make(map[tailcfg.NodeID]int, len(nodes)) score := func(n tailcfg.NodeView) int { @@ -8185,63 +7703,10 @@ func isAllowedAutoExitNodeID(polc policyclient.Client, exitNodeID tailcfg.Stable } if nodes, _ := polc.GetStringArray(pkey.AllowedSuggestedExitNodes, nil); nodes != nil { return slices.Contains(nodes, string(exitNodeID)) - } return true // no policy configured; allow all exit nodes } -// startAutoUpdate triggers an auto-update attempt. The actual update happens -// asynchronously. If another update is in progress, an error is returned. -func (b *LocalBackend) startAutoUpdate(logPrefix string) (retErr error) { - // Check if update was already started, and mark as started. - if !b.trySetC2NUpdateStarted() { - return errors.New("update already started") - } - defer func() { - // Clear the started flag if something failed. 
- if retErr != nil { - b.setC2NUpdateStarted(false) - } - }() - - cmdTS, err := findCmdTailscale() - if err != nil { - return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) - } - var ver struct { - Long string `json:"long"` - } - out, err := exec.Command(cmdTS, "version", "--json").Output() - if err != nil { - return fmt.Errorf("failed to find cmd/tailscale binary: %w", err) - } - if err := json.Unmarshal(out, &ver); err != nil { - return fmt.Errorf("invalid JSON from cmd/tailscale version --json: %w", err) - } - if ver.Long != version.Long() { - return fmt.Errorf("cmd/tailscale version %q does not match tailscaled version %q", ver.Long, version.Long()) - } - - cmd := tailscaleUpdateCmd(cmdTS) - buf := new(bytes.Buffer) - cmd.Stdout = buf - cmd.Stderr = buf - b.logf("%s: running %q", logPrefix, strings.Join(cmd.Args, " ")) - if err := cmd.Start(); err != nil { - return fmt.Errorf("failed to start cmd/tailscale update: %w", err) - } - - go func() { - if err := cmd.Wait(); err != nil { - b.logf("%s: update command failed: %v, output: %s", logPrefix, err, buf) - } else { - b.logf("%s: update attempt complete", logPrefix) - } - b.setC2NUpdateStarted(false) - }() - return nil -} - // srcIPHasCapForFilter is called by the packet filter when evaluating firewall // rules that require a source IP to have a certain node capability. // @@ -8275,15 +7740,6 @@ func maybeUsernameOf(actor ipnauth.Actor) string { return username } -// VIPServices returns the list of tailnet services that this node -// is serving as a destination for. -// The returned memory is owned by the caller. -func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { - b.mu.Lock() - defer b.mu.Unlock() - return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) -} - func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { if len(services) == 0 { return "" @@ -8297,38 +7753,6 @@ func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { return hex.EncodeToString(hash[:]) } -func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { - // keyed by service name - var services map[tailcfg.ServiceName]*tailcfg.VIPService - if b.serveConfig.Valid() { - for svc, config := range b.serveConfig.Services().All() { - mak.Set(&services, svc, &tailcfg.VIPService{ - Name: svc, - Ports: config.ServicePortRange(), - }) - } - } - - for _, s := range prefs.AdvertiseServices().All() { - sn := tailcfg.ServiceName(s) - if services == nil || services[sn] == nil { - mak.Set(&services, sn, &tailcfg.VIPService{ - Name: sn, - }) - } - services[sn].Active = true - } - - servicesList := slicesx.MapValues(services) - // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll - // be hashing a representation of this list later we want it to be in a consistent - // order. - slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { - return strings.Compare(a.Name.String(), b.Name.String()) - }) - return servicesList -} - var ( metricCurrentWatchIPNBus = clientmetric.NewGauge("localbackend_current_watch_ipn_bus") ) @@ -8342,11 +7766,7 @@ func (b *LocalBackend) stateEncrypted() opt.Bool { case version.IsMacAppStore(): return opt.NewBool(true) case version.IsMacSysExt(): - // MacSys still stores its state in plaintext on disk in addition to - // the Keychain. A future release will clean up the on-disk state - // files. - // TODO(#15830): always return true here once MacSys is fully migrated. 
- sp, _ := b.polc.GetBoolean(pkey.EncryptState, false) + sp, _ := b.polc.GetBoolean(pkey.EncryptState, true) return opt.NewBool(sp) default: // Probably self-compiled tailscaled, we don't use the Keychain diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 7d1c452f30697..33ecb688c52a3 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -30,10 +30,11 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/appc" "tailscale.com/appc/appctest" - "tailscale.com/clientupdate" "tailscale.com/control/controlclient" "tailscale.com/drive" "tailscale.com/drive/driveimpl" + "tailscale.com/feature" + _ "tailscale.com/feature/condregister/portmapper" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -48,6 +49,7 @@ import ( "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/tstest/deptest" + "tailscale.com/types/appctype" "tailscale.com/types/dnstype" "tailscale.com/types/ipproto" "tailscale.com/types/key" @@ -59,6 +61,7 @@ import ( "tailscale.com/types/views" "tailscale.com/util/dnsname" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/set" @@ -72,8 +75,6 @@ import ( "tailscale.com/wgengine/wgcfg" ) -func fakeStoreRoutes(*appc.RouteInfo) error { return nil } - func inRemove(ip netip.Addr) bool { for _, pfx := range removeFromDefaultRoute { if pfx.Contains(ip) { @@ -455,7 +456,8 @@ func (panicOnUseTransport) RoundTrip(*http.Request) (*http.Response, error) { } func newTestLocalBackend(t testing.TB) *LocalBackend { - return newTestLocalBackendWithSys(t, tsd.NewSystem()) + bus := eventbustest.NewBus(t) + return newTestLocalBackendWithSys(t, tsd.NewSystemWithBus(bus)) } // newTestLocalBackendWithSys creates a new LocalBackend with the given tsd.System. 
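Note: the test plumbing in local_test.go changes in lockstep with the backend: tsd.NewSystem gives way to an eventbustest bus plus tsd.NewSystemWithBus, health trackers are built with health.NewTracker(bus), and tsdial dialers get SetBus. Condensed into one hypothetical test in package ipnlocal, the new setup looks roughly like this; every constructor used here appears verbatim in the surrounding hunks, only the test itself is invented.

	func TestWithBus(t *testing.T) { // hypothetical test
		bus := eventbustest.NewBus(t)    // event bus scoped to the test's lifetime
		sys := tsd.NewSystemWithBus(bus) // tsd.System pre-wired with that bus
		lb := newTestLocalBackendWithSys(t, sys)

		// Components that used to be zero-value constructed now take the bus:
		pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(bus)))

		dialer := tsdial.NewDialer(netmon.NewStatic())
		dialer.SetBus(bus)

		_, _, _ = lb, pm, dialer
	}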
@@ -468,7 +470,7 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { t.Log("Added memory store for testing") } if _, ok := sys.Engine.GetOK(); !ok { - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -477,7 +479,9 @@ func newTestLocalBackendWithSys(t testing.TB, sys *tsd.System) *LocalBackend { t.Log("Added fake userspace engine for testing") } if _, ok := sys.Dialer.GetOK(); !ok { - sys.Set(tsdial.NewDialer(netmon.NewStatic())) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(sys.Bus.Get()) + sys.Set(dialer) t.Log("Added static dialer for testing") } lb, err := NewLocalBackend(logf, logid.PublicID{}, sys, 0) @@ -533,7 +537,6 @@ func TestZeroExitNodeViaLocalAPI(t *testing.T) { ExitNodeID: "", }, }, user) - if err != nil { t.Fatalf("enabling first exit node: %v", err) } @@ -543,7 +546,6 @@ func TestZeroExitNodeViaLocalAPI(t *testing.T) { if got, want := pv.InternalExitNodePrior(), tailcfg.StableNodeID(""); got != want { t.Fatalf("unexpected InternalExitNodePrior %q, want: %q", got, want) } - } func TestSetUseExitNodeEnabled(t *testing.T) { @@ -1501,6 +1503,15 @@ func wantExitNodeIDNotify(want tailcfg.StableNodeID) wantedNotification { } } +func wantStateNotify(want ipn.State) wantedNotification { + return wantedNotification{ + name: "State=" + want.String(), + cond: func(_ testing.TB, _ ipnauth.Actor, n *ipn.Notify) bool { + return n.State != nil && *n.State == want + }, + } +} + func TestInternalAndExternalInterfaces(t *testing.T) { type interfacePrefix struct { i netmon.Interface @@ -2304,14 +2315,13 @@ func TestDNSConfigForNetmapForExitNodeConfigs(t *testing.T) { func TestOfferingAppConnector(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) + bus := b.sys.Bus.Get() if b.OfferingAppConnector() { t.Fatal("unexpected offering app connector") } - if shouldStore { - b.appConnector = appc.NewAppConnector(t.Logf, nil, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - b.appConnector = appc.NewAppConnector(t.Logf, nil, nil, nil) - } + b.appConnector = appc.NewAppConnector(appc.Config{ + Logf: t.Logf, EventBus: bus, HasStoredRoutes: shouldStore, + }) if !b.OfferingAppConnector() { t.Fatal("unexpected not offering app connector") } @@ -2361,6 +2371,8 @@ func TestRouterAdvertiserIgnoresContainedRoutes(t *testing.T) { func TestObserveDNSResponse(t *testing.T) { for _, shouldStore := range []bool{false, true} { b := newTestBackend(t) + bus := b.sys.Bus.Get() + w := eventbustest.NewWatcher(t, bus) // ensure no error when no app connector is configured if err := b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { @@ -2368,22 +2380,30 @@ func TestObserveDNSResponse(t *testing.T) { } rc := &appctest.RouteCollector{} - if shouldStore { - b.appConnector = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - b.appConnector = appc.NewAppConnector(t.Logf, rc, nil, nil) - } - b.appConnector.UpdateDomains([]string{"example.com"}) - b.appConnector.Wait(context.Background()) + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: bus, + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + a.UpdateDomains([]string{"example.com"}) + a.Wait(t.Context()) + b.appConnector = a if err := 
b.ObserveDNSResponse(dnsResponse("example.com.", "192.0.0.8")); err != nil { t.Errorf("ObserveDNSResponse: %v", err) } - b.appConnector.Wait(context.Background()) + a.Wait(t.Context()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Fatalf("got routes %v, want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(w, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } @@ -2534,7 +2554,7 @@ func TestBackfillAppConnectorRoutes(t *testing.T) { // Store the test IP in profile data, but not in Prefs.AdvertiseRoutes. b.ControlKnobs().AppCStoreRoutes.Store(true) - if err := b.storeRouteInfo(&appc.RouteInfo{ + if err := b.storeRouteInfo(appctype.RouteInfo{ Domains: map[string][]netip.Addr{ "example.com": {ip}, }, @@ -2897,7 +2917,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) { if test.prefs == nil { test.prefs = ipn.NewPrefs() } - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) pm.prefs = test.prefs.View() b.currentNode().SetNetMap(test.nm) b.pm = pm @@ -3100,12 +3120,14 @@ func TestAutoExitNodeSetNetInfoCallback(t *testing.T) { b.hostinfo = hi k := key.NewMachine() var cc *mockControl + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(sys.Bus.Get()) opts := controlclient.Options{ ServerURL: "https://example.com", GetMachinePrivateKey: func() (key.MachinePrivate, error) { return k, nil }, - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, Logf: b.logf, PolicyClient: polc, } @@ -3501,7 +3523,7 @@ func TestApplySysPolicy(t *testing.T) { wantPrefs.ControlURL = ipn.DefaultControlURL } - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) pm.prefs = usePrefs.View() b := newTestBackend(t, polc) @@ -3619,7 +3641,8 @@ func TestPreferencePolicyInfo(t *testing.T) { prefs := defaultPrefs.AsStruct() pp.set(prefs, tt.initialValue) - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) sys.PolicyClient.Set(polc) lb := newTestLocalBackendWithSys(t, sys) @@ -3708,7 +3731,7 @@ func TestOnTailnetDefaultAutoUpdate(t *testing.T) { // On platforms that don't support auto-update we can never // transition to auto-updates being enabled. The value should // remain unchanged after onTailnetDefaultAutoUpdate. - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { want = tt.before } if got := b.pm.CurrentPrefs().AutoUpdate().Apply; got != want { @@ -4902,7 +4925,7 @@ func TestSuggestExitNode(t *testing.T) { allowList = set.SetOf(tt.allowPolicy) } - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) defer nb.shutdown(errShutdown) nb.SetNetMap(tt.netMap) @@ -5355,7 +5378,7 @@ func TestSuggestExitNodeTrafficSteering(t *testing.T) { tt.netMap.AllCaps = set.SetOf(slices.Collect(caps)) } - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) defer nb.shutdown(errShutdown) nb.SetNetMap(tt.netMap) @@ -5453,7 +5476,7 @@ func TestEnableAutoUpdates(t *testing.T) { }) // Enabling may fail, depending on which environment we are running this // test in. 
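Note: the auto-update tests that follow gate on feature.CanAutoUpdate instead of clientupdate.CanAutoUpdate, reflecting the move of that capability check behind the feature registry. A minimal hedged sketch of the same pre-check in non-test code, assuming the usual errors and feature imports; only feature.CanAutoUpdate is taken from this diff.

	// Hypothetical guard before attempting to enable auto-updates.
	func enableAutoUpdates() error {
		if !feature.CanAutoUpdate() {
			// This build or platform cannot self-update (for example a
			// store-distributed client, or a build with the updater omitted).
			return errors.New("auto-updates are not supported in this build")
		}
		// ...set prefs.AutoUpdate.Apply and persist...
		return nil
	}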
- wantErr := !clientupdate.CanAutoUpdate() + wantErr := !feature.CanAutoUpdate() gotErr := err != nil if gotErr != wantErr { t.Fatalf("enabling auto-updates: got error: %v (%v); want error: %v", gotErr, err, wantErr) @@ -5484,10 +5507,10 @@ func TestReadWriteRouteInfo(t *testing.T) { b.pm.currentProfile = prof1.View() // set up routeInfo - ri1 := &appc.RouteInfo{} + ri1 := appctype.RouteInfo{} ri1.Wildcards = []string{"1"} - ri2 := &appc.RouteInfo{} + ri2 := appctype.RouteInfo{} ri2.Wildcards = []string{"2"} // read before write @@ -5786,7 +5809,8 @@ func TestNotificationTargetMatch(t *testing.T) { type newTestControlFn func(tb testing.TB, opts controlclient.Options) controlclient.Client func newLocalBackendWithTestControl(t *testing.T, enableLogging bool, newControl newTestControlFn) *LocalBackend { - return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystem(), newControl) + bus := eventbustest.NewBus(t) + return newLocalBackendWithSysAndTestControl(t, enableLogging, tsd.NewSystemWithBus(bus), newControl) } func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys *tsd.System, newControl newTestControlFn) *LocalBackend { @@ -5800,7 +5824,7 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys sys.Set(store) } if _, hasEngine := sys.Engine.GetOK(); !hasEngine { - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -5813,7 +5837,6 @@ func newLocalBackendWithSysAndTestControl(t *testing.T, enableLogging bool, sys t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { return newControl(t, opts), nil @@ -5945,7 +5968,6 @@ func (w *notificationWatcher) watch(mask ipn.NotifyWatchOpt, wanted []wantedNoti return true }) - }() <-watchAddedCh } @@ -6123,7 +6145,7 @@ func TestLoginNotifications(t *testing.T) { t.Fatal(err) } - lb.cc.(*mockControl).send(nil, loginURL, false, nil) + lb.cc.(*mockControl).send(sendOpt{url: loginURL}) var wg sync.WaitGroup wg.Add(len(sessions)) @@ -6788,7 +6810,7 @@ func TestSrcCapPacketFilter(t *testing.T) { must.Do(k.UnmarshalText([]byte("nodekey:5c8f86d5fc70d924e55f02446165a5dae8f822994ad26bcf4b08fd841f9bf261"))) controlClient := lb.cc.(*mockControl) - controlClient.send(nil, "", false, &netmap.NetworkMap{ + controlClient.send(sendOpt{nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{ Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, }).View(), @@ -6817,7 +6839,7 @@ func TestSrcCapPacketFilter(t *testing.T) { }, }}, }}, - }) + }}) f := lb.GetFilterForTest() res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) @@ -6993,10 +7015,10 @@ func TestDisplayMessageIPNBus(t *testing.T) { cc := lb.cc.(*mockControl) // Assert that we are logged in and authorized, and also send our DisplayMessages - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), DisplayMessages: msgs, - }) + }}) // Tell the health tracker that we are in a map poll because // mockControl doesn't tell it @@ -7008,6 +7030,27 @@ func TestDisplayMessageIPNBus(t *testing.T) { } } +func 
TestHardwareAttested(t *testing.T) { + b := new(LocalBackend) + + // default false + if got := b.HardwareAttested(); got != false { + t.Errorf("HardwareAttested() = %v, want false", got) + } + + // set true + b.SetHardwareAttested() + if got := b.HardwareAttested(); got != true { + t.Errorf("HardwareAttested() = %v, want true after SetHardwareAttested()", got) + } + + // repeat calls are safe; still true + b.SetHardwareAttested() + if got := b.HardwareAttested(); got != true { + t.Errorf("HardwareAttested() = %v, want true after second SetHardwareAttested()", got) + } +} + func TestDeps(t *testing.T) { deptest.DepChecker{ OnImport: func(pkg string) { @@ -7040,3 +7083,41 @@ func toStrings[T ~string](in []T) []string { } return out } + +type textUpdate struct { + Advertise []string + Unadvertise []string +} + +func routeUpdateToText(u appctype.RouteUpdate) textUpdate { + var out textUpdate + for _, p := range u.Advertise { + out.Advertise = append(out.Advertise, p.String()) + } + for _, p := range u.Unadvertise { + out.Unadvertise = append(out.Unadvertise, p.String()) + } + return out +} + +func mustPrefix(ss ...string) (out []netip.Prefix) { + for _, s := range ss { + out = append(out, netip.MustParsePrefix(s)) + } + return +} + +// eqUpdate generates an eventbus test filter that matches an appctype.RouteUpdate +// message equal to want, or reports an error giving a human-readable diff. +// +// TODO(creachadair): This is copied from the appc test package, but we can't +// put it into the appctest package because the appc tests depend on it and +// that makes a cycle. Clean up those tests and put this somewhere common. +func eqUpdate(want appctype.RouteUpdate) func(appctype.RouteUpdate) error { + return func(got appctype.RouteUpdate) error { + if diff := cmp.Diff(routeUpdateToText(got), routeUpdateToText(want)); diff != "" { + return fmt.Errorf("wrong update (-got, +want):\n%s", diff) + } + return nil + } +} diff --git a/ipn/ipnlocal/loglines_test.go b/ipn/ipnlocal/loglines_test.go index 5bea6cabca4c4..d831aa8b075dc 100644 --- a/ipn/ipnlocal/loglines_test.go +++ b/ipn/ipnlocal/loglines_test.go @@ -50,7 +50,7 @@ func TestLocalLogLines(t *testing.T) { sys := tsd.NewSystem() store := new(mem.Store) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git a/ipn/ipnlocal/netstack.go b/ipn/ipnlocal/netstack.go new file mode 100644 index 0000000000000..f7ffd03058879 --- /dev/null +++ b/ipn/ipnlocal/netstack.go @@ -0,0 +1,74 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package ipnlocal + +import ( + "net" + "net/netip" + "time" + + "gvisor.dev/gvisor/pkg/tcpip" + "tailscale.com/types/ptr" +) + +// TCPHandlerForDst returns a TCP handler for connections to dst, or nil if +// no handler is needed. It also returns a list of TCP socket options to +// apply to the socket before calling the handler. +// TCPHandlerForDst is called both for connections to our node's local IP +// as well as to the service IP (quad 100). 
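Note: the new ipn/ipnlocal/netstack.go introduced just below concentrates the choice of in-process TCP handler in TCPHandlerForDst, which returns both a handler and a list of socket options to apply before invoking it. The sketch below shows how a caller might consume that pair; the dispatch function and the setOpt callback are assumptions, only TCPHandlerForDst's signature comes from the diff.

	// Hypothetical dispatch glue around TCPHandlerForDst; illustrative only.
	package netglue

	import (
		"log"
		"net"
		"net/netip"

		"gvisor.dev/gvisor/pkg/tcpip"
		"tailscale.com/ipn/ipnlocal"
	)

	func dispatchTCP(b *ipnlocal.LocalBackend, src, dst netip.AddrPort,
		setOpt func(tcpip.SettableSocketOption) error, c net.Conn) {

		handler, opts := b.TCPHandlerForDst(src, dst)
		if handler == nil {
			c.Close() // nothing in-process wants this flow
			return
		}
		for _, o := range opts {
			// For example the 72-hour keepalive idle option attached to SSH
			// flows below; treated as best effort here.
			if err := setOpt(o); err != nil {
				log.Printf("setting socket option: %v", err)
			}
		}
		if err := handler(c); err != nil {
			log.Printf("tcp handler: %v", err)
		}
	}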
+func (b *LocalBackend) TCPHandlerForDst(src, dst netip.AddrPort) (handler func(c net.Conn) error, opts []tcpip.SettableSocketOption) { + // First handle internal connections to the service IP + hittingServiceIP := dst.Addr() == magicDNSIP || dst.Addr() == magicDNSIPv6 + if hittingServiceIP { + switch dst.Port() { + case 80: + // TODO(mpminardi): do we want to show an error message if the web client + // has been disabled instead of the more "basic" web UI? + if b.ShouldRunWebClient() { + return b.handleWebClientConn, opts + } + return b.HandleQuad100Port80Conn, opts + case DriveLocalPort: + return b.handleDriveConn, opts + } + } + + if f, ok := hookServeTCPHandlerForVIPService.GetOk(); ok { + if handler := f(b, dst, src); handler != nil { + return handler, opts + } + } + // Then handle external connections to the local IP. + if !b.isLocalIP(dst.Addr()) { + return nil, nil + } + if dst.Port() == 22 && b.ShouldRunSSH() { + // Use a higher keepalive idle time for SSH connections, as they are + // typically long lived and idle connections are more likely to be + // intentional. Ideally we would turn this off entirely, but we can't + // tell the difference between a long lived connection that is idle + // vs a connection that is dead because the peer has gone away. + // We pick 72h as that is typically sufficient for a long weekend. + opts = append(opts, ptr.To(tcpip.KeepaliveIdleOption(72*time.Hour))) + return b.handleSSHConn, opts + } + // TODO(will,sonia): allow customizing web client port ? + if dst.Port() == webClientPort && b.ShouldExposeRemoteWebClient() { + return b.handleWebClientConn, opts + } + if port, ok := b.GetPeerAPIPort(dst.Addr()); ok && dst.Port() == port { + return func(c net.Conn) error { + b.handlePeerAPIConn(src, dst, c) + return nil + }, opts + } + if f, ok := hookTCPHandlerForServe.GetOk(); ok { + if handler := f(b, dst.Port(), src, nil); handler != nil { + return handler, opts + } + } + return nil, nil +} diff --git a/ipn/ipnlocal/network-lock.go b/ipn/ipnlocal/network-lock.go index 10f0cc8278109..4990824453c47 100644 --- a/ipn/ipnlocal/network-lock.go +++ b/ipn/ipnlocal/network-lock.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package ipnlocal import ( @@ -56,6 +58,53 @@ type tkaState struct { filtered []ipnstate.TKAPeer } +func (b *LocalBackend) initTKALocked() error { + cp := b.pm.CurrentProfile() + if cp.ID() == "" { + b.tka = nil + return nil + } + if b.tka != nil { + if b.tka.profile == cp.ID() { + // Already initialized. + return nil + } + // As we're switching profiles, we need to reset the TKA to nil. + b.tka = nil + } + root := b.TailscaleVarRoot() + if root == "" { + b.tka = nil + b.logf("network-lock unavailable; no state directory") + return nil + } + + chonkDir := b.chonkPathLocked() + if _, err := os.Stat(chonkDir); err == nil { + // The directory exists, which means network-lock has been initialized. 
+ storage, err := tka.ChonkDir(chonkDir) + if err != nil { + return fmt.Errorf("opening tailchonk: %v", err) + } + authority, err := tka.Open(storage) + if err != nil { + return fmt.Errorf("initializing tka: %v", err) + } + if err := authority.Compact(storage, tkaCompactionDefaults); err != nil { + b.logf("tka compaction failed: %v", err) + } + + b.tka = &tkaState{ + profile: cp.ID(), + authority: authority, + storage: storage, + } + b.logf("tka initialized at head %x", authority.Head()) + } + + return nil +} + // tkaFilterNetmapLocked checks the signatures on each node key, dropping // nodes from the netmap whose signature does not verify. // diff --git a/ipn/ipnlocal/network-lock_test.go b/ipn/ipnlocal/network-lock_test.go index 443539aecc2cb..c7c4c905f5ca1 100644 --- a/ipn/ipnlocal/network-lock_test.go +++ b/ipn/ipnlocal/network-lock_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package ipnlocal import ( @@ -33,6 +35,8 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/persist" "tailscale.com/types/tkatype" + "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/set" ) @@ -43,12 +47,15 @@ func (f observerFunc) SetControlClientStatus(_ controlclient.Client, s controlcl f(s) } -func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { +func fakeControlClient(t *testing.T, c *http.Client) (*controlclient.Auto, *eventbus.Bus) { hi := hostinfo.New() ni := tailcfg.NetInfo{LinkType: "wired"} hi.NetInfo = &ni + bus := eventbustest.NewBus(t) k := key.NewMachine() + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) opts := controlclient.Options{ ServerURL: "https://example.com", Hostinfo: hi, @@ -58,14 +65,15 @@ func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto { HTTPTestClient: c, NoiseTestClient: c, Observer: observerFunc(func(controlclient.Status) {}), - Dialer: tsdial.NewDialer(netmon.NewStatic()), + Dialer: dialer, + Bus: bus, } cc, err := controlclient.NewNoStart(opts) if err != nil { t.Fatal(err) } - return cc + return cc, bus } func fakeNoiseServer(t *testing.T, handler http.HandlerFunc) (*httptest.Server, *http.Client) { @@ -153,8 +161,8 @@ func TestTKAEnablementFlow(t *testing.T) { defer ts.Close() temp := t.TempDir() - cc := fakeControlClient(t, client) - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + cc, bus := fakeControlClient(t, client) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(bus))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -194,7 +202,7 @@ func TestTKADisablementFlow(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -262,7 +270,7 @@ func TestTKADisablementFlow(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -386,7 +394,7 @@ func TestTKASync(t *testing.T) { t.Run(tc.name, func(t *testing.T) { nodePriv := key.NewNode() nlPriv := key.NewNLPrivate() - pm := 
must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -513,7 +521,7 @@ func TestTKASync(t *testing.T) { defer ts.Close() // Setup the client. - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -702,7 +710,7 @@ func TestTKADisable(t *testing.T) { disablementSecret := bytes.Repeat([]byte{0xa5}, 32) nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -764,7 +772,7 @@ func TestTKADisable(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -793,7 +801,7 @@ func TestTKASign(t *testing.T) { toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -855,7 +863,7 @@ func TestTKASign(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -882,7 +890,7 @@ func TestTKAForceDisable(t *testing.T) { nlPriv := key.NewNLPrivate() key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2} - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -935,7 +943,7 @@ func TestTKAForceDisable(t *testing.T) { })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) sys := tsd.NewSystem() sys.Set(pm.Store()) @@ -980,7 +988,7 @@ func TestTKAAffectedSigs(t *testing.T) { // toSign := key.NewNode() nlPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -1071,7 +1079,7 @@ func TestTKAAffectedSigs(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1113,7 +1121,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { cosignPriv := key.NewNLPrivate() compromisedPriv := key.NewNLPrivate() - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, @@ -1183,7 +1191,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) { } })) defer ts.Close() - cc := fakeControlClient(t, client) + cc, _ := fakeControlClient(t, client) b := LocalBackend{ varRoot: temp, cc: cc, @@ -1204,7 +1212,7 @@ func 
TestTKARecoverCompromisedKeyFlow(t *testing.T) { // Cosign using the cosigning key. { - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) must.Do(pm.SetPrefs((&ipn.Prefs{ Persist: &persist.Persist{ PrivateNodeKey: nodePriv, diff --git a/ipn/ipnlocal/node_backend.go b/ipn/ipnlocal/node_backend.go index 4319ed372222f..3408d4cbb325d 100644 --- a/ipn/ipnlocal/node_backend.go +++ b/ipn/ipnlocal/node_backend.go @@ -12,6 +12,7 @@ import ( "sync/atomic" "go4.org/netipx" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/net/dns" "tailscale.com/net/tsaddr" @@ -64,6 +65,8 @@ import ( // Even if they're tied to the local node, instead of moving them here, we should extract the entire feature // into a separate package and have it install proper hooks. type nodeBackend struct { + logf logger.Logf + ctx context.Context // canceled by [nodeBackend.shutdown] ctxCancel context.CancelCauseFunc // cancels ctx @@ -103,9 +106,10 @@ type nodeBackend struct { nodeByAddr map[netip.Addr]tailcfg.NodeID } -func newNodeBackend(ctx context.Context, bus *eventbus.Bus) *nodeBackend { +func newNodeBackend(ctx context.Context, logf logger.Logf, bus *eventbus.Bus) *nodeBackend { ctx, ctxCancel := context.WithCancelCause(ctx) nb := &nodeBackend{ + logf: logf, ctx: ctx, ctxCancel: ctxCancel, eventClient: bus.Client("ipnlocal.nodeBackend"), @@ -258,6 +262,12 @@ func (nb *nodeBackend) PeersForTest() []tailcfg.NodeView { return ret } +func (nb *nodeBackend) CollectServices() bool { + nb.mu.Lock() + defer nb.mu.Unlock() + return nb.netMap != nil && nb.netMap.CollectServices +} + // AppendMatchingPeers returns base with all peers that match pred appended. // // It acquires b.mu to read the netmap but releases it before calling pred. @@ -352,6 +362,40 @@ func (nb *nodeBackend) PeerAPIBase(p tailcfg.NodeView) string { return peerAPIBase(nm, p) } +// PeerIsReachable reports whether the current node can reach p. If the ctx is +// done, this function may return a result based on stale reachability data. +func (nb *nodeBackend) PeerIsReachable(ctx context.Context, p tailcfg.NodeView) bool { + if !nb.SelfHasCap(tailcfg.NodeAttrClientSideReachability) { + // Legacy behavior is to always trust the control plane, which + // isn’t always correct because the peer could be slow to check + // in so that control marks it as offline. + // See tailscale/corp#32686. + return p.Online().Get() + } + + nb.mu.Lock() + nm := nb.netMap + nb.mu.Unlock() + + if self := nm.SelfNode; self.Valid() && self.ID() == p.ID() { + // This node can always reach itself. + return true + } + return nb.peerIsReachable(ctx, p) +} + +func (nb *nodeBackend) peerIsReachable(ctx context.Context, p tailcfg.NodeView) bool { + // TODO(sfllaw): The following does not actually test for client-side + // reachability. This would require a mechanism that tracks whether the + // current node can actually reach this peer, either because they are + // already communicating or because they can ping each other. + // + // Instead, it makes the client ignore p.Online completely. + // + // See tailscale/corp#32686. 
+ return true +} + func nodeIP(n tailcfg.NodeView, pred func(netip.Addr) bool) netip.Addr { for _, pfx := range n.Addresses().All() { if pfx.IsSingleIP() && pred(pfx.Addr()) { @@ -513,13 +557,16 @@ func (nb *nodeBackend) setFilter(f *filter.Filter) { nb.filterPub.Publish(magicsock.FilterUpdate{Filter: f}) } -func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, logf logger.Logf, versionOS string) *dns.Config { +func (nb *nodeBackend) dnsConfigForNetmap(prefs ipn.PrefsView, selfExpired bool, versionOS string) *dns.Config { nb.mu.Lock() defer nb.mu.Unlock() - return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, logf, versionOS) + return dnsConfigForNetmap(nb.netMap, nb.peers, prefs, selfExpired, nb.logf, versionOS) } func (nb *nodeBackend) exitNodeCanProxyDNS(exitNodeID tailcfg.StableNodeID) (dohURL string, ok bool) { + if !buildfeatures.HasUseExitNode { + return "", false + } nb.mu.Lock() defer nb.mu.Unlock() return exitNodeCanProxyDNS(nb.netMap, nb.peers, exitNodeID) @@ -624,6 +671,9 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. if nm == nil { return nil } + if !buildfeatures.HasDNS { + return &dns.Config{} + } // If the current node's key is expired, then we don't program any DNS // configuration into the operating system. This ensures that if the @@ -756,18 +806,20 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // If we're using an exit node and that exit node is new enough (1.19.x+) // to run a DoH DNS proxy, then send all our DNS traffic through it, // unless we find resolvers with UseWithExitNode set, in which case we use that. - if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { - filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) - if len(filtered) > 0 { - addDefault(filtered) - } else { - // If no default global resolvers with the override - // are configured, configure the exit node's resolver. - addDefault([]*dnstype.Resolver{{Addr: dohURL}}) - } + if buildfeatures.HasUseExitNode { + if dohURL, ok := exitNodeCanProxyDNS(nm, peers, prefs.ExitNodeID()); ok { + filtered := useWithExitNodeResolvers(nm.DNS.Resolvers) + if len(filtered) > 0 { + addDefault(filtered) + } else { + // If no default global resolvers with the override + // are configured, configure the exit node's resolver. + addDefault([]*dnstype.Resolver{{Addr: dohURL}}) + } - addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) - return dcfg + addSplitDNSRoutes(useWithExitNodeRoutes(nm.DNS.Routes)) + return dcfg + } } // If the user has set default resolvers ("override local DNS"), prefer to @@ -775,7 +827,7 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg. // node resolvers, use those as the default. 
if len(nm.DNS.Resolvers) > 0 { addDefault(nm.DNS.Resolvers) - } else { + } else if buildfeatures.HasUseExitNode { if resolvers, ok := wireguardExitNodeDNSResolvers(nm, peers, prefs.ExitNodeID()); ok { addDefault(resolvers) } diff --git a/ipn/ipnlocal/node_backend_test.go b/ipn/ipnlocal/node_backend_test.go index dc67d327c8041..f6698bd4bc920 100644 --- a/ipn/ipnlocal/node_backend_test.go +++ b/ipn/ipnlocal/node_backend_test.go @@ -9,11 +9,15 @@ import ( "testing" "time" + "tailscale.com/tailcfg" + "tailscale.com/tstest" + "tailscale.com/types/netmap" + "tailscale.com/types/ptr" "tailscale.com/util/eventbus" ) func TestNodeBackendReadiness(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) // The node backend is not ready until [nodeBackend.ready] is called, // and [nodeBackend.Wait] should fail with [context.DeadlineExceeded]. @@ -44,7 +48,7 @@ func TestNodeBackendReadiness(t *testing.T) { } func TestNodeBackendShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) shutdownCause := errors.New("test shutdown") @@ -82,7 +86,7 @@ func TestNodeBackendShutdown(t *testing.T) { } func TestNodeBackendReadyAfterShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) shutdownCause := errors.New("test shutdown") nb.shutdown(shutdownCause) @@ -94,7 +98,7 @@ func TestNodeBackendReadyAfterShutdown(t *testing.T) { func TestNodeBackendParentContextCancellation(t *testing.T) { ctx, cancelCtx := context.WithCancel(context.Background()) - nb := newNodeBackend(ctx, eventbus.New()) + nb := newNodeBackend(ctx, tstest.WhileTestRunningLogger(t), eventbus.New()) cancelCtx() @@ -111,7 +115,7 @@ func TestNodeBackendParentContextCancellation(t *testing.T) { } func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { - nb := newNodeBackend(t.Context(), eventbus.New()) + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) // Calling [nodeBackend.ready] and [nodeBackend.shutdown] concurrently // should not cause issues, and [nodeBackend.Wait] should unblock, @@ -121,3 +125,68 @@ func TestNodeBackendConcurrentReadyAndShutdown(t *testing.T) { nb.Wait(context.Background()) } + +func TestNodeBackendReachability(t *testing.T) { + for _, tc := range []struct { + name string + + // Cap sets [tailcfg.NodeAttrClientSideReachability] on the self + // node. + // + // When disabled, the client relies on the control plane sending + // an accurate peer.Online flag. When enabled, the client + // ignores peer.Online and determines whether it can reach the + // peer node. 
+ cap bool + + peer tailcfg.Node + want bool + }{ + { + name: "disabled/offline", + cap: false, + peer: tailcfg.Node{ + Online: ptr.To(false), + }, + want: false, + }, + { + name: "disabled/online", + cap: false, + peer: tailcfg.Node{ + Online: ptr.To(true), + }, + want: true, + }, + { + name: "enabled/offline", + cap: true, + peer: tailcfg.Node{ + Online: ptr.To(false), + }, + want: true, + }, + { + name: "enabled/online", + cap: true, + peer: tailcfg.Node{ + Online: ptr.To(true), + }, + want: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + nb := newNodeBackend(t.Context(), tstest.WhileTestRunningLogger(t), eventbus.New()) + nb.netMap = &netmap.NetworkMap{} + if tc.cap { + nb.netMap.AllCaps.Make() + nb.netMap.AllCaps.Add(tailcfg.NodeAttrClientSideReachability) + } + + got := nb.PeerIsReachable(t.Context(), tc.peer.View()) + if got != tc.want { + t.Errorf("got %v, want %v", got, tc.want) + } + }) + } +} diff --git a/ipn/ipnlocal/peerapi.go b/ipn/ipnlocal/peerapi.go index 89554f0ff9eb1..a045086d468fa 100644 --- a/ipn/ipnlocal/peerapi.go +++ b/ipn/ipnlocal/peerapi.go @@ -16,7 +16,6 @@ import ( "net/http" "net/netip" "os" - "path/filepath" "runtime" "slices" "strconv" @@ -26,11 +25,11 @@ import ( "golang.org/x/net/dns/dnsmessage" "golang.org/x/net/http/httpguts" - "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" - "tailscale.com/ipn" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/net/netutil" @@ -39,20 +38,11 @@ import ( "tailscale.com/types/netmap" "tailscale.com/types/views" "tailscale.com/util/clientmetric" - "tailscale.com/util/httpm" "tailscale.com/wgengine/filter" ) -const ( - taildrivePrefix = "/v0/drive" -) - var initListenConfig func(*net.ListenConfig, netip.Addr, *netmon.State, string) error -// addH2C is non-nil on platforms where we want to add H2C -// ("cleartext" HTTP/2) support to the peerAPI. -var addH2C func(*http.Server) - // peerDNSQueryHandler is implemented by tsdns.Resolver. type peerDNSQueryHandler interface { HandlePeerDNSQuery(context.Context, []byte, netip.AddrPort, func(name string) bool) (res []byte, err error) @@ -142,6 +132,9 @@ type peerAPIListener struct { } func (pln *peerAPIListener) Close() error { + if !buildfeatures.HasPeerAPIServer { + return nil + } if pln.ln != nil { return pln.ln.Close() } @@ -149,6 +142,9 @@ func (pln *peerAPIListener) Close() error { } func (pln *peerAPIListener) serve() { + if !buildfeatures.HasPeerAPIServer { + return + } if pln.ln == nil { return } @@ -202,11 +198,11 @@ func (pln *peerAPIListener) ServeConn(src netip.AddrPort, c net.Conn) { peerUser: peerUser, } httpServer := &http.Server{ - Handler: h, - } - if addH2C != nil { - addH2C(httpServer) + Handler: h, + Protocols: new(http.Protocols), } + httpServer.Protocols.SetHTTP1(true) + httpServer.Protocols.SetUnencryptedHTTP2(true) // over WireGuard; "unencrypted" means no TLS go httpServer.Serve(netutil.NewOneConnListener(c, nil)) } @@ -225,6 +221,7 @@ type peerAPIHandler struct { type PeerAPIHandler interface { Peer() tailcfg.NodeView PeerCaps() tailcfg.PeerCapMap + CanDebug() bool // can remote node can debug this node (internal state, etc) Self() tailcfg.NodeView LocalBackend() *LocalBackend IsSelfUntagged() bool // whether the peer is untagged and the same as this user @@ -329,6 +326,9 @@ func peerAPIRequestShouldGetSecurityHeaders(r *http.Request) bool { // // It panics if the path is already registered. 
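Note: with ingress, doctor, and taildrive handling removed from peerapi.go in the hunks below, per-feature endpoints are expected to attach themselves through RegisterPeerAPIHandler instead. A hedged sketch of a feature package registering its endpoint at init time; RegisterPeerAPIHandler and the PeerAPIHandler methods used here are from this diff, while the package, path, and handler body are invented.

	// Hypothetical feature-side peerAPI registration; illustrative only.
	package exampleext

	import (
		"io"
		"net/http"

		"tailscale.com/ipn/ipnlocal"
	)

	func init() {
		ipnlocal.RegisterPeerAPIHandler("/v0/example", handleExample)
	}

	func handleExample(h ipnlocal.PeerAPIHandler, w http.ResponseWriter, r *http.Request) {
		if !h.CanDebug() { // CanDebug is the new interface method added above
			http.Error(w, "denied; no debug access", http.StatusForbidden)
			return
		}
		io.WriteString(w, "hello from "+h.Self().Name()+"\n")
	}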
func RegisterPeerAPIHandler(path string, f func(PeerAPIHandler, http.ResponseWriter, *http.Request)) { + if !buildfeatures.HasPeerAPIServer { + return + } if _, ok := peerAPIHandlers[path]; ok { panic(fmt.Sprintf("duplicate PeerAPI handler %q", path)) } @@ -347,6 +347,10 @@ var ( ) func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasPeerAPIServer { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if err := h.validatePeerAPIRequest(r); err != nil { metricInvalidRequests.Add(1) h.logf("invalid request from %v: %v", h.remoteAddr, err) @@ -364,44 +368,35 @@ func (h *peerAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } } - if strings.HasPrefix(r.URL.Path, "/dns-query") { + if buildfeatures.HasDNS && strings.HasPrefix(r.URL.Path, "/dns-query") { metricDNSCalls.Add(1) h.handleDNSQuery(w, r) return } - if strings.HasPrefix(r.URL.Path, taildrivePrefix) { - h.handleServeDrive(w, r) - return - } - switch r.URL.Path { - case "/v0/goroutines": - h.handleServeGoroutines(w, r) - return - case "/v0/env": - h.handleServeEnv(w, r) - return - case "/v0/metrics": - h.handleServeMetrics(w, r) - return - case "/v0/magicsock": - h.handleServeMagicsock(w, r) - return - case "/v0/dnsfwd": - h.handleServeDNSFwd(w, r) - return - case "/v0/interfaces": - h.handleServeInterfaces(w, r) - return - case "/v0/doctor": - h.handleServeDoctor(w, r) - return - case "/v0/sockstats": - h.handleServeSockStats(w, r) - return - case "/v0/ingress": - metricIngressCalls.Add(1) - h.handleServeIngress(w, r) - return + if buildfeatures.HasDebug { + switch r.URL.Path { + case "/v0/goroutines": + h.handleServeGoroutines(w, r) + return + case "/v0/env": + h.handleServeEnv(w, r) + return + case "/v0/metrics": + h.handleServeMetrics(w, r) + return + case "/v0/magicsock": + h.handleServeMagicsock(w, r) + return + case "/v0/dnsfwd": + h.handleServeDNSFwd(w, r) + return + case "/v0/interfaces": + h.handleServeInterfaces(w, r) + return + case "/v0/sockstats": + h.handleServeSockStats(w, r) + return + } } if ph, ok := peerAPIHandlers[r.URL.Path]; ok { ph(h, w, r) @@ -424,67 +419,6 @@ This is my Tailscale device. Your device is %v. 
} } -func (h *peerAPIHandler) handleServeIngress(w http.ResponseWriter, r *http.Request) { - // http.Errors only useful if hitting endpoint manually - // otherwise rely on log lines when debugging ingress connections - // as connection is hijacked for bidi and is encrypted tls - if !h.canIngress() { - h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr) - http.Error(w, "denied; no ingress cap", http.StatusForbidden) - return - } - logAndError := func(code int, publicMsg string) { - h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) - http.Error(w, publicMsg, code) - } - bad := func(publicMsg string) { - logAndError(http.StatusBadRequest, publicMsg) - } - if r.Method != "POST" { - logAndError(http.StatusMethodNotAllowed, "only POST allowed") - return - } - srcAddrStr := r.Header.Get("Tailscale-Ingress-Src") - if srcAddrStr == "" { - bad("Tailscale-Ingress-Src header not set") - return - } - srcAddr, err := netip.ParseAddrPort(srcAddrStr) - if err != nil { - bad("Tailscale-Ingress-Src header invalid; want ip:port") - return - } - target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target")) - if target == "" { - bad("Tailscale-Ingress-Target header not set") - return - } - if _, _, err := net.SplitHostPort(string(target)); err != nil { - bad("Tailscale-Ingress-Target header invalid; want host:port") - return - } - - getConnOrReset := func() (net.Conn, bool) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - h.logf("ingress: failed hijacking conn") - http.Error(w, "failed hijacking conn", http.StatusInternalServerError) - return nil, false - } - io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n") - return &ipn.FunnelConn{ - Conn: conn, - Src: srcAddr, - Target: target, - }, true - } - sendRST := func() { - http.Error(w, "denied", http.StatusForbidden) - } - - h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST) -} - func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -532,24 +466,6 @@ func (h *peerAPIHandler) handleServeInterfaces(w http.ResponseWriter, r *http.Re fmt.Fprintln(w, "") } -func (h *peerAPIHandler) handleServeDoctor(w http.ResponseWriter, r *http.Request) { - if !h.canDebug() { - http.Error(w, "denied; no debug access", http.StatusForbidden) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - fmt.Fprintln(w, "
Doctor Output

") - - fmt.Fprintln(w, "
")
-
-	h.ps.b.Doctor(r.Context(), func(format string, args ...any) {
-		line := fmt.Sprintf(format, args...)
-		fmt.Fprintln(w, html.EscapeString(line))
-	})
-
-	fmt.Fprintln(w, "</pre>
") -} - func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Request) { if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) @@ -648,6 +564,8 @@ func (h *peerAPIHandler) handleServeSockStats(w http.ResponseWriter, r *http.Req fmt.Fprintln(w, "") } +func (h *peerAPIHandler) CanDebug() bool { return h.canDebug() } + // canDebug reports whether h can debug this node (goroutines, metrics, // magicsock internal state, etc). func (h *peerAPIHandler) canDebug() bool { @@ -731,6 +649,10 @@ func (h *peerAPIHandler) handleServeMetrics(w http.ResponseWriter, r *http.Reque } func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.NotFound(w, r) + return + } if !h.canDebug() { http.Error(w, "denied; no debug access", http.StatusForbidden) return @@ -744,6 +666,9 @@ func (h *peerAPIHandler) handleServeDNSFwd(w http.ResponseWriter, r *http.Reques } func (h *peerAPIHandler) replyToDNSQueries() bool { + if !buildfeatures.HasDNS { + return false + } if h.isSelf { // If the peer is owned by the same user, just allow it // without further checks. @@ -795,7 +720,7 @@ func (h *peerAPIHandler) replyToDNSQueries() bool { // handleDNSQuery implements a DoH server (RFC 8484) over the peerapi. // It's not over HTTPS as the spec dictates, but rather HTTP-over-WireGuard. func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) { - if h.ps.resolver == nil { + if !buildfeatures.HasDNS || h.ps.resolver == nil { http.Error(w, "DNS not wired up", http.StatusNotImplemented) return } @@ -836,7 +761,7 @@ func (h *peerAPIHandler) handleDNSQuery(w http.ResponseWriter, r *http.Request) // TODO(raggi): consider pushing the integration down into the resolver // instead to avoid re-parsing the DNS response for improved performance in // the future. - if h.ps.b.OfferingAppConnector() { + if buildfeatures.HasAppConnectors && h.ps.b.OfferingAppConnector() { if err := h.ps.b.ObserveDNSResponse(res); err != nil { h.logf("ObserveDNSResponse error: %v", err) // This is not fatal, we probably just failed to parse the upstream @@ -1018,90 +943,6 @@ func (rbw *requestBodyWrapper) Read(b []byte) (int, error) { return n, err } -func (h *peerAPIHandler) handleServeDrive(w http.ResponseWriter, r *http.Request) { - h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) - if !h.ps.b.DriveSharingEnabled() { - h.logf("taildrive: not enabled") - http.Error(w, "taildrive not enabled", http.StatusNotFound) - return - } - - capsMap := h.PeerCaps() - driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] - if !ok { - h.logf("taildrive: not permitted") - http.Error(w, "taildrive not permitted", http.StatusForbidden) - return - } - - rawPerms := make([][]byte, 0, len(driveCaps)) - for _, cap := range driveCaps { - rawPerms = append(rawPerms, []byte(cap)) - } - - p, err := drive.ParsePermissions(rawPerms) - if err != nil { - h.logf("taildrive: error parsing permissions: %v", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - fs, ok := h.ps.b.sys.DriveForRemote.GetOK() - if !ok { - h.logf("taildrive: not supported on platform") - http.Error(w, "taildrive not supported on platform", http.StatusNotFound) - return - } - wr := &httpResponseWrapper{ - ResponseWriter: w, - } - bw := &requestBodyWrapper{ - ReadCloser: r.Body, - } - r.Body = bw - - defer func() { - switch wr.statusCode { - case 304: - // 304s are particularly chatty so skip logging. 
- default: - log := h.logf - if r.Method != httpm.PUT && r.Method != httpm.GET { - log = h.logfv1 - } - contentType := "unknown" - if ct := wr.Header().Get("Content-Type"); ct != "" { - contentType = ct - } - - log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) - } - }() - - r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) - fs.ServeHTTPWithPerms(p, wr, r) -} - -// parseDriveFileExtensionForLog parses the file extension, if available. -// If a file extension is not present or parsable, the file extension is -// set to "unknown". If the file extension contains a double quote, it is -// replaced with "removed". -// All whitespace is removed from a parsed file extension. -// File extensions including the leading ., e.g. ".gif". -func parseDriveFileExtensionForLog(path string) string { - fileExt := "unknown" - if fe := filepath.Ext(path); fe != "" { - if strings.Contains(fe, "\"") { - // Do not log include file extensions with quotes within them. - return "removed" - } - // Remove white space from user defined inputs. - fileExt = strings.ReplaceAll(fe, " ", "") - } - - return fileExt -} - // peerAPIURL returns an HTTP URL for the peer's peerapi service, // without a trailing slash. // @@ -1194,6 +1035,5 @@ var ( metricInvalidRequests = clientmetric.NewCounter("peerapi_invalid_requests") // Non-debug PeerAPI endpoints. - metricDNSCalls = clientmetric.NewCounter("peerapi_dns") - metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + metricDNSCalls = clientmetric.NewCounter("peerapi_dns") ) diff --git a/ipn/ipnlocal/peerapi_drive.go b/ipn/ipnlocal/peerapi_drive.go new file mode 100644 index 0000000000000..8dffacd9a2513 --- /dev/null +++ b/ipn/ipnlocal/peerapi_drive.go @@ -0,0 +1,110 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package ipnlocal + +import ( + "net/http" + "path/filepath" + "strings" + + "tailscale.com/drive" + "tailscale.com/tailcfg" + "tailscale.com/util/httpm" +) + +const ( + taildrivePrefix = "/v0/drive" +) + +func init() { + peerAPIHandlerPrefixes[taildrivePrefix] = handleServeDrive +} + +func handleServeDrive(hi PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + h := hi.(*peerAPIHandler) + + h.logfv1("taildrive: got %s request from %s", r.Method, h.peerNode.Key().ShortString()) + if !h.ps.b.DriveSharingEnabled() { + h.logf("taildrive: not enabled") + http.Error(w, "taildrive not enabled", http.StatusNotFound) + return + } + + capsMap := h.PeerCaps() + driveCaps, ok := capsMap[tailcfg.PeerCapabilityTaildrive] + if !ok { + h.logf("taildrive: not permitted") + http.Error(w, "taildrive not permitted", http.StatusForbidden) + return + } + + rawPerms := make([][]byte, 0, len(driveCaps)) + for _, cap := range driveCaps { + rawPerms = append(rawPerms, []byte(cap)) + } + + p, err := drive.ParsePermissions(rawPerms) + if err != nil { + h.logf("taildrive: error parsing permissions: %v", err) + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + fs, ok := h.ps.b.sys.DriveForRemote.GetOK() + if !ok { + h.logf("taildrive: not supported on platform") + http.Error(w, "taildrive not supported on platform", http.StatusNotFound) + return + } + wr := &httpResponseWrapper{ + ResponseWriter: w, + } + bw := &requestBodyWrapper{ 
+ ReadCloser: r.Body, + } + r.Body = bw + + defer func() { + switch wr.statusCode { + case 304: + // 304s are particularly chatty so skip logging. + default: + log := h.logf + if r.Method != httpm.PUT && r.Method != httpm.GET { + log = h.logfv1 + } + contentType := "unknown" + if ct := wr.Header().Get("Content-Type"); ct != "" { + contentType = ct + } + + log("taildrive: share: %s from %s to %s: status-code=%d ext=%q content-type=%q tx=%.f rx=%.f", r.Method, h.peerNode.Key().ShortString(), h.selfNode.Key().ShortString(), wr.statusCode, parseDriveFileExtensionForLog(r.URL.Path), contentType, roundTraffic(wr.contentLength), roundTraffic(bw.bytesRead)) + } + }() + + r.URL.Path = strings.TrimPrefix(r.URL.Path, taildrivePrefix) + fs.ServeHTTPWithPerms(p, wr, r) +} + +// parseDriveFileExtensionForLog parses the file extension, if available. +// If a file extension is not present or parsable, the file extension is +// set to "unknown". If the file extension contains a double quote, it is +// replaced with "removed". +// All whitespace is removed from a parsed file extension. +// File extensions including the leading ., e.g. ".gif". +func parseDriveFileExtensionForLog(path string) string { + fileExt := "unknown" + if fe := filepath.Ext(path); fe != "" { + if strings.Contains(fe, "\"") { + // Do not log include file extensions with quotes within them. + return "removed" + } + // Remove white space from user defined inputs. + fileExt = strings.ReplaceAll(fe, " ", "") + } + + return fileExt +} diff --git a/ipn/ipnlocal/peerapi_h2c.go b/ipn/ipnlocal/peerapi_h2c.go deleted file mode 100644 index fbfa8639808ae..0000000000000 --- a/ipn/ipnlocal/peerapi_h2c.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !ios && !android && !js - -package ipnlocal - -import ( - "net/http" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/h2c" -) - -func init() { - addH2C = func(s *http.Server) { - h2s := &http2.Server{} - s.Handler = h2c.NewHandler(s.Handler, h2s) - } -} diff --git a/ipn/ipnlocal/peerapi_test.go b/ipn/ipnlocal/peerapi_test.go index 5654cf27799e2..3c9f57f1fcf6a 100644 --- a/ipn/ipnlocal/peerapi_test.go +++ b/ipn/ipnlocal/peerapi_test.go @@ -23,8 +23,10 @@ import ( "tailscale.com/tailcfg" "tailscale.com/tsd" "tailscale.com/tstest" + "tailscale.com/types/appctype" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/usermetric" "tailscale.com/wgengine" @@ -194,10 +196,9 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) { h.isSelf = false h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) @@ -249,19 +250,18 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := 
must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, nil, nil) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -319,25 +319,25 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) { func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { for _, shouldStore := range []bool{false, true} { - ctx := context.Background() var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) + bw := eventbustest.NewWatcher(t, sys.Bus.Get()) rc := &appctest.RouteCollector{} - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) reg := new(usermetric.Registry) eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - a = appc.NewAppConnector(t.Logf, rc, nil, nil) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -347,7 +347,7 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"example.com"}) - h.ps.b.appConnector.Wait(ctx) + a.Wait(t.Context()) h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.AResource( @@ -377,12 +377,18 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("unexpected status code: %v", w.Code) } - h.ps.b.appConnector.Wait(ctx) + a.Wait(t.Context()) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("got %v; want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(bw, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } @@ -392,20 +398,21 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { var h peerAPIHandler h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345") - sys := tsd.NewSystem() - t.Cleanup(sys.Bus.Get().Close) + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) + bw := eventbustest.NewWatcher(t, sys.Bus.Get()) - ht := new(health.Tracker) + ht := health.NewTracker(sys.Bus.Get()) reg := new(usermetric.Registry) rc := &appctest.RouteCollector{} eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0, ht, reg, sys.Bus.Get(), sys.Set) pm := must.Get(newProfileManager(new(mem.Store), t.Logf, ht)) - var a *appc.AppConnector - if shouldStore { - a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes) - } else { - a = appc.NewAppConnector(t.Logf, rc, nil, nil) - } + a := appc.NewAppConnector(appc.Config{ + Logf: t.Logf, + EventBus: sys.Bus.Get(), + RouteAdvertiser: rc, + HasStoredRoutes: shouldStore, + }) + t.Cleanup(a.Close) sys.Set(pm.Store()) sys.Set(eng) @@ -415,7 +422,7 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { 
h.ps = &peerAPIServer{b: b} h.ps.b.appConnector.UpdateDomains([]string{"www.example.com"}) - h.ps.b.appConnector.Wait(ctx) + a.Wait(ctx) h.ps.resolver = &fakeResolver{build: func(b *dnsmessage.Builder) { b.CNAMEResource( @@ -456,12 +463,18 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) { if w.Code != http.StatusOK { t.Errorf("unexpected status code: %v", w.Code) } - h.ps.b.appConnector.Wait(ctx) + a.Wait(ctx) wantRoutes := []netip.Prefix{netip.MustParsePrefix("192.0.0.8/32")} if !slices.Equal(rc.Routes(), wantRoutes) { t.Errorf("got %v; want %v", rc.Routes(), wantRoutes) } + + if err := eventbustest.Expect(bw, + eqUpdate(appctype.RouteUpdate{Advertise: mustPrefix("192.0.0.8/32")}), + ); err != nil { + t.Error(err) + } } } diff --git a/ipn/ipnlocal/prefs_metrics.go b/ipn/ipnlocal/prefs_metrics.go index fa768ba3ce238..34c5f5504fac4 100644 --- a/ipn/ipnlocal/prefs_metrics.go +++ b/ipn/ipnlocal/prefs_metrics.go @@ -6,6 +6,7 @@ package ipnlocal import ( "errors" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/tailcfg" "tailscale.com/util/clientmetric" @@ -85,6 +86,9 @@ func (e *prefsMetricsEditEvent) record() error { // false otherwise. The caller is responsible for ensuring that the id belongs to // an exit node. func (e *prefsMetricsEditEvent) exitNodeType(id tailcfg.StableNodeID) (props []exitNodeProperty, isNode bool) { + if !buildfeatures.HasUseExitNode { + return nil, false + } var peer tailcfg.NodeView if peer, isNode = e.node.PeerByStableID(id); isNode { diff --git a/ipn/ipnlocal/profiles.go b/ipn/ipnlocal/profiles.go index 1d312cfa606b3..9c217637890cc 100644 --- a/ipn/ipnlocal/profiles.go +++ b/ipn/ipnlocal/profiles.go @@ -13,14 +13,17 @@ import ( "slices" "strings" - "tailscale.com/clientupdate" "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/ipnext" "tailscale.com/tailcfg" + "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/persist" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" ) var debug = envknob.RegisterBool("TS_DEBUG_PROFILES") @@ -644,8 +647,8 @@ func (pm *profileManager) setProfileAsUserDefault(profile ipn.LoginProfileView) return pm.WriteState(k, []byte(profile.Key())) } -func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) { - bs, err := pm.store.ReadState(key) +func (pm *profileManager) loadSavedPrefs(k ipn.StateKey) (ipn.PrefsView, error) { + bs, err := pm.store.ReadState(k) if err == ipn.ErrStateNotExist || len(bs) == 0 { return defaultPrefs, nil } @@ -653,10 +656,18 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error return ipn.PrefsView{}, err } savedPrefs := ipn.NewPrefs() + + // if supported by the platform, create an empty hardware attestation key to use when deserializing + // to avoid type exceptions from json.Unmarshaling into an interface{}. 
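The two comment lines above describe a general encoding/json behavior: a nil interface-typed field cannot be decoded into, while a field pre-seeded with a pointer to a concrete empty value can. A minimal, self-contained sketch of that pattern follows; the Key, softwareKey, and prefs names are illustrative only, not the types used in this patch.

package main

import (
	"encoding/json"
	"fmt"
)

// Key is an illustrative stand-in for an interface-typed prefs field.
type Key interface{ KeyID() string }

type softwareKey struct {
	ID string `json:"id"`
}

func (k *softwareKey) KeyID() string { return k.ID }

type prefs struct {
	Attestation Key `json:"attestation"`
}

func main() {
	data := []byte(`{"attestation":{"id":"k1"}}`)

	// Nil interface field: encoding/json has no concrete type to decode
	// into, so this returns an unmarshal type error.
	var bad prefs
	fmt.Println(json.Unmarshal(data, &bad))

	// Pre-seeded with an empty concrete value: the decoder follows the
	// non-nil pointer inside the interface and fills it in.
	good := prefs{Attestation: &softwareKey{}}
	fmt.Println(json.Unmarshal(data, &good), good.Attestation.KeyID())
}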
+ hw, _ := key.NewEmptyHardwareAttestationKey() + savedPrefs.Persist = &persist.Persist{ + AttestationKey: hw, + } + if err := ipn.PrefsFromBytes(bs, savedPrefs); err != nil { return ipn.PrefsView{}, fmt.Errorf("parsing saved prefs: %v", err) } - pm.logf("using backend prefs for %q: %v", key, savedPrefs.Pretty()) + pm.logf("using backend prefs for %q: %v", k, savedPrefs.Pretty()) // Ignore any old stored preferences for https://login.tailscale.com // as the control server that would override the new default of @@ -673,7 +684,7 @@ func (pm *profileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error // cause any EditPrefs calls to fail (other than disabling auto-updates). // // Reset AutoUpdate.Apply if we detect such invalid prefs. - if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !clientupdate.CanAutoUpdate() { + if savedPrefs.AutoUpdate.Apply.EqualBool(true) && !feature.CanAutoUpdate() { savedPrefs.AutoUpdate.Apply.Clear() } @@ -838,7 +849,9 @@ func (pm *profileManager) CurrentPrefs() ipn.PrefsView { // ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing. func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) { - ht := new(health.Tracker) // in tests, don't care about the health status + bus := eventbus.New() + defer bus.Close() + ht := health.NewTracker(bus) // in tests, don't care about the health status pm, err := newProfileManager(store, logf, ht) if err != nil { return ipn.PrefsView{}, err diff --git a/ipn/ipnlocal/profiles_test.go b/ipn/ipnlocal/profiles_test.go index 52b095be1a5fe..deeab2ade9b15 100644 --- a/ipn/ipnlocal/profiles_test.go +++ b/ipn/ipnlocal/profiles_test.go @@ -12,7 +12,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "tailscale.com/clientupdate" + _ "tailscale.com/clientupdate" // for feature registration side effects + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" @@ -20,13 +21,14 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/persist" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) func TestProfileCurrentUserSwitch(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -63,7 +65,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { t.Fatalf("CurrentPrefs() = %v, want emptyPrefs", pm.CurrentPrefs().Pretty()) } - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -81,7 +83,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) { func TestProfileList(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -149,6 +151,7 @@ func TestProfileDupe(t *testing.T) { ID: tailcfg.UserID(user), LoginName: fmt.Sprintf("user%d@example.com", user), }, + AttestationKey: nil, } } user1Node1 := newPersist(1, 1) @@ -285,7 +288,7 @@ func TestProfileDupe(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) 
{ store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -318,7 +321,7 @@ func TestProfileDupe(t *testing.T) { func TestProfileManagement(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -416,7 +419,7 @@ func TestProfileManagement(t *testing.T) { t.Logf("Recreate profile manager from store") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -432,7 +435,7 @@ func TestProfileManagement(t *testing.T) { t.Logf("Recreate profile manager from store after deleting default profile") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -463,7 +466,7 @@ func TestProfileManagement(t *testing.T) { wantCurProfile = "user@2.example.com" checkProfiles(t) - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { t.Logf("Save an invalid AutoUpdate pref value") prefs := pm.CurrentPrefs().AsStruct() prefs.AutoUpdate.Apply.Set(true) @@ -474,7 +477,7 @@ func TestProfileManagement(t *testing.T) { t.Fatal("SetPrefs failed to save auto-update setting") } // Re-load profiles to trigger migration for invalid auto-update value. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatal(err) } @@ -496,7 +499,7 @@ func TestProfileManagementWindows(t *testing.T) { store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -565,7 +568,7 @@ func TestProfileManagementWindows(t *testing.T) { t.Logf("Recreate profile manager from store, should reset prefs") // Recreate the profile manager to ensure that it can load the profiles // from the store at startup. - pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -588,7 +591,7 @@ func TestProfileManagementWindows(t *testing.T) { } // Recreate the profile manager to ensure that it starts with test profile. 
- pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows") + pm, err = newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "windows") if err != nil { t.Fatal(err) } @@ -1091,7 +1094,7 @@ func TestProfileStateChangeCallback(t *testing.T) { t.Parallel() store := new(mem.Store) - pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux") + pm, err := newProfileManagerWithGOOS(store, logger.Discard, health.NewTracker(eventbustest.NewBus(t)), "linux") if err != nil { t.Fatalf("newProfileManagerWithGOOS: %v", err) } diff --git a/ipn/ipnlocal/serve.go b/ipn/ipnlocal/serve.go index 36738b88119f5..3c967fd1e6403 100644 --- a/ipn/ipnlocal/serve.go +++ b/ipn/ipnlocal/serve.go @@ -1,6 +1,10 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + +// TODO: move this whole file to its own package, out of ipnlocal. + package ipnlocal import ( @@ -12,6 +16,7 @@ import ( "errors" "fmt" "io" + "maps" "mime" "net" "net/http" @@ -28,19 +33,34 @@ import ( "time" "unicode/utf8" - "golang.org/x/net/http2" + "go4.org/mem" "tailscale.com/ipn" - "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/backoff" + "tailscale.com/util/clientmetric" "tailscale.com/util/ctxkey" "tailscale.com/util/mak" + "tailscale.com/util/slicesx" "tailscale.com/version" ) +func init() { + hookServeTCPHandlerForVIPService.Set((*LocalBackend).tcpHandlerForVIPService) + hookTCPHandlerForServe.Set((*LocalBackend).tcpHandlerForServe) + hookServeUpdateServeTCPPortNetMapAddrListenersLocked.Set((*LocalBackend).updateServeTCPPortNetMapAddrListenersLocked) + + hookServeSetTCPPortsInterceptedFromNetmapAndPrefsLocked.Set(serveSetTCPPortsInterceptedFromNetmapAndPrefsLocked) + hookServeClearVIPServicesTCPPortsInterceptedLocked.Set(func(b *LocalBackend) { + b.setVIPServicesTCPPortsInterceptedLocked(nil) + }) + + RegisterC2N("GET /vip-services", handleC2NVIPServicesGet) +} + const ( contentTypeHeader = "Content-Type" grpcBaseContentType = "application/grpc" @@ -222,6 +242,10 @@ func (s *localListener) handleListenersAccept(ln net.Listener) error { // // b.mu must be held. 
func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint16) { + if b.sys.IsNetstack() { + // don't listen on netmap addresses if we're in userspace mode + return + } // close existing listeners where port // is no longer in incoming ports list for ap, sl := range b.serveListeners { @@ -439,6 +463,38 @@ func (b *LocalBackend) HandleIngressTCPConn(ingressPeer tailcfg.NodeView, target handler(c) } +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { + // keyed by service name + var services map[tailcfg.ServiceName]*tailcfg.VIPService + if b.serveConfig.Valid() { + for svc, config := range b.serveConfig.Services().All() { + mak.Set(&services, svc, &tailcfg.VIPService{ + Name: svc, + Ports: config.ServicePortRange(), + }) + } + } + + for _, s := range prefs.AdvertiseServices().All() { + sn := tailcfg.ServiceName(s) + if services == nil || services[sn] == nil { + mak.Set(&services, sn, &tailcfg.VIPService{ + Name: sn, + }) + } + services[sn].Active = true + } + + servicesList := slicesx.MapValues(services) + // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll + // be hashing a representation of this list later we want it to be in a consistent + // order. + slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { + return strings.Compare(a.Name.String(), b.Name.String()) + }) + return servicesList +} + // tcpHandlerForVIPService returns a handler for a TCP connection to a VIP service // that is being served via the ipn.ServeConfig. It returns nil if the destination // address is not a VIP service or if the VIP service does not have a TCP handler set. @@ -704,8 +760,8 @@ type reverseProxy struct { insecure bool backend string lb *LocalBackend - httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends - h2cTransport lazy.SyncValue[*http2.Transport] // transport for h2c backends + httpTransport lazy.SyncValue[*http.Transport] // transport for non-h2c backends + h2cTransport lazy.SyncValue[*http.Transport] // transport for h2c backends // closed tracks whether proxy is closed/currently closing. closed atomic.Bool } @@ -713,9 +769,7 @@ type reverseProxy struct { // close ensures that any open backend connections get closed. func (rp *reverseProxy) close() { rp.closed.Store(true) - if h2cT := rp.h2cTransport.Get(func() *http2.Transport { - return nil - }); h2cT != nil { + if h2cT := rp.h2cTransport.Get(func() *http.Transport { return nil }); h2cT != nil { h2cT.CloseIdleConnections() } if httpTransport := rp.httpTransport.Get(func() *http.Transport { @@ -786,14 +840,17 @@ func (rp *reverseProxy) getTransport() *http.Transport { // getH2CTransport returns the Transport used for GRPC requests to the backend. // The Transport gets created lazily, at most once. 
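As a side note on the ordering concern called out in vipServicesFromPrefsLocked above: map iteration order is indeterminate, so the values are sorted by name before the list is hashed. A small standard-library sketch of that step; the vipService type and sortedServices helper are illustrative, not part of this patch.

package main

import (
	"fmt"
	"maps"
	"slices"
	"strings"
)

// vipService is an illustrative stand-in for a value keyed by service name.
type vipService struct{ Name string }

// sortedServices collects the map's values (indeterminate order) and sorts
// them by name so that a later hash of the list is stable.
func sortedServices(m map[string]*vipService) []*vipService {
	list := slices.Collect(maps.Values(m))
	slices.SortFunc(list, func(a, b *vipService) int {
		return strings.Compare(a.Name, b.Name)
	})
	return list
}

func main() {
	m := map[string]*vipService{
		"svc:web": {Name: "svc:web"},
		"svc:db":  {Name: "svc:db"},
	}
	for _, s := range sortedServices(m) {
		fmt.Println(s.Name) // always prints svc:db, then svc:web
	}
}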
-func (rp *reverseProxy) getH2CTransport() *http2.Transport { - return rp.h2cTransport.Get(func() *http2.Transport { - return &http2.Transport{ - AllowHTTP: true, - DialTLSContext: func(ctx context.Context, network string, addr string, _ *tls.Config) (net.Conn, error) { +func (rp *reverseProxy) getH2CTransport() http.RoundTripper { + return rp.h2cTransport.Get(func() *http.Transport { + var p http.Protocols + p.SetUnencryptedHTTP2(true) + tr := &http.Transport{ + Protocols: &p, + DialTLSContext: func(ctx context.Context, network string, addr string) (net.Conn, error) { return rp.lb.dialer.SystemDial(ctx, "tcp", rp.url.Host) }, } + return tr }) } @@ -1046,3 +1103,278 @@ func (b *LocalBackend) getTLSServeCertForPort(port uint16, forVIPService tailcfg return &cert, nil } } + +// setServeProxyHandlersLocked ensures there is an http proxy handler for each +// backend specified in serveConfig. It expects serveConfig to be valid and +// up-to-date, so should be called after reloadServeConfigLocked. +func (b *LocalBackend) setServeProxyHandlersLocked() { + if !b.serveConfig.Valid() { + return + } + var backends map[string]bool + for _, conf := range b.serveConfig.Webs() { + for _, h := range conf.Handlers().All() { + backend := h.Proxy() + if backend == "" { + // Only create proxy handlers for servers with a proxy backend. + continue + } + mak.Set(&backends, backend, true) + if _, ok := b.serveProxyHandlers.Load(backend); ok { + continue + } + + b.logf("serve: creating a new proxy handler for %s", backend) + p, err := b.proxyHandlerForBackend(backend) + if err != nil { + // The backend endpoint (h.Proxy) should have been validated by expandProxyTarget + // in the CLI, so just log the error here. + b.logf("[unexpected] could not create proxy for %v: %s", backend, err) + continue + } + b.serveProxyHandlers.Store(backend, p) + } + } + + // Clean up handlers for proxy backends that are no longer present + // in configuration. + b.serveProxyHandlers.Range(func(key, value any) bool { + backend := key.(string) + if !backends[backend] { + b.logf("serve: closing idle connections to %s", backend) + b.serveProxyHandlers.Delete(backend) + value.(*reverseProxy).close() + } + return true + }) +} + +// VIPServices returns the list of tailnet services that this node +// is serving as a destination for. +// The returned memory is owned by the caller. 
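The getH2CTransport change above switches from the golang.org/x/net/http2 transport to plain net/http: since Go 1.24, an *http.Transport can speak unencrypted HTTP/2 ("h2c") when its Protocols field enables it. A minimal sketch of that setup, assuming a purely illustrative local backend URL:

package main

import (
	"fmt"
	"net/http"
)

// newH2CClient returns a client whose transport speaks HTTP/2 without TLS.
// With only UnencryptedHTTP2 enabled, http:// requests are sent as h2c.
func newH2CClient() *http.Client {
	var p http.Protocols
	p.SetUnencryptedHTTP2(true)
	return &http.Client{
		Transport: &http.Transport{Protocols: &p},
	}
}

func main() {
	c := newH2CClient()
	// Assumes some h2c-capable backend is listening locally.
	resp, err := c.Get("http://localhost:8080/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0" when the backend accepted h2c
}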
+func (b *LocalBackend) VIPServices() []*tailcfg.VIPService { + b.mu.Lock() + defer b.mu.Unlock() + return b.vipServicesFromPrefsLocked(b.pm.CurrentPrefs()) +} + +func handleC2NVIPServicesGet(b *LocalBackend, w http.ResponseWriter, r *http.Request) { + b.logf("c2n: GET /vip-services received") + var res tailcfg.C2NVIPServicesResponse + res.VIPServices = b.VIPServices() + res.ServicesHash = b.vipServiceHash(res.VIPServices) + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +var metricIngressCalls = clientmetric.NewCounter("peerapi_ingress") + +func init() { + RegisterPeerAPIHandler("/v0/ingress", handleServeIngress) + +} + +func handleServeIngress(ph PeerAPIHandler, w http.ResponseWriter, r *http.Request) { + h := ph.(*peerAPIHandler) + metricIngressCalls.Add(1) + + // http.Errors only useful if hitting endpoint manually + // otherwise rely on log lines when debugging ingress connections + // as connection is hijacked for bidi and is encrypted tls + if !h.canIngress() { + h.logf("ingress: denied; no ingress cap from %v", h.remoteAddr) + http.Error(w, "denied; no ingress cap", http.StatusForbidden) + return + } + logAndError := func(code int, publicMsg string) { + h.logf("ingress: bad request from %v: %s", h.remoteAddr, publicMsg) + http.Error(w, publicMsg, code) + } + bad := func(publicMsg string) { + logAndError(http.StatusBadRequest, publicMsg) + } + if r.Method != "POST" { + logAndError(http.StatusMethodNotAllowed, "only POST allowed") + return + } + srcAddrStr := r.Header.Get("Tailscale-Ingress-Src") + if srcAddrStr == "" { + bad("Tailscale-Ingress-Src header not set") + return + } + srcAddr, err := netip.ParseAddrPort(srcAddrStr) + if err != nil { + bad("Tailscale-Ingress-Src header invalid; want ip:port") + return + } + target := ipn.HostPort(r.Header.Get("Tailscale-Ingress-Target")) + if target == "" { + bad("Tailscale-Ingress-Target header not set") + return + } + if _, _, err := net.SplitHostPort(string(target)); err != nil { + bad("Tailscale-Ingress-Target header invalid; want host:port") + return + } + + getConnOrReset := func() (net.Conn, bool) { + conn, _, err := w.(http.Hijacker).Hijack() + if err != nil { + h.logf("ingress: failed hijacking conn") + http.Error(w, "failed hijacking conn", http.StatusInternalServerError) + return nil, false + } + io.WriteString(conn, "HTTP/1.1 101 Switching Protocols\r\n\r\n") + return &ipn.FunnelConn{ + Conn: conn, + Src: srcAddr, + Target: target, + }, true + } + sendRST := func() { + http.Error(w, "denied", http.StatusForbidden) + } + + h.ps.b.HandleIngressTCPConn(h.peerNode, target, srcAddr, getConnOrReset, sendRST) +} + +// wantIngressLocked reports whether this node has ingress configured. This bool +// is sent to the coordination server (in Hostinfo.WireIngress) as an +// optimization hint to know primarily which nodes are NOT using ingress, to +// avoid doing work for regular nodes. +// +// Even if the user's ServeConfig.AllowFunnel map was manually edited in raw +// mode and contains map entries with false values, sending true (from Len > 0) +// is still fine. This is only an optimization hint for the control plane and +// doesn't affect security or correctness. And we also don't expect people to +// modify their ServeConfig in raw mode. +func (b *LocalBackend) wantIngressLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.HasAllowFunnel() +} + +// hasIngressEnabledLocked reports whether the node has any funnel endpoint enabled. 
This bool is sent to control (in +// Hostinfo.IngressEnabled) to determine whether 'Funnel' badge should be displayed on this node in the admin panel. +func (b *LocalBackend) hasIngressEnabledLocked() bool { + return b.serveConfig.Valid() && b.serveConfig.IsFunnelOn() +} + +// shouldWireInactiveIngressLocked reports whether the node is in a state where funnel is not actively enabled, but it +// seems that it is intended to be used with funnel. +func (b *LocalBackend) shouldWireInactiveIngressLocked() bool { + return b.serveConfig.Valid() && !b.hasIngressEnabledLocked() && b.wantIngressLocked() +} + +func serveSetTCPPortsInterceptedFromNetmapAndPrefsLocked(b *LocalBackend, prefs ipn.PrefsView) (handlePorts []uint16) { + var vipServicesPorts map[tailcfg.ServiceName][]uint16 + + b.reloadServeConfigLocked(prefs) + if b.serveConfig.Valid() { + servePorts := make([]uint16, 0, 3) + for port := range b.serveConfig.TCPs() { + if port > 0 { + servePorts = append(servePorts, uint16(port)) + } + } + handlePorts = append(handlePorts, servePorts...) + + for svc, cfg := range b.serveConfig.Services().All() { + servicePorts := make([]uint16, 0, 3) + for port := range cfg.TCP().All() { + if port > 0 { + servicePorts = append(servicePorts, uint16(port)) + } + } + if _, ok := vipServicesPorts[svc]; !ok { + mak.Set(&vipServicesPorts, svc, servicePorts) + } else { + mak.Set(&vipServicesPorts, svc, append(vipServicesPorts[svc], servicePorts...)) + } + } + + b.setServeProxyHandlersLocked() + + // don't listen on netmap addresses if we're in userspace mode + if !b.sys.IsNetstack() { + b.updateServeTCPPortNetMapAddrListenersLocked(servePorts) + } + } + + b.setVIPServicesTCPPortsInterceptedLocked(vipServicesPorts) + + return handlePorts +} + +// reloadServeConfigLocked reloads the serve config from the store or resets the +// serve config to nil if not logged in. The "changed" parameter, when false, instructs +// the method to only run the reset-logic and not reload the store from memory to ensure +// foreground sessions are not removed if they are not saved on disk. +func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) { + if !b.currentNode().Self().Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID() == "" { + // We're not logged in, so we don't have a profile. + // Don't try to load the serve config. + b.lastServeConfJSON = mem.B(nil) + b.serveConfig = ipn.ServeConfigView{} + return + } + + confKey := ipn.ServeConfigKey(b.pm.CurrentProfile().ID()) + // TODO(maisem,bradfitz): prevent reading the config from disk + // if the profile has not changed. 
+ confj, err := b.store.ReadState(confKey) + if err != nil { + b.lastServeConfJSON = mem.B(nil) + b.serveConfig = ipn.ServeConfigView{} + return + } + if b.lastServeConfJSON.Equal(mem.B(confj)) { + return + } + b.lastServeConfJSON = mem.B(confj) + var conf ipn.ServeConfig + if err := json.Unmarshal(confj, &conf); err != nil { + b.logf("invalid ServeConfig %q in StateStore: %v", confKey, err) + b.serveConfig = ipn.ServeConfigView{} + return + } + + // remove inactive sessions + maps.DeleteFunc(conf.Foreground, func(sessionID string, sc *ipn.ServeConfig) bool { + _, ok := b.notifyWatchers[sessionID] + return !ok + }) + + b.serveConfig = conf.View() +} + +func (b *LocalBackend) setVIPServicesTCPPortsInterceptedLocked(svcPorts map[tailcfg.ServiceName][]uint16) { + if len(svcPorts) == 0 { + b.shouldInterceptVIPServicesTCPPortAtomic.Store(func(netip.AddrPort) bool { return false }) + return + } + nm := b.currentNode().NetMap() + if nm == nil { + b.logf("can't set intercept function for Service TCP Ports, netMap is nil") + return + } + vipServiceIPMap := nm.GetVIPServiceIPMap() + if len(vipServiceIPMap) == 0 { + // No approved VIP Services + return + } + + svcAddrPorts := make(map[netip.Addr]func(uint16) bool) + // Only set the intercept function if the service has been assigned a VIP. + for svcName, ports := range svcPorts { + addrs, ok := vipServiceIPMap[svcName] + if !ok { + continue + } + interceptFn := generateInterceptTCPPortFunc(ports) + for _, addr := range addrs { + svcAddrPorts[addr] = interceptFn + } + } + + b.shouldInterceptVIPServicesTCPPortAtomic.Store(generateInterceptVIPServicesTCPPortFunc(svcAddrPorts)) +} diff --git a/ipn/ipnlocal/serve_disabled.go b/ipn/ipnlocal/serve_disabled.go new file mode 100644 index 0000000000000..a97112941d844 --- /dev/null +++ b/ipn/ipnlocal/serve_disabled.go @@ -0,0 +1,34 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_serve + +// These are temporary (2025-09-13) stubs for when tailscaled is built with the +// ts_omit_serve build tag, disabling serve. +// +// TODO: move serve to a separate package, out of ipnlocal, and delete this +// file. One step at a time. 
+ +package ipnlocal + +import ( + "tailscale.com/ipn" + "tailscale.com/tailcfg" +) + +const serveEnabled = false + +type localListener = struct{} + +func (b *LocalBackend) DeleteForegroundSession(sessionID string) error { + return nil +} + +type funnelFlow = struct{} + +func (*LocalBackend) hasIngressEnabledLocked() bool { return false } +func (*LocalBackend) shouldWireInactiveIngressLocked() bool { return false } + +func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { + return nil +} diff --git a/ipn/ipnlocal/serve_test.go b/ipn/ipnlocal/serve_test.go index e2561cba9ef22..b4461d12f2ad0 100644 --- a/ipn/ipnlocal/serve_test.go +++ b/ipn/ipnlocal/serve_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_serve + package ipnlocal import ( @@ -13,6 +15,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "net/http" "net/http/httptest" "net/netip" @@ -33,6 +36,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/netmap" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/syspolicy/policyclient" @@ -240,11 +244,15 @@ func TestServeConfigForeground(t *testing.T) { err := b.SetServeConfig(&ipn.ServeConfig{ Foreground: map[string]*ipn.ServeConfig{ - session1: {TCP: map[uint16]*ipn.TCPPortHandler{ - 443: {TCPForward: "http://localhost:3000"}}, + session1: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 443: {TCPForward: "http://localhost:3000"}, + }, }, - session2: {TCP: map[uint16]*ipn.TCPPortHandler{ - 999: {TCPForward: "http://localhost:4000"}}, + session2: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 999: {TCPForward: "http://localhost:4000"}, + }, }, }, }, "") @@ -267,8 +275,10 @@ func TestServeConfigForeground(t *testing.T) { 5000: {TCPForward: "http://localhost:5000"}, }, Foreground: map[string]*ipn.ServeConfig{ - session2: {TCP: map[uint16]*ipn.TCPPortHandler{ - 999: {TCPForward: "http://localhost:4000"}}, + session2: { + TCP: map[uint16]*ipn.TCPPortHandler{ + 999: {TCPForward: "http://localhost:4000"}, + }, }, }, }, "") @@ -491,7 +501,6 @@ func TestServeConfigServices(t *testing.T) { } }) } - } func TestServeConfigETag(t *testing.T) { @@ -659,6 +668,7 @@ func TestServeHTTPProxyPath(t *testing.T) { }) } } + func TestServeHTTPProxyHeaders(t *testing.T) { b := newTestBackend(t) @@ -859,7 +869,6 @@ func Test_reverseProxyConfiguration(t *testing.T) { wantsURL: mustCreateURL(t, "https://example3.com"), }, }) - } func mustCreateURL(t *testing.T, u string) url.URL { @@ -873,12 +882,13 @@ func mustCreateURL(t *testing.T, u string) url.URL { func newTestBackend(t *testing.T, opts ...any) *LocalBackend { var logf logger.Logf = logger.Discard - const debug = true + const debug = false if debug { logf = logger.WithPrefix(tstest.WhileTestRunningLogger(t), "... 
") } - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) for _, o := range opts { switch v := o.(type) { @@ -891,7 +901,7 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { e, err := wgengine.NewUserspaceEngine(logf, wgengine.Config{ SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -909,7 +919,7 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { dir := t.TempDir() b.SetVarRoot(dir) - pm := must.Get(newProfileManager(new(mem.Store), logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), logf, health.NewTracker(bus))) pm.currentProfile = (&ipn.LoginProfile{ID: "id0"}).View() b.pm = pm @@ -952,13 +962,13 @@ func newTestBackend(t *testing.T, opts ...any) *LocalBackend { func TestServeFileOrDirectory(t *testing.T) { td := t.TempDir() writeFile := func(suffix, contents string) { - if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0600); err != nil { + if err := os.WriteFile(filepath.Join(td, suffix), []byte(contents), 0o600); err != nil { t.Fatal(err) } } writeFile("foo", "this is foo") writeFile("bar", "this is bar") - os.MkdirAll(filepath.Join(td, "subdir"), 0700) + os.MkdirAll(filepath.Join(td, "subdir"), 0o700) writeFile("subdir/file-a", "this is A") writeFile("subdir/file-b", "this is B") writeFile("subdir/file-c", "this is C") @@ -1076,3 +1086,88 @@ func TestEncTailscaleHeaderValue(t *testing.T) { } } } + +func TestServeGRPCProxy(t *testing.T) { + const msg = "some-response\n" + backend := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Path-Was", r.RequestURI) + w.Header().Set("Proto-Was", r.Proto) + io.WriteString(w, msg) + })) + backend.EnableHTTP2 = true + backend.Config.Protocols = new(http.Protocols) + backend.Config.Protocols.SetHTTP1(true) + backend.Config.Protocols.SetUnencryptedHTTP2(true) + backend.Start() + defer backend.Close() + + backendURL := must.Get(url.Parse(backend.URL)) + + lb := newTestBackend(t) + rp := &reverseProxy{ + logf: t.Logf, + url: backendURL, + backend: backend.URL, + lb: lb, + } + + req := func(method, urlStr string, opt ...any) *http.Request { + req := httptest.NewRequest(method, urlStr, nil) + for _, o := range opt { + switch v := o.(type) { + case int: + req.ProtoMajor = v + case string: + req.Header.Set("Content-Type", v) + default: + panic(fmt.Sprintf("unsupported option type %T", v)) + } + } + return req + } + + tests := []struct { + name string + req *http.Request + wantPath string + wantProto string + wantBody string + }{ + { + name: "non-gRPC", + req: req("GET", "http://foo/bar"), + wantPath: "/bar", + wantProto: "HTTP/1.1", + }, + { + name: "gRPC-but-not-http2", + req: req("GET", "http://foo/bar", "application/grpc"), + wantPath: "/bar", + wantProto: "HTTP/1.1", + }, + { + name: "gRPC--http2", + req: req("GET", "http://foo/bar", 2, "application/grpc"), + wantPath: "/bar", + wantProto: "HTTP/2.0", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rec := httptest.NewRecorder() + rp.ServeHTTP(rec, tt.req) + + res := rec.Result() + got := must.Get(io.ReadAll(res.Body)) + if got, want := res.Header.Get("Path-Was"), tt.wantPath; want != got { + t.Errorf("Path-Was %q, want %q", got, want) + } + if got, want := res.Header.Get("Proto-Was"), tt.wantProto; want != got { + t.Errorf("Proto-Was %q, want %q", got, want) + } + if string(got) != 
msg { + t.Errorf("got body %q, want %q", got, msg) + } + }) + } +} diff --git a/ipn/ipnlocal/ssh.go b/ipn/ipnlocal/ssh.go index e48b1f2f1286e..e2c2f50671386 100644 --- a/ipn/ipnlocal/ssh.go +++ b/ipn/ipnlocal/ssh.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9 +//go:build ((linux && !android) || (darwin && !ios) || freebsd || openbsd || plan9) && !ts_omit_ssh package ipnlocal diff --git a/ipn/ipnlocal/ssh_stub.go b/ipn/ipnlocal/ssh_stub.go index d129084e4c10c..6b2e36015c2d7 100644 --- a/ipn/ipnlocal/ssh_stub.go +++ b/ipn/ipnlocal/ssh_stub.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) +//go:build ts_omit_ssh || ios || android || (!linux && !darwin && !freebsd && !openbsd && !plan9) package ipnlocal diff --git a/ipn/ipnlocal/ssh_test.go b/ipn/ipnlocal/ssh_test.go index 6e93b34f05019..b24cd6732f605 100644 --- a/ipn/ipnlocal/ssh_test.go +++ b/ipn/ipnlocal/ssh_test.go @@ -13,6 +13,7 @@ import ( "tailscale.com/health" "tailscale.com/ipn/store/mem" "tailscale.com/tailcfg" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -50,7 +51,7 @@ type fakeSSHServer struct { } func TestGetSSHUsernames(t *testing.T) { - pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker))) + pm := must.Get(newProfileManager(new(mem.Store), t.Logf, health.NewTracker(eventbustest.NewBus(t)))) b := &LocalBackend{pm: pm, store: pm.Store()} b.sshServer = fakeSSHServer{} res, err := b.getSSHUsernames(new(tailcfg.C2NSSHUsernamesRequest)) diff --git a/ipn/ipnlocal/state_test.go b/ipn/ipnlocal/state_test.go index 4097a37735b5c..fca01f1056fcb 100644 --- a/ipn/ipnlocal/state_test.go +++ b/ipn/ipnlocal/state_test.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "math/rand/v2" "net/netip" "strings" "sync" @@ -39,6 +40,7 @@ import ( "tailscale.com/types/persist" "tailscale.com/types/preftype" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/wgengine" @@ -57,8 +59,9 @@ type notifyThrottler struct { // ch gets replaced frequently. Lock the mutex before getting or // setting it, but not while waiting on it. - mu sync.Mutex - ch chan ipn.Notify + mu sync.Mutex + ch chan ipn.Notify + putErr error // set by put if the channel is full } // expect tells the throttler to expect count upcoming notifications. @@ -79,7 +82,11 @@ func (nt *notifyThrottler) put(n ipn.Notify) { case ch <- n: return default: - nt.t.Fatalf("put: channel full: %v", n) + err := fmt.Errorf("put: channel full: %v", n) + nt.t.Log(err) + nt.mu.Lock() + nt.putErr = err + nt.mu.Unlock() } } @@ -89,8 +96,13 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify { nt.t.Helper() nt.mu.Lock() ch := nt.ch + putErr := nt.putErr nt.mu.Unlock() + if putErr != nil { + nt.t.Fatalf("drain: previous call to put errored: %s", putErr) + } + nn := []ipn.Notify{} for i := range count { select { @@ -113,10 +125,11 @@ func (nt *notifyThrottler) drain(count int) []ipn.Notify { // in the controlclient.Client, so by controlling it, we can check that // the state machine works as expected. 
type mockControl struct { - tb testing.TB - logf logger.Logf - opts controlclient.Options - paused atomic.Bool + tb testing.TB + logf logger.Logf + opts controlclient.Options + paused atomic.Bool + controlClientID int64 mu sync.Mutex persist *persist.Persist @@ -127,12 +140,13 @@ type mockControl struct { func newClient(tb testing.TB, opts controlclient.Options) *mockControl { return &mockControl{ - tb: tb, - authBlocked: true, - logf: opts.Logf, - opts: opts, - shutdown: make(chan struct{}), - persist: opts.Persist.Clone(), + tb: tb, + authBlocked: true, + logf: opts.Logf, + opts: opts, + shutdown: make(chan struct{}), + persist: opts.Persist.Clone(), + controlClientID: rand.Int64(), } } @@ -168,9 +182,17 @@ func (cc *mockControl) populateKeys() (newKeys bool) { return newKeys } +type sendOpt struct { + err error + url string + loginFinished bool + nm *netmap.NetworkMap +} + // send publishes a controlclient.Status notification upstream. // (In our tests here, upstream is the ipnlocal.Local instance.) -func (cc *mockControl) send(err error, url string, loginFinished bool, nm *netmap.NetworkMap) { +func (cc *mockControl) send(opts sendOpt) { + err, url, loginFinished, nm := opts.err, opts.url, opts.loginFinished, opts.nm if loginFinished { cc.mu.Lock() cc.authBlocked = false @@ -197,7 +219,17 @@ func (cc *mockControl) authenticated(nm *netmap.NetworkMap) { cc.persist.UserProfile = *selfUser.AsStruct() } cc.persist.NodeID = nm.SelfNode.StableID() - cc.send(nil, "", true, nm) + cc.send(sendOpt{loginFinished: true, nm: nm}) +} + +func (cc *mockControl) sendAuthURL(nm *netmap.NetworkMap) { + s := controlclient.Status{ + URL: "https://example.com/a/foo", + NetMap: nm, + Persist: cc.persist.View(), + } + s.SetStateForTest(controlclient.StateURLVisitRequired) + cc.opts.Observer.SetControlClientStatus(cc, s) } // called records that a particular function name was called. @@ -287,6 +319,10 @@ func (cc *mockControl) UpdateEndpoints(endpoints []tailcfg.Endpoint) { cc.called("UpdateEndpoints") } +func (cc *mockControl) ClientID() int64 { + return cc.controlClientID +} + func (b *LocalBackend) nonInteractiveLoginForStateTest() { b.mu.Lock() if b.cc == nil { @@ -320,6 +356,14 @@ func (b *LocalBackend) nonInteractiveLoginForStateTest() { // predictable, but maybe a bit less thorough. This is more of an overall // state machine test than a test of the wgengine+magicsock integration. 
func TestStateMachine(t *testing.T) { + runTestStateMachine(t, false) +} + +func TestStateMachineSeamless(t *testing.T) { + runTestStateMachine(t, true) +} + +func runTestStateMachine(t *testing.T, seamless bool) { envknob.Setenv("TAILSCALE_USE_WIP_CODE", "1") defer envknob.Setenv("TAILSCALE_USE_WIP_CODE", "") c := qt.New(t) @@ -328,7 +372,7 @@ func TestStateMachine(t *testing.T) { sys := tsd.NewSystem() store := new(testStateStorage) sys.Set(store) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -340,7 +384,6 @@ func TestStateMachine(t *testing.T) { t.Fatalf("NewLocalBackend: %v", err) } t.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() var cc, previousCC *mockControl b.SetControlClientGetterForTesting(func(opts controlclient.Options) (controlclient.Client, error) { @@ -445,7 +488,7 @@ func TestStateMachine(t *testing.T) { }, }) url1 := "https://localhost:1/1" - cc.send(nil, url1, false, nil) + cc.send(sendOpt{url: url1}) { cc.assertCalls() @@ -498,7 +541,7 @@ func TestStateMachine(t *testing.T) { t.Logf("\n\nLogin2 (url response)") notifies.expect(1) url2 := "https://localhost:1/2" - cc.send(nil, url2, false, nil) + cc.send(sendOpt{url: url2}) { cc.assertCalls() @@ -518,7 +561,14 @@ func TestStateMachine(t *testing.T) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user1" cc.persist.NodeID = "node1" - cc.send(nil, "", true, &netmap.NetworkMap{}) + + // even if seamless is being enabled by default rather than by policy, this is + // the point where it will first get enabled. + if seamless { + sys.ControlKnobs().SeamlessKeyRenewal.Store(true) + } + + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{}}) { nn := notifies.drain(3) // Arguably it makes sense to unpause now, since the machine @@ -547,9 +597,9 @@ func TestStateMachine(t *testing.T) { // but the current code is brittle. // (ie. I suspect it would be better to change false->true in send() // below, and do the same in the real controlclient.) - cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -710,7 +760,7 @@ func TestStateMachine(t *testing.T) { // an interactive login URL to visit. notifies.expect(2) url3 := "https://localhost:1/3" - cc.send(nil, url3, false, nil) + cc.send(sendOpt{url: url3}) { nn := notifies.drain(2) cc.assertCalls("Login") @@ -721,9 +771,9 @@ func TestStateMachine(t *testing.T) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user2" cc.persist.NodeID = "node2" - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) t.Logf("\n\nLoginFinished3") { nn := notifies.drain(3) @@ -791,9 +841,9 @@ func TestStateMachine(t *testing.T) { // the control server at all when stopped). 
t.Logf("\n\nStart4 -> netmap") notifies.expect(0) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { notifies.drain(0) cc.assertCalls("pause") @@ -838,7 +888,7 @@ func TestStateMachine(t *testing.T) { notifies.expect(1) b.StartLoginInteractive(context.Background()) url4 := "https://localhost:1/4" - cc.send(nil, url4, false, nil) + cc.send(sendOpt{url: url4}) { nn := notifies.drain(1) // It might seem like WantRunning should switch to true here, @@ -860,9 +910,9 @@ func TestStateMachine(t *testing.T) { notifies.expect(3) cc.persist.UserProfile.LoginName = "user3" cc.persist.NodeID = "node3" - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(3) // BUG: pause() being called here is a bad sign. @@ -908,9 +958,9 @@ func TestStateMachine(t *testing.T) { // Control server accepts our valid key from before. t.Logf("\n\nLoginFinished5") notifies.expect(0) - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { notifies.drain(0) cc.assertCalls() @@ -923,10 +973,10 @@ func TestStateMachine(t *testing.T) { } t.Logf("\n\nExpireKey") notifies.expect(1) - cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -938,10 +988,10 @@ func TestStateMachine(t *testing.T) { t.Logf("\n\nExtendKey") notifies.expect(1) - cc.send(nil, "", false, &netmap.NetworkMap{ + cc.send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(time.Minute), SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) { nn := notifies.drain(1) cc.assertCalls() @@ -966,7 +1016,7 @@ func TestEditPrefsHasNoKeys(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() sys.Set(new(mem.Store)) - e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -1076,9 +1126,9 @@ func TestWGEngineStatusRace(t *testing.T) { wantState(ipn.NeedsLogin) // Assert that we are logged in and authorized. - cc.send(nil, "", true, &netmap.NetworkMap{ + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), - }) + }}) wantState(ipn.Starting) // Simulate multiple concurrent callbacks from wgengine. 
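The cc.send call sites in this test are converted to take a single options struct rather than positional arguments. A minimal sketch of that refactor pattern, with illustrative types rather than the test's real ones:

package main

import "fmt"

// sendOpt mirrors the idea of the refactor: one struct replaces a list of
// positional parameters, so unset options simply stay at their zero value.
type sendOpt struct {
	err           error
	url           string
	loginFinished bool
}

// send stands in for the test helper; it just reports what it was asked to do.
func send(o sendOpt) string {
	switch {
	case o.err != nil:
		return "error: " + o.err.Error()
	case o.loginFinished:
		return "login finished"
	case o.url != "":
		return "visit " + o.url
	default:
		return "no-op"
	}
}

func main() {
	// Before: send(nil, "https://localhost:1/1", false, nil) — easy to misorder.
	// After: only the fields that matter are named at the call site.
	fmt.Println(send(sendOpt{url: "https://localhost:1/1"}))
	fmt.Println(send(sendOpt{loginFinished: true}))
}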
@@ -1354,11 +1404,141 @@ func TestEngineReconfigOnStateChange(t *testing.T) { steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { mustDo(t)(lb.Start(ipn.Options{})) mustDo2(t)(lb.EditPrefs(connect)) - cc().authenticated(node3) - cc().send(nil, "", false, &netmap.NetworkMap{ + cc().authenticated(node1) + cc().send(sendOpt{nm: &netmap.NetworkMap{ + Expiry: time.Now().Add(-time.Minute), + }}) + }, + wantState: ipn.NeedsLogin, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/InitReauth", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + }, + // Without seamless renewal, even starting a reauth tears down everything: + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{}, + wantRouterCfg: &router.Config{}, + wantDNSCfg: &dns.Config{}, + }, + { + name: "Start/Connect/Login/InitReauth/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + + // Complete the re-auth process: + cc().authenticated(node1) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/InitReauth", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + }, + // With seamless renewal, starting a reauth should leave everything up: + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/InitReauth/Login", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + + // Start the re-auth process: + lb.StartLoginInteractive(context.Background()) + cc().sendAuthURL(node1) + + // Complete the re-auth process: + cc().authenticated(node1) + }, + wantState: ipn.Starting, + wantCfg: &wgcfg.Config{ + Name: "tailscale", + NodeID: node1.SelfNode.StableID(), + Peers: []wgcfg.Peer{}, + Addresses: 
node1.SelfNode.Addresses().AsSlice(), + }, + wantRouterCfg: &router.Config{ + SNATSubnetRoutes: true, + NetfilterMode: preftype.NetfilterOn, + LocalAddrs: node1.SelfNode.Addresses().AsSlice(), + Routes: routesWithQuad100(), + }, + wantDNSCfg: &dns.Config{ + Routes: map[dnsname.FQDN][]*dnstype.Resolver{}, + Hosts: hostsFor(node1), + }, + }, + { + name: "Seamless/Start/Connect/Login/Expire", + steps: func(t *testing.T, lb *LocalBackend, cc func() *mockControl) { + lb.ControlKnobs().SeamlessKeyRenewal.Store(true) + mustDo(t)(lb.Start(ipn.Options{})) + mustDo2(t)(lb.EditPrefs(connect)) + cc().authenticated(node1) + cc().send(sendOpt{nm: &netmap.NetworkMap{ Expiry: time.Now().Add(-time.Minute), - }) + }}) }, + // Even with seamless, if the key we are using expires, we want to disconnect: wantState: ipn.NeedsLogin, wantCfg: &wgcfg.Config{}, wantRouterCfg: &router.Config{}, @@ -1404,6 +1584,235 @@ func TestEngineReconfigOnStateChange(t *testing.T) { } } +// TestStateMachineURLRace tests that wgengine updates arriving in the middle of +// processing an auth URL doesn't result in the auth URL being cleared. +func TestStateMachineURLRace(t *testing.T) { + runTestStateMachineURLRace(t, false) +} + +func TestStateMachineURLRaceSeamless(t *testing.T) { + runTestStateMachineURLRace(t, true) +} + +func runTestStateMachineURLRace(t *testing.T, seamless bool) { + var cc *mockControl + b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) + + t.Logf("Start") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.NeedsLogin)}) + b.Start(ipn.Options{ + UpdatePrefs: &ipn.Prefs{ + WantRunning: true, + ControlURL: "https://localhost:1/", + }, + }) + nw.check() + + t.Logf("LoginFinished") + cc.persist.UserProfile.LoginName = "user1" + cc.persist.NodeID = "node1" + + if seamless { + b.sys.ControlKnobs().SeamlessKeyRenewal.Store(true) + } + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Starting)}) + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + }}) + nw.check() + + t.Logf("Running") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Running)}) + b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) + nw.check() + + t.Logf("Re-auth (StartLoginInteractive)") + b.StartLoginInteractive(t.Context()) + + stop := make(chan struct{}) + stopSpamming := sync.OnceFunc(func() { + stop <- struct{}{} + }) + // if seamless renewal is enabled, the engine won't be disabled, and we won't + // ever call stopSpamming, so make sure it does get called + defer stopSpamming() + + // Intercept updates between the engine and localBackend, so that we can see + // when the "stopped" update comes in and ensure we stop sending our "we're + // up" updates after that point. + b.e.SetStatusCallback(func(s *wgengine.Status, err error) { + // This is not one of our fake status updates, this is generated from the + // engine in response to LocalBackend calling RequestStatus. Stop spamming + // our fake statuses. + // + // TODO(zofrex): This is fragile, it works right now but would break if the + // calling pattern of RequestStatus changes. We should ensure that we keep + // sending "we're up" statuses right until Reconfig is called with + // zero-valued configs, and after that point only send "stopped" statuses. 
+ stopSpamming() + + // Once stopSpamming returns we are guaranteed to not send any more updates, + // so we can now send the real update (indicating shutdown) and be certain + // it will be received after any fake updates we sent. This is possibly a + // stronger guarantee than we get from the real engine? + b.setWgengineStatus(s, err) + }) + + // time needs to be >= last time for the status to be accepted, send all our + // spam with the same stale time so that when a real update comes in it will + // definitely be accepted. + time := b.lastStatusTime + + // Flood localBackend with a lot of wgengine status updates, so if there are + // any race conditions in the multiple locks/unlocks that happen as we process + // the received auth URL, we will hit them. + go func() { + t.Logf("sending lots of fake wgengine status updates") + for { + select { + case <-stop: + t.Logf("stopping fake wgengine status updates") + return + default: + b.setWgengineStatus(&wgengine.Status{AsOf: time, DERPs: 1}, nil) + } + } + }() + + t.Logf("Re-auth (receive URL)") + url1 := "https://localhost:1/1" + cc.send(sendOpt{url: url1}) + + // Don't need to wait on anything else - once .send completes, authURL should + // be set, and once .send has completed, any opportunities for a WG engine + // status update to trample it have ended as well. + if b.authURL == "" { + t.Fatalf("expected authURL to be set") + } +} + +func TestWGEngineDownThenUpRace(t *testing.T) { + var cc *mockControl + b := newLocalBackendWithTestControl(t, true, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + cc = newClient(t, opts) + return cc + }) + + nw := newNotificationWatcher(t, b, &ipnauth.TestActor{}) + + t.Logf("Start") + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.NeedsLogin)}) + b.Start(ipn.Options{ + UpdatePrefs: &ipn.Prefs{ + WantRunning: true, + ControlURL: "https://localhost:1/", + }, + }) + nw.check() + + t.Logf("LoginFinished") + cc.persist.UserProfile.LoginName = "user1" + cc.persist.NodeID = "node1" + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Starting)}) + cc.send(sendOpt{loginFinished: true, nm: &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{MachineAuthorized: true}).View(), + }}) + nw.check() + + nw.watch(0, []wantedNotification{ + wantStateNotify(ipn.Running)}) + b.setWgengineStatus(&wgengine.Status{AsOf: time.Now(), DERPs: 1}, nil) + nw.check() + + t.Logf("Re-auth (StartLoginInteractive)") + b.StartLoginInteractive(t.Context()) + + var timeLock sync.RWMutex + timestamp := b.lastStatusTime + + engineShutdown := make(chan struct{}) + gotShutdown := sync.OnceFunc(func() { + t.Logf("engineShutdown") + engineShutdown <- struct{}{} + }) + + b.e.SetStatusCallback(func(s *wgengine.Status, err error) { + timeLock.Lock() + if s.AsOf.After(timestamp) { + timestamp = s.AsOf + } + timeLock.Unlock() + + if err != nil || (s.DERPs == 0 && len(s.Peers) == 0) { + gotShutdown() + } else { + b.setWgengineStatus(s, err) + } + }) + + t.Logf("Re-auth (receive URL)") + url1 := "https://localhost:1/1" + + done := make(chan struct{}) + var wg sync.WaitGroup + + wg.Go(func() { + t.Log("cc.send starting") + cc.send(sendOpt{url: url1}) // will block until engine stops + t.Log("cc.send returned") + }) + + <-engineShutdown // will get called once cc.send is blocked + gotShutdown = sync.OnceFunc(func() { + t.Logf("engineShutdown") + engineShutdown <- struct{}{} + }) + + wg.Go(func() { + t.Log("StartLoginInteractive starting") + b.StartLoginInteractive(t.Context()) // will also block until engine stops + 
t.Log("StartLoginInteractive returned") + }) + + <-engineShutdown // will get called once StartLoginInteractive is blocked + + st := controlclient.Status{} + st.SetStateForTest(controlclient.StateAuthenticated) + b.SetControlClientStatus(cc, st) + + timeLock.RLock() + b.setWgengineStatus(&wgengine.Status{AsOf: timestamp}, nil) // engine is down event finally arrives + b.setWgengineStatus(&wgengine.Status{AsOf: timestamp, DERPs: 1}, nil) // engine is back up + timeLock.RUnlock() + + go func() { + wg.Wait() + done <- struct{}{} + }() + + t.Log("waiting for .send and .StartLoginInteractive to return") + + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timed out waiting") + } + + t.Log("both returned") +} + func buildNetmapWithPeers(self tailcfg.NodeView, peers ...tailcfg.NodeView) *netmap.NetworkMap { const ( firstAutoUserID = tailcfg.UserID(10000) @@ -1507,16 +1916,18 @@ func newLocalBackendWithMockEngineAndControl(t *testing.T, enableLogging bool) ( dialer := &tsdial.Dialer{Logf: logf} dialer.SetNetMon(netmon.NewStatic()) - sys := tsd.NewSystem() + bus := eventbustest.NewBus(t) + sys := tsd.NewSystemWithBus(bus) sys.Set(dialer) sys.Set(dialer.NetMon()) + dialer.SetBus(bus) magicConn, err := magicsock.NewConn(magicsock.Options{ Logf: logf, EventBus: sys.Bus.Get(), NetMon: dialer.NetMon(), Metrics: sys.UserMetricsRegistry(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), DisablePortMapper: true, }) if err != nil { diff --git a/ipn/ipnlocal/tailnetlock_disabled.go b/ipn/ipnlocal/tailnetlock_disabled.go new file mode 100644 index 0000000000000..85cf4bd3f4ea5 --- /dev/null +++ b/ipn/ipnlocal/tailnetlock_disabled.go @@ -0,0 +1,31 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package ipnlocal + +import ( + "tailscale.com/ipn" + "tailscale.com/ipn/ipnstate" + "tailscale.com/tka" + "tailscale.com/types/netmap" +) + +type tkaState struct { + authority *tka.Authority +} + +func (b *LocalBackend) initTKALocked() error { + return nil +} + +func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap, prefs ipn.PrefsView) error { + return nil +} + +func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {} + +func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus { + return &ipnstate.NetworkLockStatus{Enabled: false} +} diff --git a/ipn/ipnlocal/web_client.go b/ipn/ipnlocal/web_client.go index 7cfb30ca4efeb..a3c9387e46fce 100644 --- a/ipn/ipnlocal/web_client.go +++ b/ipn/ipnlocal/web_client.go @@ -19,11 +19,11 @@ import ( "tailscale.com/client/local" "tailscale.com/client/web" - "tailscale.com/logtail/backoff" "tailscale.com/net/netutil" "tailscale.com/tailcfg" "tailscale.com/tsconst" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/mak" ) diff --git a/ipn/ipnlocal/web_client_stub.go b/ipn/ipnlocal/web_client_stub.go index 5f37560cc6ddb..787867b4f450e 100644 --- a/ipn/ipnlocal/web_client_stub.go +++ b/ipn/ipnlocal/web_client_stub.go @@ -8,15 +8,13 @@ package ipnlocal import ( "errors" "net" - - "tailscale.com/client/local" ) const webClientPort = 5252 type webClient struct{} -func (b *LocalBackend) ConfigureWebClient(lc *local.Client) {} +func (b *LocalBackend) ConfigureWebClient(any) {} func (b *LocalBackend) webClientGetOrInit() error { return errors.New("not implemented") diff --git a/ipn/ipnserver/actor.go b/ipn/ipnserver/actor.go index 9d86d2c825fda..628e3c37cfc0b 100644 --- a/ipn/ipnserver/actor.go +++ 
b/ipn/ipnserver/actor.go @@ -12,6 +12,7 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/types/logger" @@ -145,7 +146,11 @@ func (a *actor) Username() (string, error) { defer tok.Close() return tok.Username() case "darwin", "linux", "illumos", "solaris", "openbsd": - uid, ok := a.ci.Creds().UserID() + creds := a.ci.Creds() + if creds == nil { + return "", errors.New("peer credentials not implemented on this OS") + } + uid, ok := creds.UserID() if !ok { return "", errors.New("missing user ID") } @@ -233,6 +238,11 @@ func connIsLocalAdmin(logf logger.Logf, ci *ipnauth.ConnIdentity, operatorUID st // Linux. fallthrough case "linux": + if !buildfeatures.HasUnixSocketIdentity { + // Everybody is an admin if support for unix socket identities + // is omitted for the build. + return true + } uid, ok := ci.Creds().UserID() if !ok { return false diff --git a/ipn/ipnserver/proxyconnect.go b/ipn/ipnserver/proxyconnect.go index 030c4efe4a6b0..7d41273bdc52a 100644 --- a/ipn/ipnserver/proxyconnect.go +++ b/ipn/ipnserver/proxyconnect.go @@ -10,6 +10,8 @@ import ( "net" "net/http" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/logpolicy" ) @@ -23,6 +25,10 @@ import ( // precludes that from working and instead the GUI fails to dial out. // So, go through tailscaled (with a CONNECT request) instead. func (s *Server) handleProxyConnectConn(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasOutboundProxy { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } ctx := r.Context() if r.Method != "CONNECT" { panic("[unexpected] miswired") diff --git a/ipn/ipnserver/server.go b/ipn/ipnserver/server.go index fdbd82b0b9e33..d473252e134a8 100644 --- a/ipn/ipnserver/server.go +++ b/ipn/ipnserver/server.go @@ -15,6 +15,7 @@ import ( "net" "net/http" "os/user" + "runtime" "strconv" "strings" "sync" @@ -23,15 +24,17 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/localapi" "tailscale.com/net/netmon" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" - "tailscale.com/util/systemd" "tailscale.com/util/testenv" ) @@ -40,6 +43,7 @@ import ( type Server struct { lb atomic.Pointer[ipnlocal.LocalBackend] logf logger.Logf + bus *eventbus.Bus netMon *netmon.Monitor // must be non-nil backendLogID logid.PublicID @@ -118,6 +122,10 @@ func (s *Server) awaitBackend(ctx context.Context) (_ *ipnlocal.LocalBackend, ok // This is primarily for the Windows GUI, because wintun can take awhile to // come up. See https://github.com/tailscale/tailscale/issues/6522. func (s *Server) serveServerStatus(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug && runtime.GOOS != "windows" { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotFound) + return + } ctx := r.Context() w.Header().Set("Content-Type", "application/json") @@ -380,6 +388,9 @@ func isAllDigit(s string) bool { // connection. It's intended to give your non-root webserver access // (www-data, caddy, nginx, etc) to certs. 
func (a *actor) CanFetchCerts() bool { + if !buildfeatures.HasACME { + return false + } if a.ci.IsUnixSock() && a.ci.Creds() != nil { connUID, ok := a.ci.Creds().UserID() if ok && connUID == userIDFromString(envknob.String("TS_PERMIT_CERT_UID")) { @@ -396,6 +407,10 @@ func (a *actor) CanFetchCerts() bool { // // onDone must be called when the HTTP request is done. func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (onDone func(), err error) { + if runtime.GOOS != "windows" && !buildfeatures.HasUnixSocketIdentity { + return func() {}, nil + } + if actor == nil { return nil, errors.New("internal error: nil actor") } @@ -446,13 +461,14 @@ func (s *Server) addActiveHTTPRequest(req *http.Request, actor ipnauth.Actor) (o // // At some point, either before or after Run, the Server's SetLocalBackend // method must also be called before Server can do anything useful. -func New(logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor) *Server { +func New(logf logger.Logf, logID logid.PublicID, bus *eventbus.Bus, netMon *netmon.Monitor) *Server { if netMon == nil { panic("nil netMon") } return &Server{ backendLogID: logID, logf: logf, + bus: bus, netMon: netMon, } } @@ -494,17 +510,25 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { runDone := make(chan struct{}) defer close(runDone) - // When the context is closed or when we return, whichever is first, close our listener + ec := s.bus.Client("ipnserver.Server") + defer ec.Close() + shutdownSub := eventbus.Subscribe[localapi.Shutdown](ec) + + // When the context is closed, a [localapi.Shutdown] event is received, + // or when we return, whichever is first, close our listener // and all open connections. go func() { select { + case <-shutdownSub.Events(): case <-ctx.Done(): case <-runDone: } ln.Close() }() - systemd.Ready() + if ready, ok := feature.HookSystemdReady.GetOk(); ok { + ready() + } hs := &http.Server{ Handler: http.HandlerFunc(s.serveHTTP), @@ -527,6 +551,10 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error { // Windows and via $DEBUG_LISTENER/debug/ipn when tailscaled's --debug flag // is used to run a debug server. 
func (s *Server) ServeHTMLStatus(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotFound) + return + } lb := s.lb.Load() if lb == nil { http.Error(w, "no LocalBackend", http.StatusServiceUnavailable) diff --git a/ipn/ipnserver/server_test.go b/ipn/ipnserver/server_test.go index 903cb6b738331..713db9e50085e 100644 --- a/ipn/ipnserver/server_test.go +++ b/ipn/ipnserver/server_test.go @@ -5,6 +5,7 @@ package ipnserver_test import ( "context" + "errors" "runtime" "strconv" "sync" @@ -14,7 +15,10 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/ipn/lapitest" + "tailscale.com/tsd" "tailscale.com/types/ptr" + "tailscale.com/util/syspolicy/pkey" + "tailscale.com/util/syspolicy/policytest" ) func TestUserConnectDisconnectNonWindows(t *testing.T) { @@ -253,6 +257,62 @@ func TestBlockWhileIdentityInUse(t *testing.T) { } } +func TestShutdownViaLocalAPI(t *testing.T) { + t.Parallel() + + errAccessDeniedByPolicy := errors.New("Access denied: shutdown access denied by policy") + + tests := []struct { + name string + allowTailscaledRestart *bool + wantErr error + }{ + { + name: "AllowTailscaledRestart/NotConfigured", + allowTailscaledRestart: nil, + wantErr: errAccessDeniedByPolicy, + }, + { + name: "AllowTailscaledRestart/False", + allowTailscaledRestart: ptr.To(false), + wantErr: errAccessDeniedByPolicy, + }, + { + name: "AllowTailscaledRestart/True", + allowTailscaledRestart: ptr.To(true), + wantErr: nil, // shutdown should be allowed + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + sys := tsd.NewSystem() + + var pol policytest.Config + if tt.allowTailscaledRestart != nil { + pol.Set(pkey.AllowTailscaledRestart, *tt.allowTailscaledRestart) + } + sys.Set(pol) + + server := lapitest.NewServer(t, lapitest.WithSys(sys)) + lc := server.ClientWithName("User") + + err := lc.ShutdownTailscaled(t.Context()) + checkError(t, err, tt.wantErr) + }) + } +} + +func checkError(tb testing.TB, got, want error) { + tb.Helper() + if (want == nil) != (got == nil) || + (want != nil && got != nil && want.Error() != got.Error() && !errors.Is(got, want)) { + tb.Fatalf("gotErr: %v; wantErr: %v", got, want) + } +} + func setGOOSForTest(tb testing.TB, goos string) { tb.Helper() envknob.Setenv("TS_DEBUG_FAKE_GOOS", goos) diff --git a/ipn/lapitest/backend.go b/ipn/lapitest/backend.go index ddf48fb2893d8..7a1c276a7b229 100644 --- a/ipn/lapitest/backend.go +++ b/ipn/lapitest/backend.go @@ -33,7 +33,7 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { sys.Set(&mem.Store{}) } - e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + e, err := wgengine.NewFakeUserspaceEngine(opts.Logf(), sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { opts.tb.Fatalf("NewFakeUserspaceEngine: %v", err) } @@ -45,7 +45,6 @@ func newBackend(opts *options) *ipnlocal.LocalBackend { tb.Fatalf("NewLocalBackend: %v", err) } tb.Cleanup(b.Shutdown) - b.DisablePortMapperForTest() b.SetControlClientGetterForTesting(opts.MakeControlClient) return b } diff --git a/ipn/lapitest/server.go b/ipn/lapitest/server.go index d477dc1828549..457a338ab9f5a 100644 --- a/ipn/lapitest/server.go +++ b/ipn/lapitest/server.go @@ -236,7 +236,7 @@ func (s *Server) Close() { func newUnstartedIPNServer(opts *options) *ipnserver.Server { opts.TB().Helper() lb := opts.Backend() - server := ipnserver.New(opts.Logf(), 
logid.PublicID{}, lb.NetMon()) + server := ipnserver.New(opts.Logf(), logid.PublicID{}, lb.EventBus(), lb.NetMon()) server.SetLocalBackend(lb) return server } diff --git a/ipn/localapi/cert.go b/ipn/localapi/cert.go index 323406f7ba650..2313631cc3229 100644 --- a/ipn/localapi/cert.go +++ b/ipn/localapi/cert.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android && !js +//go:build !ios && !android && !js && !ts_omit_acme package localapi @@ -14,6 +14,10 @@ import ( "tailscale.com/ipn/ipnlocal" ) +func init() { + Register("cert/", (*Handler).serveCert) +} + func (h *Handler) serveCert(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite && !h.PermitCert { http.Error(w, "cert access denied", http.StatusForbidden) diff --git a/ipn/localapi/debug.go b/ipn/localapi/debug.go new file mode 100644 index 0000000000000..b3b919d31ede2 --- /dev/null +++ b/ipn/localapi/debug.go @@ -0,0 +1,465 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_debug + +package localapi + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "reflect" + "slices" + "strconv" + "sync" + "time" + + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" + "tailscale.com/ipn" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" + "tailscale.com/util/httpm" +) + +func init() { + Register("component-debug-logging", (*Handler).serveComponentDebugLogging) + Register("debug", (*Handler).serveDebug) + Register("dev-set-state-store", (*Handler).serveDevSetStateStore) + Register("debug-bus-events", (*Handler).serveDebugBusEvents) + Register("debug-bus-graph", (*Handler).serveEventBusGraph) + Register("debug-derp-region", (*Handler).serveDebugDERPRegion) + Register("debug-dial-types", (*Handler).serveDebugDialTypes) + Register("debug-log", (*Handler).serveDebugLog) + Register("debug-packet-filter-matches", (*Handler).serveDebugPacketFilterMatches) + Register("debug-packet-filter-rules", (*Handler).serveDebugPacketFilterRules) + Register("debug-peer-endpoint-changes", (*Handler).serveDebugPeerEndpointChanges) +} + +func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "status access denied", http.StatusForbidden) + return + } + + ipStr := r.FormValue("ip") + if ipStr == "" { + http.Error(w, "missing 'ip' parameter", http.StatusBadRequest) + return + } + ip, err := netip.ParseAddr(ipStr) + if err != nil { + http.Error(w, "invalid IP", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + e := json.NewEncoder(w) + e.SetIndent("", "\t") + e.Encode(chs) +} + +func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + component := r.FormValue("component") + secs, _ := strconv.Atoi(r.FormValue("secs")) + err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second)) + var res struct { + Error string + } + if err != nil { + res.Error = err.Error() + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) +} + +func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) { + if 
!h.PermitWrite { + http.Error(w, "debug-dial-types access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + + ip := r.FormValue("ip") + port := r.FormValue("port") + network := r.FormValue("network") + + addr := ip + ":" + port + if _, err := netip.ParseAddrPort(addr); err != nil { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "invalid address %q: %v", addr, err) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + var bareDialer net.Dialer + + dialer := h.b.Dialer() + + var peerDialer net.Dialer + peerDialer.Control = dialer.PeerDialControlFunc() + + // Kick off a dial with each available dialer in parallel. + dialers := []struct { + name string + dial func(context.Context, string, string) (net.Conn, error) + }{ + {"SystemDial", dialer.SystemDial}, + {"UserDial", dialer.UserDial}, + {"PeerDial", peerDialer.DialContext}, + {"BareDial", bareDialer.DialContext}, + } + type result struct { + name string + conn net.Conn + err error + } + results := make(chan result, len(dialers)) + + var wg sync.WaitGroup + for _, dialer := range dialers { + dialer := dialer // loop capture + + wg.Add(1) + go func() { + defer wg.Done() + conn, err := dialer.dial(ctx, network, addr) + results <- result{dialer.name, conn, err} + }() + } + + wg.Wait() + for range len(dialers) { + res := <-results + fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err) + if res.conn != nil { + res.conn.Close() + } + } +} + +func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, "debug not supported in this build", http.StatusNotImplemented) + return + } + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + // The action is normally in a POST form parameter, but + // some actions (like "notify") want a full JSON body, so + // permit some to have their action in a header. 
+ var action string + switch v := r.Header.Get("Debug-Action"); v { + case "notify": + action = v + default: + action = r.FormValue("action") + } + var err error + switch action { + case "derp-set-homeless": + h.b.MagicConn().SetHomeless(true) + case "derp-unset-homeless": + h.b.MagicConn().SetHomeless(false) + case "rebind": + err = h.b.DebugRebind() + case "restun": + err = h.b.DebugReSTUN() + case "notify": + var n ipn.Notify + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugNotify(n) + case "notify-last-netmap": + h.b.DebugNotifyLastNetMap() + case "break-tcp-conns": + err = h.b.DebugBreakTCPConns() + case "break-derp-conns": + err = h.b.DebugBreakDERPConns() + case "force-netmap-update": + h.b.DebugForceNetmapUpdate() + case "control-knobs": + k := h.b.ControlKnobs() + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(k.AsDebugJSON()) + if err == nil { + return + } + case "pick-new-derp": + err = h.b.DebugPickNewDERP() + case "force-prefer-derp": + var n int + err = json.NewDecoder(r.Body).Decode(&n) + if err != nil { + break + } + h.b.DebugForcePreferDERP(n) + case "peer-relay-servers": + servers := h.b.DebugPeerRelayServers().Slice() + slices.SortFunc(servers, func(a, b netip.Addr) int { + return a.Compare(b) + }) + err = json.NewEncoder(w).Encode(servers) + if err == nil { + return + } + case "": + err = fmt.Errorf("missing parameter 'action'") + default: + err = fmt.Errorf("unknown action %q", action) + } + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} + +func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "POST required", http.StatusMethodNotAllowed) + return + } + if err := h.b.SetDevStateStore(r.FormValue("key"), r.FormValue("value")); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, "done\n") +} + +func (h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + nm := h.b.NetMap() + if nm == nil { + http.Error(w, "no netmap", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + enc.Encode(nm.PacketFilterRules) +} + +func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "debug access denied", http.StatusForbidden) + return + } + nm := h.b.NetMap() + if nm == nil { + http.Error(w, "no netmap", http.StatusNotFound) + return + } + w.Header().Set("Content-Type", "application/json") + + enc := json.NewEncoder(w) + enc.SetIndent("", "\t") + enc.Encode(nm.PacketFilter) +} + +// debugEventError provides the JSON encoding of internal errors from event processing. +type debugEventError struct { + Error string +} + +// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams +// events to the client. +func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { + // Require write access (~root) as the logs could contain something + // sensitive. 
+ if !h.PermitWrite { + http.Error(w, "event bus access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusNoContent) + return + } + + f, ok := w.(http.Flusher) + if !ok { + http.Error(w, "streaming unsupported", http.StatusInternalServerError) + return + } + + io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") + f.Flush() + + mon := bus.Debugger().WatchBus() + defer mon.Close() + + i := 0 + for { + select { + case <-r.Context().Done(): + fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) + return + case <-mon.Done(): + return + case event := <-mon.Events(): + data := eventbus.DebugEvent{ + Count: i, + Type: reflect.TypeOf(event.Event).String(), + Event: event.Event, + From: event.From.Name(), + } + for _, client := range event.To { + data.To = append(data.To, client.Name()) + } + + if msg, err := json.Marshal(data); err != nil { + data.Event = debugEventError{Error: fmt.Sprintf( + "failed to marshal JSON for %T", event.Event, + )} + if errMsg, err := json.Marshal(data); err != nil { + fmt.Fprintf(w, + `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, + i, event.Event) + } else { + w.Write(errMsg) + } + } else { + w.Write(msg) + } + f.Flush() + i++ + } + } +} + +// serveEventBusGraph taps into the event bus and dumps out the active graph of +// publishers and subscribers. It does not represent anything about the messages +// exchanged. +func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "GET required", http.StatusMethodNotAllowed) + return + } + + bus, ok := h.LocalBackend().Sys().Bus.GetOK() + if !ok { + http.Error(w, "event bus not running", http.StatusPreconditionFailed) + return + } + + debugger := bus.Debugger() + clients := debugger.Clients() + + graph := map[string]eventbus.DebugTopic{} + + for _, client := range clients { + for _, pub := range debugger.PublishTypes(client) { + topic, ok := graph[pub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: pub.Name()} + } + topic.Publisher = client.Name() + graph[pub.Name()] = topic + } + for _, sub := range debugger.SubscribeTypes(client) { + topic, ok := graph[sub.Name()] + if !ok { + topic = eventbus.DebugTopic{Name: sub.Name()} + } + topic.Subscribers = append(topic.Subscribers, client.Name()) + graph[sub.Name()] = topic + } + } + + // The top level map is not really needed for the client, convert to a list. 
+ topics := eventbus.DebugTopics{} + for _, v := range graph { + topics.Topics = append(topics.Topics, v) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(topics) +} + +func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasLogTail { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + if !h.PermitRead { + http.Error(w, "debug-log access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) + return + } + defer h.b.TryFlushLogs() // kick off upload after we're done logging + + type logRequestJSON struct { + Lines []string + Prefix string + } + + var logRequest logRequestJSON + if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + prefix := logRequest.Prefix + if prefix == "" { + prefix = "debug-log" + } + logf := logger.WithPrefix(h.logf, prefix+": ") + + // We can write logs too fast for logtail to handle, even when + // opting-out of rate limits. Limit ourselves to at most one message + // per 20ms and a burst of 60 log lines, which should be fast enough to + // not block for too long but slow enough that we can upload all lines. + logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now) + + for _, line := range logRequest.Lines { + logf("%s", line) + } + + w.WriteHeader(http.StatusNoContent) +} diff --git a/ipn/localapi/debugderp.go b/ipn/localapi/debugderp.go index 017b906922835..3edbc0856c8a3 100644 --- a/ipn/localapi/debugderp.go +++ b/ipn/localapi/debugderp.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_debug + package localapi import ( diff --git a/ipn/localapi/localapi.go b/ipn/localapi/localapi.go index 2dc75c0d936b3..9e7c16891fc20 100644 --- a/ipn/localapi/localapi.go +++ b/ipn/localapi/localapi.go @@ -7,9 +7,6 @@ package localapi import ( "bytes" "cmp" - "context" - "crypto/sha256" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -18,9 +15,6 @@ import ( "net/http" "net/netip" "net/url" - "os" - "path" - "reflect" "runtime" "slices" "strconv" @@ -30,9 +24,9 @@ import ( "golang.org/x/net/dns/dnsmessage" "tailscale.com/client/tailscale/apitype" - "tailscale.com/clientupdate" - "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -40,24 +34,21 @@ import ( "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" "tailscale.com/logtail" - "tailscale.com/net/netmon" "tailscale.com/net/netutil" - "tailscale.com/net/portmapper" "tailscale.com/tailcfg" - "tailscale.com/tka" "tailscale.com/tstime" - "tailscale.com/types/dnstype" + "tailscale.com/types/appctype" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/ptr" - "tailscale.com/types/tkatype" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/osdiag" "tailscale.com/util/rands" + "tailscale.com/util/syspolicy/pkey" "tailscale.com/version" "tailscale.com/wgengine/magicsock" ) @@ -76,79 +67,95 @@ type LocalAPIHandler func(*Handler, http.ResponseWriter, *http.Request) // then it's a prefix match. 
var handler = map[string]LocalAPIHandler{ // The prefix match handlers end with a slash: - "cert/": (*Handler).serveCert, "profiles/": (*Handler).serveProfiles, // The other /localapi/v0/NAME handlers are exact matches and contain only NAME // without a trailing slash: - "alpha-set-device-attrs": (*Handler).serveSetDeviceAttrs, // see tailscale/corp#24690 - "bugreport": (*Handler).serveBugReport, - "check-ip-forwarding": (*Handler).serveCheckIPForwarding, - "check-prefs": (*Handler).serveCheckPrefs, - "check-reverse-path-filtering": (*Handler).serveCheckReversePathFiltering, - "check-udp-gro-forwarding": (*Handler).serveCheckUDPGROForwarding, - "component-debug-logging": (*Handler).serveComponentDebugLogging, - "debug": (*Handler).serveDebug, - "debug-bus-events": (*Handler).serveDebugBusEvents, - "debug-bus-graph": (*Handler).serveEventBusGraph, - "debug-derp-region": (*Handler).serveDebugDERPRegion, - "debug-dial-types": (*Handler).serveDebugDialTypes, - "debug-log": (*Handler).serveDebugLog, - "debug-packet-filter-matches": (*Handler).serveDebugPacketFilterMatches, - "debug-packet-filter-rules": (*Handler).serveDebugPacketFilterRules, - "debug-peer-endpoint-changes": (*Handler).serveDebugPeerEndpointChanges, - "debug-portmap": (*Handler).serveDebugPortmap, - "derpmap": (*Handler).serveDERPMap, - "dev-set-state-store": (*Handler).serveDevSetStateStore, - "dial": (*Handler).serveDial, - "disconnect-control": (*Handler).disconnectControl, - "dns-osconfig": (*Handler).serveDNSOSConfig, - "dns-query": (*Handler).serveDNSQuery, - "drive/fileserver-address": (*Handler).serveDriveServerAddr, - "drive/shares": (*Handler).serveShares, - "goroutines": (*Handler).serveGoroutines, - "handle-push-message": (*Handler).serveHandlePushMessage, - "id-token": (*Handler).serveIDToken, - "login-interactive": (*Handler).serveLoginInteractive, - "logout": (*Handler).serveLogout, - "logtap": (*Handler).serveLogTap, - "metrics": (*Handler).serveMetrics, - "ping": (*Handler).servePing, - "pprof": (*Handler).servePprof, - "prefs": (*Handler).servePrefs, - "query-feature": (*Handler).serveQueryFeature, - "reload-config": (*Handler).reloadConfig, - "reset-auth": (*Handler).serveResetAuth, - "serve-config": (*Handler).serveServeConfig, - "set-dns": (*Handler).serveSetDNS, - "set-expiry-sooner": (*Handler).serveSetExpirySooner, - "set-gui-visible": (*Handler).serveSetGUIVisible, - "set-push-device-token": (*Handler).serveSetPushDeviceToken, - "set-udp-gro-forwarding": (*Handler).serveSetUDPGROForwarding, - "set-use-exit-node-enabled": (*Handler).serveSetUseExitNodeEnabled, - "start": (*Handler).serveStart, - "status": (*Handler).serveStatus, - "suggest-exit-node": (*Handler).serveSuggestExitNode, - "tka/affected-sigs": (*Handler).serveTKAAffectedSigs, - "tka/cosign-recovery-aum": (*Handler).serveTKACosignRecoveryAUM, - "tka/disable": (*Handler).serveTKADisable, - "tka/force-local-disable": (*Handler).serveTKALocalDisable, - "tka/generate-recovery-aum": (*Handler).serveTKAGenerateRecoveryAUM, - "tka/init": (*Handler).serveTKAInit, - "tka/log": (*Handler).serveTKALog, - "tka/modify": (*Handler).serveTKAModify, - "tka/sign": (*Handler).serveTKASign, - "tka/status": (*Handler).serveTKAStatus, - "tka/submit-recovery-aum": (*Handler).serveTKASubmitRecoveryAUM, - "tka/verify-deeplink": (*Handler).serveTKAVerifySigningDeeplink, - "tka/wrap-preauth-key": (*Handler).serveTKAWrapPreauthKey, - "update/check": (*Handler).serveUpdateCheck, - "update/install": (*Handler).serveUpdateInstall, - "update/progress": 
(*Handler).serveUpdateProgress, - "upload-client-metrics": (*Handler).serveUploadClientMetrics, - "usermetrics": (*Handler).serveUserMetrics, - "watch-ipn-bus": (*Handler).serveWatchIPNBus, - "whois": (*Handler).serveWhoIs, + "check-prefs": (*Handler).serveCheckPrefs, + "derpmap": (*Handler).serveDERPMap, + "goroutines": (*Handler).serveGoroutines, + "login-interactive": (*Handler).serveLoginInteractive, + "logout": (*Handler).serveLogout, + "ping": (*Handler).servePing, + "prefs": (*Handler).servePrefs, + "reload-config": (*Handler).reloadConfig, + "reset-auth": (*Handler).serveResetAuth, + "set-expiry-sooner": (*Handler).serveSetExpirySooner, + "shutdown": (*Handler).serveShutdown, + "start": (*Handler).serveStart, + "status": (*Handler).serveStatus, + "whois": (*Handler).serveWhoIs, +} + +func init() { + if buildfeatures.HasAppConnectors { + Register("appc-route-info", (*Handler).serveGetAppcRouteInfo) + } + if buildfeatures.HasAdvertiseRoutes { + Register("check-ip-forwarding", (*Handler).serveCheckIPForwarding) + Register("check-udp-gro-forwarding", (*Handler).serveCheckUDPGROForwarding) + Register("set-udp-gro-forwarding", (*Handler).serveSetUDPGROForwarding) + } + if buildfeatures.HasUseExitNode && runtime.GOOS == "linux" { + Register("check-reverse-path-filtering", (*Handler).serveCheckReversePathFiltering) + } + if buildfeatures.HasClientMetrics { + Register("upload-client-metrics", (*Handler).serveUploadClientMetrics) + } + if buildfeatures.HasClientUpdate { + Register("update/check", (*Handler).serveUpdateCheck) + } + if buildfeatures.HasUseExitNode { + Register("suggest-exit-node", (*Handler).serveSuggestExitNode) + Register("set-use-exit-node-enabled", (*Handler).serveSetUseExitNodeEnabled) + } + if buildfeatures.HasACME { + Register("set-dns", (*Handler).serveSetDNS) + } + if buildfeatures.HasDebug { + Register("bugreport", (*Handler).serveBugReport) + Register("pprof", (*Handler).servePprof) + } + if buildfeatures.HasDebug || buildfeatures.HasServe { + Register("watch-ipn-bus", (*Handler).serveWatchIPNBus) + } + if buildfeatures.HasDNS { + Register("dns-osconfig", (*Handler).serveDNSOSConfig) + Register("dns-query", (*Handler).serveDNSQuery) + } + if buildfeatures.HasUserMetrics { + Register("usermetrics", (*Handler).serveUserMetrics) + } + if buildfeatures.HasServe { + Register("query-feature", (*Handler).serveQueryFeature) + } + if buildfeatures.HasOutboundProxy || buildfeatures.HasSSH { + Register("dial", (*Handler).serveDial) + } + if buildfeatures.HasClientMetrics || buildfeatures.HasDebug { + Register("metrics", (*Handler).serveMetrics) + } + if buildfeatures.HasDebug || buildfeatures.HasAdvertiseRoutes { + Register("disconnect-control", (*Handler).disconnectControl) + } + // Alpha/experimental/debug features. These should be moved to + // their own features if/when they graduate. + if buildfeatures.HasDebug { + Register("id-token", (*Handler).serveIDToken) + Register("alpha-set-device-attrs", (*Handler).serveSetDeviceAttrs) // see tailscale/corp#24690 + Register("handle-push-message", (*Handler).serveHandlePushMessage) + Register("set-push-device-token", (*Handler).serveSetPushDeviceToken) + } + if buildfeatures.HasDebug || runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + Register("set-gui-visible", (*Handler).serveSetGUIVisible) + } + if buildfeatures.HasLogTail { + // TODO(bradfitz): separate out logtail tap functionality from upload + // functionality to make this possible? But seems unlikely people would + // want just this. 
They could "tail -f" or "journalctl -f" their logs + // themselves. + Register("logtap", (*Handler).serveLogTap) + } } // Register registers a new LocalAPI handler for the given name. @@ -417,7 +424,7 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { // OS-specific details h.logf.JSON(1, "UserBugReportOS", osdiag.SupportInfo(osdiag.LogSupportInfoReasonBugReport)) - // Tailnet lock details + // Tailnet Lock details st := h.b.NetworkLockStatus() if st.Enabled { h.logf.JSON(1, "UserBugReportTailnetLockStatus", st) @@ -427,7 +434,9 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) { } if defBool(r.URL.Query().Get("diagnose"), false) { - h.b.Doctor(r.Context(), logger.WithPrefix(h.logf, "diag: ")) + if f, ok := ipnlocal.HookDoctor.GetOk(); ok { + f(r.Context(), h.b, logger.WithPrefix(h.logf, "diag: ")) + } } w.Header().Set("Content-Type", "text/plain") fmt.Fprintln(w, startMarker) @@ -652,512 +661,6 @@ func (h *Handler) serveUserMetrics(w http.ResponseWriter, r *http.Request) { h.b.UserMetricsRegistry().Handler(w, r) } -func (h *Handler) serveDebug(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - // The action is normally in a POST form parameter, but - // some actions (like "notify") want a full JSON body, so - // permit some to have their action in a header. - var action string - switch v := r.Header.Get("Debug-Action"); v { - case "notify": - action = v - default: - action = r.FormValue("action") - } - var err error - switch action { - case "derp-set-homeless": - h.b.MagicConn().SetHomeless(true) - case "derp-unset-homeless": - h.b.MagicConn().SetHomeless(false) - case "rebind": - err = h.b.DebugRebind() - case "restun": - err = h.b.DebugReSTUN() - case "notify": - var n ipn.Notify - err = json.NewDecoder(r.Body).Decode(&n) - if err != nil { - break - } - h.b.DebugNotify(n) - case "notify-last-netmap": - h.b.DebugNotifyLastNetMap() - case "break-tcp-conns": - err = h.b.DebugBreakTCPConns() - case "break-derp-conns": - err = h.b.DebugBreakDERPConns() - case "force-netmap-update": - h.b.DebugForceNetmapUpdate() - case "control-knobs": - k := h.b.ControlKnobs() - w.Header().Set("Content-Type", "application/json") - err = json.NewEncoder(w).Encode(k.AsDebugJSON()) - if err == nil { - return - } - case "pick-new-derp": - err = h.b.DebugPickNewDERP() - case "force-prefer-derp": - var n int - err = json.NewDecoder(r.Body).Decode(&n) - if err != nil { - break - } - h.b.DebugForcePreferDERP(n) - case "peer-relay-servers": - servers := h.b.DebugPeerRelayServers().Slice() - slices.SortFunc(servers, func(a, b netip.Addr) int { - return a.Compare(b) - }) - err = json.NewEncoder(w).Encode(servers) - if err == nil { - return - } - case "": - err = fmt.Errorf("missing parameter 'action'") - default: - err = fmt.Errorf("unknown action %q", action) - } - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "done\n") -} - -func (h *Handler) serveDevSetStateStore(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "POST required", http.StatusMethodNotAllowed) - return - } - if err := h.b.SetDevStateStore(r.FormValue("key"), 
r.FormValue("value")); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "text/plain") - io.WriteString(w, "done\n") -} - -func (h *Handler) serveDebugPacketFilterRules(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - nm := h.b.NetMap() - if nm == nil { - http.Error(w, "no netmap", http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - enc.Encode(nm.PacketFilterRules) -} - -func (h *Handler) serveDebugPacketFilterMatches(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - nm := h.b.NetMap() - if nm == nil { - http.Error(w, "no netmap", http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - - enc := json.NewEncoder(w) - enc.SetIndent("", "\t") - enc.Encode(nm.PacketFilter) -} - -func (h *Handler) serveDebugPortmap(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - w.Header().Set("Content-Type", "text/plain") - - dur, err := time.ParseDuration(r.FormValue("duration")) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - gwSelf := r.FormValue("gateway_and_self") - - // Update portmapper debug flags - debugKnobs := &portmapper.DebugKnobs{VerboseLogs: true} - switch r.FormValue("type") { - case "": - case "pmp": - debugKnobs.DisablePCP = true - debugKnobs.DisableUPnP = true - case "pcp": - debugKnobs.DisablePMP = true - debugKnobs.DisableUPnP = true - case "upnp": - debugKnobs.DisablePCP = true - debugKnobs.DisablePMP = true - default: - http.Error(w, "unknown portmap debug type", http.StatusBadRequest) - return - } - - if defBool(r.FormValue("log_http"), false) { - debugKnobs.LogHTTP = true - } - - var ( - logLock sync.Mutex - handlerDone bool - ) - logf := func(format string, args ...any) { - if !strings.HasSuffix(format, "\n") { - format = format + "\n" - } - - logLock.Lock() - defer logLock.Unlock() - - // The portmapper can call this log function after the HTTP - // handler returns, which is not allowed and can cause a panic. - // If this happens, ignore the log lines since this typically - // occurs due to a client disconnect. - if handlerDone { - return - } - - // Write and flush each line to the client so that output is streamed - fmt.Fprintf(w, format, args...) 
- if f, ok := w.(http.Flusher); ok { - f.Flush() - } - } - defer func() { - logLock.Lock() - handlerDone = true - logLock.Unlock() - }() - - ctx, cancel := context.WithTimeout(r.Context(), dur) - defer cancel() - - done := make(chan bool, 1) - - var c *portmapper.Client - c = portmapper.NewClient(portmapper.Config{ - Logf: logger.WithPrefix(logf, "portmapper: "), - NetMon: h.b.NetMon(), - DebugKnobs: debugKnobs, - ControlKnobs: h.b.ControlKnobs(), - EventBus: h.eventBus, - OnChange: func() { - logf("portmapping changed.") - logf("have mapping: %v", c.HaveMapping()) - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("cb: mapping: %v", ext) - select { - case done <- true: - default: - } - return - } - logf("cb: no mapping") - }, - }) - defer c.Close() - - bus := eventbus.New() - defer bus.Close() - netMon, err := netmon.New(bus, logger.WithPrefix(logf, "monitor: ")) - if err != nil { - logf("error creating monitor: %v", err) - return - } - - gatewayAndSelfIP := func() (gw, self netip.Addr, ok bool) { - if a, b, ok := strings.Cut(gwSelf, "/"); ok { - gw = netip.MustParseAddr(a) - self = netip.MustParseAddr(b) - return gw, self, true - } - return netMon.GatewayAndSelfIP() - } - - c.SetGatewayLookupFunc(gatewayAndSelfIP) - - gw, selfIP, ok := gatewayAndSelfIP() - if !ok { - logf("no gateway or self IP; %v", netMon.InterfaceState()) - return - } - logf("gw=%v; self=%v", gw, selfIP) - - uc, err := net.ListenPacket("udp", "0.0.0.0:0") - if err != nil { - return - } - defer uc.Close() - c.SetLocalPort(uint16(uc.LocalAddr().(*net.UDPAddr).Port)) - - res, err := c.Probe(ctx) - if err != nil { - logf("error in Probe: %v", err) - return - } - logf("Probe: %+v", res) - - if !res.PCP && !res.PMP && !res.UPnP { - logf("no portmapping services available") - return - } - - if ext, ok := c.GetCachedMappingOrStartCreatingOne(); ok { - logf("mapping: %v", ext) - } else { - logf("no mapping") - } - - select { - case <-done: - case <-ctx.Done(): - if r.Context().Err() == nil { - logf("serveDebugPortmap: context done: %v", ctx.Err()) - } else { - h.logf("serveDebugPortmap: context done: %v", ctx.Err()) - } - } -} - -// EventError provides the JSON encoding of internal errors from event processing. -type EventError struct { - Error string -} - -// serveDebugBusEvents taps into the tailscaled/utils/eventbus and streams -// events to the client. -func (h *Handler) serveDebugBusEvents(w http.ResponseWriter, r *http.Request) { - // Require write access (~root) as the logs could contain something - // sensitive. 
- if !h.PermitWrite { - http.Error(w, "event bus access denied", http.StatusForbidden) - return - } - if r.Method != httpm.GET { - http.Error(w, "GET required", http.StatusMethodNotAllowed) - return - } - - bus, ok := h.LocalBackend().Sys().Bus.GetOK() - if !ok { - http.Error(w, "event bus not running", http.StatusNoContent) - return - } - - f, ok := w.(http.Flusher) - if !ok { - http.Error(w, "streaming unsupported", http.StatusInternalServerError) - return - } - - io.WriteString(w, `{"Event":"[event listener connected]\n"}`+"\n") - f.Flush() - - mon := bus.Debugger().WatchBus() - defer mon.Close() - - i := 0 - for { - select { - case <-r.Context().Done(): - fmt.Fprintf(w, `{"Event":"[event listener closed]\n"}`) - return - case <-mon.Done(): - return - case event := <-mon.Events(): - data := eventbus.DebugEvent{ - Count: i, - Type: reflect.TypeOf(event.Event).String(), - Event: event.Event, - From: event.From.Name(), - } - for _, client := range event.To { - data.To = append(data.To, client.Name()) - } - - if msg, err := json.Marshal(data); err != nil { - data.Event = EventError{Error: fmt.Sprintf( - "failed to marshal JSON for %T", event.Event, - )} - if errMsg, err := json.Marshal(data); err != nil { - fmt.Fprintf(w, - `{"Count": %d, "Event":"[ERROR] failed to marshal JSON for %T\n"}`, - i, event.Event) - } else { - w.Write(errMsg) - } - } else { - w.Write(msg) - } - f.Flush() - i++ - } - } -} - -// serveEventBusGraph taps into the event bus and dumps out the active graph of -// publishers and subscribers. It does not represent anything about the messages -// exchanged. -func (h *Handler) serveEventBusGraph(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "GET required", http.StatusMethodNotAllowed) - return - } - - bus, ok := h.LocalBackend().Sys().Bus.GetOK() - if !ok { - http.Error(w, "event bus not running", http.StatusPreconditionFailed) - return - } - - debugger := bus.Debugger() - clients := debugger.Clients() - - graph := map[string]eventbus.DebugTopic{} - - for _, client := range clients { - for _, pub := range debugger.PublishTypes(client) { - topic, ok := graph[pub.Name()] - if !ok { - topic = eventbus.DebugTopic{Name: pub.Name()} - } - topic.Publisher = client.Name() - graph[pub.Name()] = topic - } - for _, sub := range debugger.SubscribeTypes(client) { - topic, ok := graph[sub.Name()] - if !ok { - topic = eventbus.DebugTopic{Name: sub.Name()} - } - topic.Subscribers = append(topic.Subscribers, client.Name()) - graph[sub.Name()] = topic - } - } - - // The top level map is not really needed for the client, convert to a list. 
- topics := eventbus.DebugTopics{} - for _, v := range graph { - topics.Topics = append(topics.Topics, v) - } - - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(topics) -} - -func (h *Handler) serveComponentDebugLogging(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug access denied", http.StatusForbidden) - return - } - component := r.FormValue("component") - secs, _ := strconv.Atoi(r.FormValue("secs")) - err := h.b.SetComponentDebugLogging(component, h.clock.Now().Add(time.Duration(secs)*time.Second)) - var res struct { - Error string - } - if err != nil { - res.Error = err.Error() - } - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(res) -} - -func (h *Handler) serveDebugDialTypes(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "debug-dial-types access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - - ip := r.FormValue("ip") - port := r.FormValue("port") - network := r.FormValue("network") - - addr := ip + ":" + port - if _, err := netip.ParseAddrPort(addr); err != nil { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprintf(w, "invalid address %q: %v", addr, err) - return - } - - ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) - defer cancel() - - var bareDialer net.Dialer - - dialer := h.b.Dialer() - - var peerDialer net.Dialer - peerDialer.Control = dialer.PeerDialControlFunc() - - // Kick off a dial with each available dialer in parallel. - dialers := []struct { - name string - dial func(context.Context, string, string) (net.Conn, error) - }{ - {"SystemDial", dialer.SystemDial}, - {"UserDial", dialer.UserDial}, - {"PeerDial", peerDialer.DialContext}, - {"BareDial", bareDialer.DialContext}, - } - type result struct { - name string - conn net.Conn - err error - } - results := make(chan result, len(dialers)) - - var wg sync.WaitGroup - for _, dialer := range dialers { - dialer := dialer // loop capture - - wg.Add(1) - go func() { - defer wg.Done() - conn, err := dialer.dial(ctx, network, addr) - results <- result{dialer.name, conn, err} - }() - } - - wg.Wait() - for range len(dialers) { - res := <-results - fmt.Fprintf(w, "[%s] connected=%v err=%v\n", res.name, res.conn != nil, res.err) - if res.conn != nil { - res.conn.Close() - } - } -} - // servePprofFunc is the implementation of Handler.servePprof, after auth, // for platforms where we want to link it in. var servePprofFunc func(http.ResponseWriter, *http.Request) @@ -1178,7 +681,7 @@ func (h *Handler) servePprof(w http.ResponseWriter, r *http.Request) { // disconnectControl is the handler for local API /disconnect-control endpoint that shuts down control client, so that // node no longer communicates with control. Doing this makes control consider this node inactive. This can be used -// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the +// before shutting down a replica of HA subnet router or app connector deployments to ensure that control tells the // peers to switch over to another replica whilst still maintaining th existing peer connections. 
func (h *Handler) disconnectControl(w http.ResponseWriter, r *http.Request) { if !h.PermitWrite { @@ -1229,89 +732,6 @@ func (h *Handler) serveResetAuth(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNoContent) } -func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case httpm.GET: - if !h.PermitRead { - http.Error(w, "serve config denied", http.StatusForbidden) - return - } - config := h.b.ServeConfig() - bts, err := json.Marshal(config) - if err != nil { - http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) - return - } - sum := sha256.Sum256(bts) - etag := hex.EncodeToString(sum[:]) - w.Header().Set("Etag", etag) - w.Header().Set("Content-Type", "application/json") - w.Write(bts) - case httpm.POST: - if !h.PermitWrite { - http.Error(w, "serve config denied", http.StatusForbidden) - return - } - configIn := new(ipn.ServeConfig) - if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { - WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) - return - } - - // require a local admin when setting a path handler - // TODO: roll-up this Windows-specific check into either PermitWrite - // or a global admin escalation check. - if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil { - http.Error(w, err.Error(), http.StatusUnauthorized) - return - } - - etag := r.Header.Get("If-Match") - if err := h.b.SetServeConfig(configIn, etag); err != nil { - if errors.Is(err, ipnlocal.ErrETagMismatch) { - http.Error(w, err.Error(), http.StatusPreconditionFailed) - return - } - WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) - return - } - w.WriteHeader(http.StatusOK) - default: - http.Error(w, "method not allowed", http.StatusMethodNotAllowed) - } -} - -func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { - switch goos { - case "windows", "linux", "darwin", "illumos", "solaris": - default: - return nil - } - // Only check for local admin on tailscaled-on-mac (based on "sudo" - // permissions). On sandboxed variants (MacSys and AppStore), tailscaled - // cannot serve files outside of the sandbox and this check is not - // relevant. - if goos == "darwin" && version.IsSandboxedMacOS() { - return nil - } - if !configIn.HasPathHandler() { - return nil - } - if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { - return nil - } - switch goos { - case "windows": - return errors.New("must be a Windows local admin to serve a path") - case "linux", "darwin", "illumos", "solaris": - return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") - default: - // We filter goos at the start of the func, this default case - // should never happen. 
- panic("unreachable") - } -} - func (h *Handler) serveCheckIPForwarding(w http.ResponseWriter, r *http.Request) { if !h.PermitRead { http.Error(w, "IP forwarding check access denied", http.StatusForbidden) @@ -1373,6 +793,10 @@ func (h *Handler) serveCheckUDPGROForwarding(w http.ResponseWriter, r *http.Requ } func (h *Handler) serveSetUDPGROForwarding(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasGRO { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if !h.PermitWrite { http.Error(w, "UDP GRO forwarding set access denied", http.StatusForbidden) return @@ -1406,34 +830,6 @@ func (h *Handler) serveStatus(w http.ResponseWriter, r *http.Request) { e.Encode(st) } -func (h *Handler) serveDebugPeerEndpointChanges(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "status access denied", http.StatusForbidden) - return - } - - ipStr := r.FormValue("ip") - if ipStr == "" { - http.Error(w, "missing 'ip' parameter", http.StatusBadRequest) - return - } - ip, err := netip.ParseAddr(ipStr) - if err != nil { - http.Error(w, "invalid IP", http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/json") - chs, err := h.b.GetPeerEndpointChanges(r.Context(), ip) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - e := json.NewEncoder(w) - e.SetIndent("", "\t") - e.Encode(chs) -} - // InUseOtherUserIPNStream reports whether r is a request for the watch-ipn-bus // handler. If so, it writes an ipn.Notify InUseOtherUser message to the user // and returns true. Otherwise it returns false, in which case it doesn't write @@ -1574,11 +970,13 @@ func (h *Handler) servePrefs(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return } - if err := h.b.MaybeClearAppConnector(mp); err != nil { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusInternalServerError) - json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) - return + if buildfeatures.HasAppConnectors { + if err := h.b.MaybeClearAppConnector(mp); err != nil { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + json.NewEncoder(w).Encode(resJSON{Error: err.Error()}) + return + } } var err error prefs, err = h.b.EditPrefsAs(mp, h.Actor) @@ -1897,25 +1295,6 @@ func (h *Handler) serveUploadClientMetrics(w http.ResponseWriter, r *http.Reques json.NewEncoder(w).Encode(struct{}{}) } -func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "lock status access denied", http.StatusForbidden) - return - } - if r.Method != httpm.GET { - http.Error(w, "use GET", http.StatusMethodNotAllowed) - return - } - - j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - func (h *Handler) serveSetGUIVisible(w http.ResponseWriter, r *http.Request) { if r.Method != httpm.POST { http.Error(w, "use POST", http.StatusMethodNotAllowed) @@ -1938,6 +1317,10 @@ func (h *Handler) serveSetGUIVisible(w http.ResponseWriter, r *http.Request) { } func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasUseExitNode { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.POST { 
http.Error(w, "use POST", http.StatusMethodNotAllowed) return @@ -1963,366 +1346,6 @@ func (h *Handler) serveSetUseExitNodeEnabled(w http.ResponseWriter, r *http.Requ e.Encode(prefs) } -func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "lock sign access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type signRequest struct { - NodeKey key.NodePublic - RotationPublic []byte - } - var req signRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil { - http.Error(w, "signing failed: "+err.Error(), http.StatusInternalServerError) - return - } - - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "lock init access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type initRequest struct { - Keys []tka.Key - DisablementValues [][]byte - SupportDisablement []byte - } - var req initRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if !h.b.NetworkLockAllowed() { - http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) - return - } - - if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err != nil { - http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type modifyRequest struct { - AddKeys []tka.Key - RemoveKeys []tka.Key - } - var req modifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil { - http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(204) -} - -func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type wrapRequest struct { - TSKey string - TKAKey string // key.NLPrivate.MarshalText - } - var req wrapRequest - if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - var priv key.NLPrivate - if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - wrappedKey, err := 
h.b.NetworkLockWrapPreauthKey(req.TSKey, priv) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(wrappedKey)) -} - -func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "signing deeplink verification access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type verifyRequest struct { - URL string - } - var req verifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) - return - } - - res := h.b.NetworkLockVerifySigningDeeplink(req.URL) - j, err := json.MarshalIndent(res, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - secret, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading secret", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockDisable(secret); err != nil { - http.Error(w, "network-lock disable failed: "+err.Error(), http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "network-lock modify access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - // Require a JSON stanza for the body as an additional CSRF protection. 
- var req struct{} - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockForceLocalDisable(); err != nil { - http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusOK) -} - -func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "use GET", http.StatusMethodNotAllowed) - return - } - - limit := 50 - if limitStr := r.FormValue("limit"); limitStr != "" { - l, err := strconv.Atoi(limitStr) - if err != nil { - http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) - return - } - limit = int(l) - } - - updates, err := h.b.NetworkLockLog(limit) - if err != nil { - http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(updates, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048)) - if err != nil { - http.Error(w, "reading body", http.StatusBadRequest) - return - } - - sigs, err := h.b.NetworkLockAffectedSigs(keyID) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - j, err := json.MarshalIndent(sigs, "", "\t") - if err != nil { - http.Error(w, "JSON encoding error", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(j) -} - -func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - type verifyRequest struct { - Keys []tkatype.KeyID - ForkFrom string - } - var req verifyRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) - return - } - - var forkFrom tka.AUMHash - if req.ForkFrom != "" { - if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil { - http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest) - return - } - } - - res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/octet-stream") - w.Write(res.Serialize()) -} - -func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - aumBytes, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading AUM", http.StatusBadRequest) - return - } - var aum tka.AUM - if err := aum.Unserialize(aumBytes); err != nil { - http.Error(w, "decoding AUM", http.StatusBadRequest) - return - } - - res, err := h.b.NetworkLockCosignRecoveryAUM(&aum) - if err != nil { - http.Error(w, err.Error(), 
http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/octet-stream") - w.Write(res.Serialize()) -} - -func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) { - if !h.PermitWrite { - http.Error(w, "access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "use POST", http.StatusMethodNotAllowed) - return - } - - body := io.LimitReader(r.Body, 1024*1024) - aumBytes, err := io.ReadAll(body) - if err != nil { - http.Error(w, "reading AUM", http.StatusBadRequest) - return - } - var aum tka.AUM - if err := aum.Unserialize(aumBytes); err != nil { - http.Error(w, "decoding AUM", http.StatusBadRequest) - return - } - - if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusOK) -} - // serveProfiles serves profile switching-related endpoints. Supported methods // and paths are: // - GET /profiles/: list all profiles (JSON-encoded array of ipn.LoginProfiles) @@ -2478,47 +1501,6 @@ func defBool(a string, def bool) bool { return v } -func (h *Handler) serveDebugLog(w http.ResponseWriter, r *http.Request) { - if !h.PermitRead { - http.Error(w, "debug-log access denied", http.StatusForbidden) - return - } - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - defer h.b.TryFlushLogs() // kick off upload after we're done logging - - type logRequestJSON struct { - Lines []string - Prefix string - } - - var logRequest logRequestJSON - if err := json.NewDecoder(r.Body).Decode(&logRequest); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - prefix := logRequest.Prefix - if prefix == "" { - prefix = "debug-log" - } - logf := logger.WithPrefix(h.logf, prefix+": ") - - // We can write logs too fast for logtail to handle, even when - // opting-out of rate limits. Limit ourselves to at most one message - // per 20ms and a burst of 60 log lines, which should be fast enough to - // not block for too long but slow enough that we can upload all lines. - logf = logger.SlowLoggerWithClock(r.Context(), logf, 20*time.Millisecond, 60, h.clock.Now) - - for _, line := range logRequest.Lines { - logf("%s", line) - } - - w.WriteHeader(http.StatusNoContent) -} - // serveUpdateCheck returns the ClientVersion from Status, which contains // information on whether an update is available, and if so, what version, // *if* we support auto-updates on this platform. If we don't, this endpoint @@ -2530,13 +1512,6 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } - - if !clientupdate.CanAutoUpdate() { - // if we don't support auto-update, just say that we're up to date - json.NewEncoder(w).Encode(tailcfg.ClientVersion{RunningLatest: true}) - return - } - cv := h.b.StatusWithoutPeers().ClientVersion // ipnstate.Status documentation notes that ClientVersion may be nil on some // platforms where this information is unavailable. In that case, return a @@ -2549,40 +1524,13 @@ func (h *Handler) serveUpdateCheck(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(cv) } -// serveUpdateInstall sends a request to the LocalBackend to start a Tailscale -// self-update. A successful response does not indicate whether the update -// succeeded, only that the request was accepted. 
Clients should use -// serveUpdateProgress after pinging this endpoint to check how the update is -// going. -func (h *Handler) serveUpdateInstall(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.POST { - http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) - return - } - - w.WriteHeader(http.StatusAccepted) - - go h.b.DoSelfUpdate() -} - -// serveUpdateProgress returns the status of an in-progress Tailscale self-update. -// This is provided as a slice of ipnstate.UpdateProgress structs with various -// log messages in order from oldest to newest. If an update is not in progress, -// the returned slice will be empty. -func (h *Handler) serveUpdateProgress(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.GET { - http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) - return - } - - ups := h.b.GetSelfUpdateProgress() - - json.NewEncoder(w).Encode(ups) -} - // serveDNSOSConfig serves the current system DNS configuration as a JSON object, if // supported by the OS. func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return @@ -2626,6 +1574,10 @@ func (h *Handler) serveDNSOSConfig(w http.ResponseWriter, r *http.Request) { // // The response if successful is a DNSQueryResponse JSON object. func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDNS { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return @@ -2640,7 +1592,7 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { queryType := q.Get("type") qt := dnsmessage.TypeA if queryType != "" { - t, err := dnstype.DNSMessageTypeForString(queryType) + t, err := dnsMessageTypeForString(queryType) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return @@ -2661,134 +1613,114 @@ func (h *Handler) serveDNSQuery(w http.ResponseWriter, r *http.Request) { }) } -// serveDriveServerAddr handles updates of the Taildrive file server address. -func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { - if r.Method != httpm.PUT { - http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) +// dnsMessageTypeForString returns the dnsmessage.Type for the given string. +// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. 
+func dnsMessageTypeForString(s string) (t dnsmessage.Type, err error) { + s = strings.TrimSpace(strings.ToUpper(s)) + switch s { + case "AAAA": + return dnsmessage.TypeAAAA, nil + case "ALL": + return dnsmessage.TypeALL, nil + case "A": + return dnsmessage.TypeA, nil + case "CNAME": + return dnsmessage.TypeCNAME, nil + case "HINFO": + return dnsmessage.TypeHINFO, nil + case "MINFO": + return dnsmessage.TypeMINFO, nil + case "MX": + return dnsmessage.TypeMX, nil + case "NS": + return dnsmessage.TypeNS, nil + case "OPT": + return dnsmessage.TypeOPT, nil + case "PTR": + return dnsmessage.TypePTR, nil + case "SOA": + return dnsmessage.TypeSOA, nil + case "SRV": + return dnsmessage.TypeSRV, nil + case "TXT": + return dnsmessage.TypeTXT, nil + case "WKS": + return dnsmessage.TypeWKS, nil + } + return 0, errors.New("unknown DNS message type: " + s) +} + +// serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. +func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasUseExitNode { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) return } - - b, err := io.ReadAll(r.Body) + if r.Method != httpm.GET { + http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) + return + } + res, err := h.b.SuggestExitNode() if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + WriteErrorJSON(w, err) return } - - h.b.DriveSetServerAddr(string(b)) - w.WriteHeader(http.StatusCreated) + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(res) } -// serveShares handles the management of Taildrive shares. -// -// PUT - adds or updates an existing share -// DELETE - removes a share -// GET - gets a list of all shares, sorted by name -// POST - renames an existing share -func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { - if !h.b.DriveSharingEnabled() { - http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden) +// Shutdown is an eventbus value published when tailscaled shutdown +// is requested via LocalAPI. Its only consumer is [ipnserver.Server]. +type Shutdown struct{} + +// serveShutdown shuts down tailscaled. It requires write access +// and the [pkey.AllowTailscaledRestart] policy to be enabled. +// See tailscale/corp#32674. 
+func (h *Handler) serveShutdown(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "only POST allowed", http.StatusMethodNotAllowed) return } - switch r.Method { - case httpm.PUT: - var share drive.Share - err := json.NewDecoder(r.Body).Decode(&share) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - share.Path = path.Clean(share.Path) - fi, err := os.Stat(share.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !fi.IsDir() { - http.Error(w, "not a directory", http.StatusBadRequest) - return - } - if drive.AllowShareAs() { - // share as the connected user - username, err := h.Actor.Username() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - share.As = username - } - err = h.b.DriveSetShare(&share) - if err != nil { - if errors.Is(err, drive.ErrInvalidShareName) { - http.Error(w, "invalid share name", http.StatusBadRequest) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusCreated) - case httpm.DELETE: - b, err := io.ReadAll(r.Body) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - err = h.b.DriveRemoveShare(string(b)) - if err != nil { - if os.IsNotExist(err) { - http.Error(w, "share not found", http.StatusNotFound) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - case httpm.POST: - var names [2]string - err := json.NewDecoder(r.Body).Decode(&names) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - err = h.b.DriveRenameShare(names[0], names[1]) - if err != nil { - if os.IsNotExist(err) { - http.Error(w, "share not found", http.StatusNotFound) - return - } - if os.IsExist(err) { - http.Error(w, "share name already used", http.StatusBadRequest) - return - } - if errors.Is(err, drive.ErrInvalidShareName) { - http.Error(w, "invalid share name", http.StatusBadRequest) - return - } - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - case httpm.GET: - shares := h.b.DriveGetShares() - err := json.NewEncoder(w).Encode(shares) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - default: - http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + + if !h.PermitWrite { + http.Error(w, "shutdown access denied", http.StatusForbidden) + return } + + polc := h.b.Sys().PolicyClientOrDefault() + if permitShutdown, _ := polc.GetBoolean(pkey.AllowTailscaledRestart, false); !permitShutdown { + http.Error(w, "shutdown access denied by policy", http.StatusForbidden) + return + } + + ec := h.eventBus.Client("localapi.Handler") + defer ec.Close() + + w.WriteHeader(http.StatusOK) + if f, ok := w.(http.Flusher); ok { + f.Flush() + } + + eventbus.Publish[Shutdown](ec).Publish(Shutdown{}) } -// serveSuggestExitNode serves a POST endpoint for returning a suggested exit node. 
-func (h *Handler) serveSuggestExitNode(w http.ResponseWriter, r *http.Request) { +func (h *Handler) serveGetAppcRouteInfo(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasAppConnectors { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } if r.Method != httpm.GET { http.Error(w, "only GET allowed", http.StatusMethodNotAllowed) return } - res, err := h.b.SuggestExitNode() + res, err := h.b.ReadRouteInfo() if err != nil { - WriteErrorJSON(w, err) - return + if errors.Is(err, ipn.ErrStateNotExist) { + res = &appctype.RouteInfo{} + } else { + WriteErrorJSON(w, err) + return + } } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(res) diff --git a/ipn/localapi/localapi_drive.go b/ipn/localapi/localapi_drive.go new file mode 100644 index 0000000000000..eb765ec2eabba --- /dev/null +++ b/ipn/localapi/localapi_drive.go @@ -0,0 +1,141 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_drive + +package localapi + +import ( + "encoding/json" + "errors" + "io" + "net/http" + "os" + "path" + + "tailscale.com/drive" + "tailscale.com/util/httpm" +) + +func init() { + Register("drive/fileserver-address", (*Handler).serveDriveServerAddr) + Register("drive/shares", (*Handler).serveShares) +} + +// serveDriveServerAddr handles updates of the Taildrive file server address. +func (h *Handler) serveDriveServerAddr(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.PUT { + http.Error(w, "only PUT allowed", http.StatusMethodNotAllowed) + return + } + + b, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + h.b.DriveSetServerAddr(string(b)) + w.WriteHeader(http.StatusCreated) +} + +// serveShares handles the management of Taildrive shares. 
+// +// PUT - adds or updates an existing share +// DELETE - removes a share +// GET - gets a list of all shares, sorted by name +// POST - renames an existing share +func (h *Handler) serveShares(w http.ResponseWriter, r *http.Request) { + if !h.b.DriveSharingEnabled() { + http.Error(w, `taildrive sharing not enabled, please add the attribute "drive:share" to this node in your ACLs' "nodeAttrs" section`, http.StatusForbidden) + return + } + switch r.Method { + case httpm.PUT: + var share drive.Share + err := json.NewDecoder(r.Body).Decode(&share) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + share.Path = path.Clean(share.Path) + fi, err := os.Stat(share.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + if !fi.IsDir() { + http.Error(w, "not a directory", http.StatusBadRequest) + return + } + if drive.AllowShareAs() { + // share as the connected user + username, err := h.Actor.Username() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + share.As = username + } + err = h.b.DriveSetShare(&share) + if err != nil { + if errors.Is(err, drive.ErrInvalidShareName) { + http.Error(w, "invalid share name", http.StatusBadRequest) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusCreated) + case httpm.DELETE: + b, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.b.DriveRemoveShare(string(b)) + if err != nil { + if os.IsNotExist(err) { + http.Error(w, "share not found", http.StatusNotFound) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + case httpm.POST: + var names [2]string + err := json.NewDecoder(r.Body).Decode(&names) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + err = h.b.DriveRenameShare(names[0], names[1]) + if err != nil { + if os.IsNotExist(err) { + http.Error(w, "share not found", http.StatusNotFound) + return + } + if os.IsExist(err) { + http.Error(w, "share name already used", http.StatusBadRequest) + return + } + if errors.Is(err, drive.ErrInvalidShareName) { + http.Error(w, "invalid share name", http.StatusBadRequest) + return + } + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) + case httpm.GET: + shares := h.b.DriveGetShares() + err := json.NewEncoder(w).Encode(shares) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + default: + http.Error(w, "unsupported method", http.StatusMethodNotAllowed) + } +} diff --git a/ipn/localapi/localapi_test.go b/ipn/localapi/localapi_test.go index 970f798d05005..fa24717f7a942 100644 --- a/ipn/localapi/localapi_test.go +++ b/ipn/localapi/localapi_test.go @@ -35,6 +35,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/slicesx" "tailscale.com/wgengine" ) @@ -158,7 +159,6 @@ func TestWhoIsArgTypes(t *testing.T) { t.Fatalf("backend called with %v; want %v", k, keyStr) } return match() - }, peerCaps: map[netip.Addr]tailcfg.PeerCapMap{ netip.MustParseAddr("100.101.102.103"): map[tailcfg.PeerCapability][]tailcfg.RawMessage{ @@ -336,10 +336,10 @@ func TestServeWatchIPNBus(t *testing.T) { func newTestLocalBackend(t testing.TB) *ipnlocal.LocalBackend { var logf logger.Logf = 
logger.Discard - sys := tsd.NewSystem() + sys := tsd.NewSystemWithBus(eventbustest.NewBus(t)) store := new(mem.Store) sys.Set(store) - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatalf("NewFakeUserspaceEngine: %v", err) } diff --git a/ipn/localapi/pprof.go b/ipn/localapi/pprof.go index 8c9429b31385a..9476f721fb1ce 100644 --- a/ipn/localapi/pprof.go +++ b/ipn/localapi/pprof.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !android && !js +//go:build !ios && !android && !js && !ts_omit_debug // We don't include it on mobile where we're more memory constrained and // there's no CLI to get at the results anyway. diff --git a/ipn/localapi/serve.go b/ipn/localapi/serve.go new file mode 100644 index 0000000000000..56c8b486cf93c --- /dev/null +++ b/ipn/localapi/serve.go @@ -0,0 +1,108 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_serve + +package localapi + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "net/http" + "runtime" + + "tailscale.com/ipn" + "tailscale.com/ipn/ipnlocal" + "tailscale.com/util/httpm" + "tailscale.com/version" +) + +func init() { + Register("serve-config", (*Handler).serveServeConfig) +} + +func (h *Handler) serveServeConfig(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case httpm.GET: + if !h.PermitRead { + http.Error(w, "serve config denied", http.StatusForbidden) + return + } + config := h.b.ServeConfig() + bts, err := json.Marshal(config) + if err != nil { + http.Error(w, "error encoding config: "+err.Error(), http.StatusInternalServerError) + return + } + sum := sha256.Sum256(bts) + etag := hex.EncodeToString(sum[:]) + w.Header().Set("Etag", etag) + w.Header().Set("Content-Type", "application/json") + w.Write(bts) + case httpm.POST: + if !h.PermitWrite { + http.Error(w, "serve config denied", http.StatusForbidden) + return + } + configIn := new(ipn.ServeConfig) + if err := json.NewDecoder(r.Body).Decode(configIn); err != nil { + WriteErrorJSON(w, fmt.Errorf("decoding config: %w", err)) + return + } + + // require a local admin when setting a path handler + // TODO: roll-up this Windows-specific check into either PermitWrite + // or a global admin escalation check. + if err := authorizeServeConfigForGOOSAndUserContext(runtime.GOOS, configIn, h); err != nil { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } + + etag := r.Header.Get("If-Match") + if err := h.b.SetServeConfig(configIn, etag); err != nil { + if errors.Is(err, ipnlocal.ErrETagMismatch) { + http.Error(w, err.Error(), http.StatusPreconditionFailed) + return + } + WriteErrorJSON(w, fmt.Errorf("updating config: %w", err)) + return + } + w.WriteHeader(http.StatusOK) + default: + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + } +} + +func authorizeServeConfigForGOOSAndUserContext(goos string, configIn *ipn.ServeConfig, h *Handler) error { + switch goos { + case "windows", "linux", "darwin", "illumos", "solaris": + default: + return nil + } + // Only check for local admin on tailscaled-on-mac (based on "sudo" + // permissions). On sandboxed variants (MacSys and AppStore), tailscaled + // cannot serve files outside of the sandbox and this check is not + // relevant. 
+ if goos == "darwin" && version.IsSandboxedMacOS() { + return nil + } + if !configIn.HasPathHandler() { + return nil + } + if h.Actor.IsLocalAdmin(h.b.OperatorUserID()) { + return nil + } + switch goos { + case "windows": + return errors.New("must be a Windows local admin to serve a path") + case "linux", "darwin", "illumos", "solaris": + return errors.New("must be root, or be an operator and able to run 'sudo tailscale' to serve a path") + default: + // We filter goos at the start of the func, this default case + // should never happen. + panic("unreachable") + } +} diff --git a/ipn/localapi/syspolicy_api.go b/ipn/localapi/syspolicy_api.go index a438d352b52e1..edb82e042f2ce 100644 --- a/ipn/localapi/syspolicy_api.go +++ b/ipn/localapi/syspolicy_api.go @@ -17,7 +17,7 @@ import ( ) func init() { - handler["policy/"] = (*Handler).servePolicy + Register("policy/", (*Handler).servePolicy) } func (h *Handler) servePolicy(w http.ResponseWriter, r *http.Request) { diff --git a/ipn/localapi/tailnetlock.go b/ipn/localapi/tailnetlock.go new file mode 100644 index 0000000000000..4baadb7339871 --- /dev/null +++ b/ipn/localapi/tailnetlock.go @@ -0,0 +1,413 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package localapi + +import ( + "encoding/json" + "io" + "net/http" + "strconv" + + "tailscale.com/tka" + "tailscale.com/types/key" + "tailscale.com/types/tkatype" + "tailscale.com/util/httpm" +) + +func init() { + Register("tka/affected-sigs", (*Handler).serveTKAAffectedSigs) + Register("tka/cosign-recovery-aum", (*Handler).serveTKACosignRecoveryAUM) + Register("tka/disable", (*Handler).serveTKADisable) + Register("tka/force-local-disable", (*Handler).serveTKALocalDisable) + Register("tka/generate-recovery-aum", (*Handler).serveTKAGenerateRecoveryAUM) + Register("tka/init", (*Handler).serveTKAInit) + Register("tka/log", (*Handler).serveTKALog) + Register("tka/modify", (*Handler).serveTKAModify) + Register("tka/sign", (*Handler).serveTKASign) + Register("tka/status", (*Handler).serveTKAStatus) + Register("tka/submit-recovery-aum", (*Handler).serveTKASubmitRecoveryAUM) + Register("tka/verify-deeplink", (*Handler).serveTKAVerifySigningDeeplink) + Register("tka/wrap-preauth-key", (*Handler).serveTKAWrapPreauthKey) +} + +func (h *Handler) serveTKAStatus(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "lock status access denied", http.StatusForbidden) + return + } + if r.Method != httpm.GET { + http.Error(w, "use GET", http.StatusMethodNotAllowed) + return + } + + j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKASign(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "lock sign access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type signRequest struct { + NodeKey key.NodePublic + RotationPublic []byte + } + var req signRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockSign(req.NodeKey, req.RotationPublic); err != nil { + http.Error(w, "signing failed: "+err.Error(), http.StatusInternalServerError) + return + } + + 
w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKAInit(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "lock init access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type initRequest struct { + Keys []tka.Key + DisablementValues [][]byte + SupportDisablement []byte + } + var req initRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if !h.b.NetworkLockAllowed() { + http.Error(w, "Tailnet Lock is not supported on your pricing plan", http.StatusForbidden) + return + } + + if err := h.b.NetworkLockInit(req.Keys, req.DisablementValues, req.SupportDisablement); err != nil { + http.Error(w, "initialization failed: "+err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(h.b.NetworkLockStatus(), "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type modifyRequest struct { + AddKeys []tka.Key + RemoveKeys []tka.Key + } + var req modifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockModify(req.AddKeys, req.RemoveKeys); err != nil { + http.Error(w, "network-lock modify failed: "+err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(204) +} + +func (h *Handler) serveTKAWrapPreauthKey(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type wrapRequest struct { + TSKey string + TKAKey string // key.NLPrivate.MarshalText + } + var req wrapRequest + if err := json.NewDecoder(http.MaxBytesReader(w, r.Body, 12*1024)).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + var priv key.NLPrivate + if err := priv.UnmarshalText([]byte(req.TKAKey)); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + wrappedKey, err := h.b.NetworkLockWrapPreauthKey(req.TSKey, priv) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(wrappedKey)) +} + +func (h *Handler) serveTKAVerifySigningDeeplink(w http.ResponseWriter, r *http.Request) { + if !h.PermitRead { + http.Error(w, "signing deeplink verification access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type verifyRequest struct { + URL string + } + var req verifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) + return + } + + res := h.b.NetworkLockVerifySigningDeeplink(req.URL) + j, err := json.MarshalIndent(res, "", "\t") + if err != nil { + http.Error(w, "JSON encoding 
error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKADisable(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + secret, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading secret", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockDisable(secret); err != nil { + http.Error(w, "network-lock disable failed: "+err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKALocalDisable(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "network-lock modify access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + // Require a JSON stanza for the body as an additional CSRF protection. + var req struct{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockForceLocalDisable(); err != nil { + http.Error(w, "network-lock local disable failed: "+err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusOK) +} + +func (h *Handler) serveTKALog(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.GET { + http.Error(w, "use GET", http.StatusMethodNotAllowed) + return + } + + limit := 50 + if limitStr := r.FormValue("limit"); limitStr != "" { + l, err := strconv.Atoi(limitStr) + if err != nil { + http.Error(w, "parsing 'limit' parameter: "+err.Error(), http.StatusBadRequest) + return + } + limit = int(l) + } + + updates, err := h.b.NetworkLockLog(limit) + if err != nil { + http.Error(w, "reading log failed: "+err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(updates, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAAffectedSigs(w http.ResponseWriter, r *http.Request) { + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + keyID, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 2048)) + if err != nil { + http.Error(w, "reading body", http.StatusBadRequest) + return + } + + sigs, err := h.b.NetworkLockAffectedSigs(keyID) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + j, err := json.MarshalIndent(sigs, "", "\t") + if err != nil { + http.Error(w, "JSON encoding error", http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(j) +} + +func (h *Handler) serveTKAGenerateRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + type verifyRequest struct { + Keys []tkatype.KeyID + ForkFrom string + } + var req verifyRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON for verifyRequest body", http.StatusBadRequest) + return + } + + var forkFrom 
tka.AUMHash + if req.ForkFrom != "" { + if err := forkFrom.UnmarshalText([]byte(req.ForkFrom)); err != nil { + http.Error(w, "decoding fork-from: "+err.Error(), http.StatusBadRequest) + return + } + } + + res, err := h.b.NetworkLockGenerateRecoveryAUM(req.Keys, forkFrom) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(res.Serialize()) +} + +func (h *Handler) serveTKACosignRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + aumBytes, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading AUM", http.StatusBadRequest) + return + } + var aum tka.AUM + if err := aum.Unserialize(aumBytes); err != nil { + http.Error(w, "decoding AUM", http.StatusBadRequest) + return + } + + res, err := h.b.NetworkLockCosignRecoveryAUM(&aum) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(res.Serialize()) +} + +func (h *Handler) serveTKASubmitRecoveryAUM(w http.ResponseWriter, r *http.Request) { + if !h.PermitWrite { + http.Error(w, "access denied", http.StatusForbidden) + return + } + if r.Method != httpm.POST { + http.Error(w, "use POST", http.StatusMethodNotAllowed) + return + } + + body := io.LimitReader(r.Body, 1024*1024) + aumBytes, err := io.ReadAll(body) + if err != nil { + http.Error(w, "reading AUM", http.StatusBadRequest) + return + } + var aum tka.AUM + if err := aum.Unserialize(aumBytes); err != nil { + http.Error(w, "decoding AUM", http.StatusBadRequest) + return + } + + if err := h.b.NetworkLockSubmitRecoveryAUM(&aum); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusOK) +} diff --git a/ipn/prefs.go b/ipn/prefs.go index 1efb5d0feabd9..81dd1c1c3dc49 100644 --- a/ipn/prefs.go +++ b/ipn/prefs.go @@ -20,6 +20,7 @@ import ( "tailscale.com/atomicfile" "tailscale.com/drive" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnstate" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" @@ -264,6 +265,8 @@ type Prefs struct { // NetfilterKind specifies what netfilter implementation to use. // + // It can be "iptables", "nftables", or "" to auto-detect. + // // Linux-only. 
NetfilterKind string @@ -529,12 +532,16 @@ func (p *Prefs) Pretty() string { return p.pretty(runtime.GOOS) } func (p *Prefs) pretty(goos string) string { var sb strings.Builder sb.WriteString("Prefs{") - fmt.Fprintf(&sb, "ra=%v ", p.RouteAll) - fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning) - if p.RunSSH { + if buildfeatures.HasUseRoutes { + fmt.Fprintf(&sb, "ra=%v ", p.RouteAll) + } + if buildfeatures.HasDNS { + fmt.Fprintf(&sb, "dns=%v want=%v ", p.CorpDNS, p.WantRunning) + } + if buildfeatures.HasSSH && p.RunSSH { sb.WriteString("ssh=true ") } - if p.RunWebClient { + if buildfeatures.HasWebClient && p.RunWebClient { sb.WriteString("webclient=true ") } if p.LoggedOut { @@ -549,26 +556,30 @@ func (p *Prefs) pretty(goos string) string { if p.ShieldsUp { sb.WriteString("shields=true ") } - if p.ExitNodeIP.IsValid() { - fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess) - } else if !p.ExitNodeID.IsZero() { - fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) - } - if p.AutoExitNode.IsSet() { - fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) - } - if len(p.AdvertiseRoutes) > 0 || goos == "linux" { - fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) - } - if len(p.AdvertiseRoutes) > 0 || p.NoSNAT { - fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT) + if buildfeatures.HasUseExitNode { + if p.ExitNodeIP.IsValid() { + fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeIP, p.ExitNodeAllowLANAccess) + } else if !p.ExitNodeID.IsZero() { + fmt.Fprintf(&sb, "exit=%v lan=%t ", p.ExitNodeID, p.ExitNodeAllowLANAccess) + } + if p.AutoExitNode.IsSet() { + fmt.Fprintf(&sb, "auto=%v ", p.AutoExitNode) + } } - if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) { - // Only print if we're advertising any routes, or the user has - // turned off stateful filtering (NoStatefulFiltering=true ⇒ - // StatefulFiltering=false). - bb, _ := p.NoStatefulFiltering.Get() - fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb) + if buildfeatures.HasAdvertiseRoutes { + if len(p.AdvertiseRoutes) > 0 || goos == "linux" { + fmt.Fprintf(&sb, "routes=%v ", p.AdvertiseRoutes) + } + if len(p.AdvertiseRoutes) > 0 || p.NoSNAT { + fmt.Fprintf(&sb, "snat=%v ", !p.NoSNAT) + } + if len(p.AdvertiseRoutes) > 0 || p.NoStatefulFiltering.EqualBool(true) { + // Only print if we're advertising any routes, or the user has + // turned off stateful filtering (NoStatefulFiltering=true ⇒ + // StatefulFiltering=false). + bb, _ := p.NoStatefulFiltering.Get() + fmt.Fprintf(&sb, "statefulFiltering=%v ", !bb) + } } if len(p.AdvertiseTags) > 0 { fmt.Fprintf(&sb, "tags=%s ", strings.Join(p.AdvertiseTags, ",")) @@ -591,9 +602,13 @@ func (p *Prefs) pretty(goos string) string { if p.NetfilterKind != "" { fmt.Fprintf(&sb, "netfilterKind=%s ", p.NetfilterKind) } - sb.WriteString(p.AutoUpdate.Pretty()) - sb.WriteString(p.AppConnector.Pretty()) - if p.RelayServerPort != nil { + if buildfeatures.HasClientUpdate { + sb.WriteString(p.AutoUpdate.Pretty()) + } + if buildfeatures.HasAppConnectors { + sb.WriteString(p.AppConnector.Pretty()) + } + if buildfeatures.HasRelayServer && p.RelayServerPort != nil { fmt.Fprintf(&sb, "relayServerPort=%d ", *p.RelayServerPort) } if p.Persist != nil { @@ -694,6 +709,7 @@ func NewPrefs() *Prefs { // Provide default values for options which might be missing // from the json data for any reason. The json can still // override them to false. 
+ p := &Prefs{ // ControlURL is explicitly not set to signal that // it's not yet configured, which relaxes the CLI "up" @@ -785,6 +801,9 @@ func (p *Prefs) AdvertisesExitNode() bool { // SetAdvertiseExitNode mutates p (if non-nil) to add or remove the two // /0 exit node routes. func (p *Prefs) SetAdvertiseExitNode(runExit bool) { + if !buildfeatures.HasAdvertiseExitNode { + return + } if p == nil { return } diff --git a/ipn/prefs_test.go b/ipn/prefs_test.go index 3339a631ce827..2336164096c14 100644 --- a/ipn/prefs_test.go +++ b/ipn/prefs_test.go @@ -501,7 +501,7 @@ func TestPrefsPretty(t *testing.T) { }, }, "linux", - `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u=""}}`, + `Prefs{ra=false dns=false want=false routes=[] nf=off update=off Persist{o=, n=[B1VKl] u="" ak=-}}`, }, { Prefs{ diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 40bbbf0370822..78b72d0bc8f45 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_aws +//go:build !ts_omit_aws // Package awsstore contains an ipn.StateStore implementation using AWS SSM. package awsstore @@ -20,10 +20,21 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ssm" ssmTypes "github.com/aws/aws-sdk-go-v2/service/ssm/types" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/types/logger" ) +func init() { + store.Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { + ssmARN, opts, err := ParseARNAndOpts(arg) + if err != nil { + return nil, err + } + return New(logf, ssmARN, opts...) + }) +} + const ( parameterNameRxStr = `^parameter(/.*)` ) diff --git a/ipn/store/awsstore/store_aws_test.go b/ipn/store/awsstore/store_aws_test.go index 3382635a7d333..3cc23e48d4b12 100644 --- a/ipn/store/awsstore/store_aws_test.go +++ b/ipn/store/awsstore/store_aws_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !ts_omit_aws +//go:build !ts_omit_aws package awsstore diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 5b25471c75638..f48237c057142 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -16,6 +16,7 @@ import ( "tailscale.com/envknob" "tailscale.com/ipn" + "tailscale.com/ipn/store" "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" @@ -25,6 +26,13 @@ import ( "tailscale.com/util/mak" ) +func init() { + store.Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { + secretName := strings.TrimPrefix(path, "kube:") + return New(logf, secretName) + }) +} + const ( // timeout is the timeout for a single state update that includes calls to the API server to write or read a // state Secret and emit an Event. 
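Note on the state-store refactor above: the two deleted files below (ipn/store/store_aws.go and ipn/store/store_kube.go) used to register the AWS SSM and Kubernetes backends on their behalf; with the init() funcs added in awsstore and kubestore, each backend now claims its own prefix directly. As a minimal sketch of that pattern (assuming only the store.Register signature visible in this diff; the "demo:" prefix, package name, and use of the in-memory store are invented for illustration), a hypothetical backend would plug in like this:

// Package demostore is a hypothetical state-store backend used only to
// illustrate the prefix-based registration pattern shown in this diff.
package demostore

import (
	"strings"

	"tailscale.com/ipn"
	"tailscale.com/ipn/store"
	"tailscale.com/ipn/store/mem"
	"tailscale.com/types/logger"
)

func init() {
	// Claim a --state argument prefix; ipn/store picks the factory whose
	// prefix matches, the same way "kube:" and "arn:" are handled above.
	store.Register("demo:", func(logf logger.Logf, arg string) (ipn.StateStore, error) {
		// Strip the prefix before using the remainder, as kubestore does
		// with the Secret name after "kube:".
		name := strings.TrimPrefix(arg, "demo:")
		logf("demostore: using in-memory state for %q", name)
		return new(mem.Store), nil
	})
}

Because registration happens in init(), simply importing (or omitting) the backend package controls whether the prefix is available, which is what lets the ts_omit_aws build tag drop the AWS dependency entirely.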
diff --git a/ipn/store/store_aws.go b/ipn/store/store_aws.go deleted file mode 100644 index 834b657d34df0..0000000000000 --- a/ipn/store/store_aws.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build (ts_aws || (linux && (arm64 || amd64) && !android)) && !ts_omit_aws - -package store - -import ( - "tailscale.com/ipn" - "tailscale.com/ipn/store/awsstore" - "tailscale.com/types/logger" -) - -func init() { - Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { - ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) - if err != nil { - return nil, err - } - return awsstore.New(logf, ssmARN, opts...) - }) -} diff --git a/ipn/store/store_kube.go b/ipn/store/store_kube.go deleted file mode 100644 index 7eac75c196990..0000000000000 --- a/ipn/store/store_kube.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build (ts_kube || (linux && (arm64 || amd64) && !android)) && !ts_omit_kube - -package store - -import ( - "strings" - - "tailscale.com/ipn" - "tailscale.com/ipn/store/kubestore" - "tailscale.com/types/logger" -) - -func init() { - Register("kube:", func(logf logger.Logf, path string) (ipn.StateStore, error) { - secretName := strings.TrimPrefix(path, "kube:") - return kubestore.New(logf, secretName) - }) -} diff --git a/k8s-operator/api-proxy/proxy.go b/k8s-operator/api-proxy/proxy.go index a0f2f930b8067..762a52f1fdbfc 100644 --- a/k8s-operator/api-proxy/proxy.go +++ b/k8s-operator/api-proxy/proxy.go @@ -6,10 +6,13 @@ package apiproxy import ( + "bytes" "context" "crypto/tls" + "encoding/json" "errors" "fmt" + "io" "net" "net/http" "net/http/httputil" @@ -19,13 +22,17 @@ import ( "time" "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/client-go/rest" "k8s.io/client-go/transport" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" - "tailscale.com/k8s-operator/sessionrecording" + "tailscale.com/envknob" ksr "tailscale.com/k8s-operator/sessionrecording" "tailscale.com/kube/kubetypes" + "tailscale.com/net/netx" + "tailscale.com/sessionrecording" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/util/clientmetric" @@ -83,12 +90,14 @@ func NewAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, ts *tsn } ap := &APIServerProxy{ - log: zlog, - lc: lc, - authMode: mode == kubetypes.APIServerProxyModeAuth, - https: https, - upstreamURL: u, - ts: ts, + log: zlog, + lc: lc, + authMode: mode == kubetypes.APIServerProxyModeAuth, + https: https, + upstreamURL: u, + ts: ts, + sendEventFunc: sessionrecording.SendEvent, + eventsEnabled: envknob.Bool("TS_EXPERIMENTAL_KUBE_API_EVENTS"), } ap.rp = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { @@ -183,6 +192,11 @@ type APIServerProxy struct { ts *tsnet.Server hs *http.Server upstreamURL *url.URL + + sendEventFunc func(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error + + // Flag used to enable sending API requests as events to tsrecorder. + eventsEnabled bool } // serveDefault is the default handler for Kubernetes API server requests. 
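The hunks that follow gate request recording on the eventsEnabled flag (set from the TS_EXPERIMENTAL_KUBE_API_EVENTS envknob above) and on the recorder configuration derived from the caller's peer capabilities. A minimal sketch of that decision, not the real implementation and with an invented function name, just to make the fail-open versus fail-closed behavior explicit:

// Package apiproxysketch is illustrative only; it mirrors the gating logic
// in recordRequestAsEvent below.
package apiproxysketch

import (
	"errors"
	"net/netip"
)

// shouldRecord reports whether a Kubernetes API request should be sent to a
// recorder before being proxied. When no recorders are configured, failOpen
// decides whether the request proceeds unrecorded or is rejected.
func shouldRecord(eventsEnabled, failOpen bool, recorders []netip.AddrPort) (bool, error) {
	if !eventsEnabled {
		// Feature off: proxy the request without recording.
		return false, nil
	}
	if len(recorders) == 0 {
		if failOpen {
			// No recorders available, but policy allows continuing.
			return false, nil
		}
		return false, errors.New("forbidden: kubernetes api request must be recorded, but no recorders are available")
	}
	return true, nil
}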
@@ -192,7 +206,16 @@ func (ap *APIServerProxy) serveDefault(w http.ResponseWriter, r *http.Request) { ap.authError(w, err) return } + + if err = ap.recordRequestAsEvent(r, who); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + counterNumRequestsProxied.Add(1) + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } @@ -220,7 +243,7 @@ func (ap *APIServerProxy) serveAttachWS(w http.ResponseWriter, r *http.Request) ap.sessionForProto(w, r, ksr.AttachSessionType, ksr.WSProtocol) } -func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType sessionrecording.SessionType, proto ksr.Protocol) { +func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request, sessionType ksr.SessionType, proto ksr.Protocol) { const ( podNameKey = "pod" namespaceNameKey = "namespace" @@ -232,6 +255,14 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request ap.authError(w, err) return } + + if err = ap.recordRequestAsEvent(r, who); err != nil { + msg := fmt.Sprintf("error recording Kubernetes API request: %v", err) + ap.log.Errorf(msg) + http.Error(w, msg, http.StatusBadGateway) + return + } + counterNumRequestsProxied.Add(1) failOpen, addrs, err := determineRecorderConfig(who) if err != nil { @@ -283,6 +314,111 @@ func (ap *APIServerProxy) sessionForProto(w http.ResponseWriter, r *http.Request ap.rp.ServeHTTP(h, r.WithContext(whoIsKey.WithValue(r.Context(), who))) } +func (ap *APIServerProxy) recordRequestAsEvent(req *http.Request, who *apitype.WhoIsResponse) error { + if !ap.eventsEnabled { + return nil + } + + failOpen, addrs, err := determineRecorderConfig(who) + if err != nil { + return fmt.Errorf("error trying to determine whether the kubernetes api request needs to be recorded: %w", err) + } + if len(addrs) == 0 { + if failOpen { + return nil + } else { + return fmt.Errorf("forbidden: kubernetes api request must be recorded, but no recorders are available") + } + } + + factory := &request.RequestInfoFactory{ + APIPrefixes: sets.NewString("api", "apis"), + GrouplessAPIPrefixes: sets.NewString("api"), + } + + reqInfo, err := factory.NewRequestInfo(req) + if err != nil { + return fmt.Errorf("error parsing request %s %s: %w", req.Method, req.URL.Path, err) + } + + kubeReqInfo := sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: reqInfo.IsResourceRequest, + Path: reqInfo.Path, + Verb: reqInfo.Verb, + APIPrefix: reqInfo.APIPrefix, + APIGroup: reqInfo.APIGroup, + APIVersion: reqInfo.APIVersion, + Namespace: reqInfo.Namespace, + Resource: reqInfo.Resource, + Subresource: reqInfo.Subresource, + Name: reqInfo.Name, + Parts: reqInfo.Parts, + FieldSelector: reqInfo.FieldSelector, + LabelSelector: reqInfo.LabelSelector, + } + event := &sessionrecording.Event{ + Timestamp: time.Now().Unix(), + Kubernetes: kubeReqInfo, + Type: sessionrecording.KubernetesAPIEventType, + UserAgent: req.UserAgent(), + Request: sessionrecording.Request{ + Method: req.Method, + Path: req.URL.String(), + QueryParameters: req.URL.Query(), + }, + Source: sessionrecording.Source{ + NodeID: who.Node.StableID, + Node: strings.TrimSuffix(who.Node.Name, "."), + }, + } + + if !who.Node.IsTagged() { + event.Source.NodeUser = who.UserProfile.LoginName + event.Source.NodeUserID = who.UserProfile.ID + } else { + event.Source.NodeTags = who.Node.Tags + } + + bodyBytes, err := io.ReadAll(req.Body) + if err != nil { + return 
fmt.Errorf("failed to read body: %w", err) + } + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + event.Request.Body = bodyBytes + + var errs []error + // TODO: ChaosInTheCRD ensure that if there are multiple addrs timing out we don't experience slowdown on client waiting for response. + fail := true + for _, addr := range addrs { + data := new(bytes.Buffer) + if err := json.NewEncoder(data).Encode(event); err != nil { + return fmt.Errorf("error marshaling request event: %w", err) + } + + if err := ap.sendEventFunc(addr, data, ap.ts.Dial); err != nil { + if apiSupportErr, ok := err.(sessionrecording.EventAPINotSupportedErr); ok { + ap.log.Warnf(apiSupportErr.Error()) + fail = false + } else { + err := fmt.Errorf("error sending event to recorder with address %q: %v", addr.String(), err) + errs = append(errs, err) + } + } else { + return nil + } + } + + merr := errors.Join(errs...) + if fail && failOpen { + msg := fmt.Sprintf("[unexpected] failed to send event to recorders with errors: %s", merr.Error()) + msg = msg + "; failure mode is 'fail open'; continuing request without recording." + ap.log.Warn(msg) + return nil + } + + return merr +} + func (ap *APIServerProxy) addImpersonationHeadersAsRequired(r *http.Request) { r.URL.Scheme = ap.upstreamURL.Scheme r.URL.Host = ap.upstreamURL.Host diff --git a/k8s-operator/api-proxy/proxy_events_test.go b/k8s-operator/api-proxy/proxy_events_test.go new file mode 100644 index 0000000000000..8bcf484368a35 --- /dev/null +++ b/k8s-operator/api-proxy/proxy_events_test.go @@ -0,0 +1,549 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package apiproxy + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "net/netip" + "net/url" + "reflect" + "testing" + + "go.uber.org/zap" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/net/netx" + "tailscale.com/sessionrecording" + "tailscale.com/tailcfg" + "tailscale.com/tsnet" +) + +type fakeSender struct { + sent map[netip.AddrPort][]byte + err error + calls int +} + +func (s *fakeSender) Send(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) error { + s.calls++ + if s.err != nil { + return s.err + } + if s.sent == nil { + s.sent = make(map[netip.AddrPort][]byte) + } + data, _ := io.ReadAll(event) + s.sent[ap] = data + return nil +} + +func (s *fakeSender) Reset() { + s.sent = nil + s.err = nil + s.calls = 0 +} + +func TestRecordRequestAsEvent(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + + sender := &fakeSender{} + ap := &APIServerProxy{ + log: zl.Sugar(), + ts: &tsnet.Server{}, + sendEventFunc: sender.Send, + eventsEnabled: true, + } + + defaultWho := &apitype.WhoIsResponse{ + Node: &tailcfg.Node{ + StableID: "stable-id", + Name: "node.ts.net.", + }, + UserProfile: &tailcfg.UserProfile{ + ID: 1, + LoginName: "user@example.com", + }, + CapMap: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234"]}`), + tailcfg.RawMessage(`{"enforceRecorder": true}`), + }, + }, + } + + defaultSource := sessionrecording.Source{ + Node: "node.ts.net", + NodeID: "stable-id", + NodeUser: "user@example.com", + NodeUserID: 1, + } + + tests := []struct { + name string + req func() *http.Request + who *apitype.WhoIsResponse + setupSender func() + wantErr bool + wantEvent *sessionrecording.Event + wantNumCalls int + }{ + { + name: "request-with-dot-in-name", + req: func() *http.Request { + return 
httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo.bar", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo.bar", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo.bar", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Name: "foo.bar", + Parts: []string{"pods", "foo.bar"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-dash-in-name", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo-bar", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo-bar", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo-bar", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Name: "foo-bar", + Parts: []string{"pods", "foo-bar"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-query-parameter", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?watch=true", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?watch=true", + Body: nil, + QueryParameters: url.Values{"watch": []string{"true"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "watch", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-label-selector", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?labelSelector=app%3Dfoo", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?labelSelector=app%3Dfoo", + Body: nil, + QueryParameters: url.Values{"labelSelector": []string{"app=foo"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + LabelSelector: "app=foo", + }, + Source: defaultSource, + }, + }, + { + name: "request-with-field-selector", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods?fieldSelector=status.phase%3DRunning", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods?fieldSelector=status.phase%3DRunning", + Body: nil, + QueryParameters: 
url.Values{"fieldSelector": []string{"status.phase=Running"}}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + FieldSelector: "status.phase=Running", + }, + Source: defaultSource, + }, + }, + { + name: "request-for-non-existent-resource", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/foo", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/foo", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/foo", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "foo", + Parts: []string{"foo"}, + }, + Source: defaultSource, + }, + }, + { + name: "basic-request", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "multiple-recorders", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: defaultWho.Node, + UserProfile: defaultWho.UserProfile, + CapMap: tailcfg.PeerCapMap{ + tailcfg.PeerCapabilityKubernetes: []tailcfg.RawMessage{ + tailcfg.RawMessage(`{"recorderAddrs":["127.0.0.1:1234", "127.0.0.1:5678"]}`), + }, + }, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + }, + { + name: "request-with-body", + req: func() *http.Request { + req := httptest.NewRequest("POST", "/api/v1/pods", bytes.NewBufferString(`{"foo":"bar"}`)) + req.Header.Set("Content-Type", "application/json") + return req + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "POST", + Path: "/api/v1/pods", + Body: json.RawMessage(`{"foo":"bar"}`), + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "create", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: defaultSource, + }, + }, + { + name: "tagged-node", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: &tailcfg.Node{ + StableID: "stable-id", + Name: "node.ts.net.", + Tags: []string{"tag:foo"}, + }, + UserProfile: &tailcfg.UserProfile{}, + CapMap: defaultWho.CapMap, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/pods", + Body: nil, + 
QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/pods", + Verb: "list", + APIPrefix: "api", + APIVersion: "v1", + Resource: "pods", + Parts: []string{"pods"}, + }, + Source: sessionrecording.Source{ + Node: "node.ts.net", + NodeID: "stable-id", + NodeTags: []string{"tag:foo"}, + }, + }, + }, + { + name: "no-recorders", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: &apitype.WhoIsResponse{ + Node: defaultWho.Node, + UserProfile: defaultWho.UserProfile, + CapMap: tailcfg.PeerCapMap{}, + }, + setupSender: func() { sender.Reset() }, + wantNumCalls: 0, + }, + { + name: "error-sending", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/pods", nil) + }, + who: defaultWho, + setupSender: func() { + sender.Reset() + sender.err = errors.New("send error") + }, + wantErr: true, + wantNumCalls: 1, + }, + { + name: "request-for-crd", + req: func() *http.Request { + return httptest.NewRequest("GET", "/apis/custom.example.com/v1/myresources", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/apis/custom.example.com/v1/myresources", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/apis/custom.example.com/v1/myresources", + Verb: "list", + APIPrefix: "apis", + APIGroup: "custom.example.com", + APIVersion: "v1", + Resource: "myresources", + Parts: []string{"myresources"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-proxy-verb", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/foo/proxy", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/pods/foo/proxy", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/pods/foo/proxy", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "pods", + Subresource: "proxy", + Name: "foo", + Parts: []string{"pods", "foo", "proxy"}, + }, + Source: defaultSource, + }, + }, + { + name: "request-with-complex-path", + req: func() *http.Request { + return httptest.NewRequest("GET", "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", nil) + }, + who: defaultWho, + setupSender: func() { sender.Reset() }, + wantNumCalls: 1, + wantEvent: &sessionrecording.Event{ + Type: sessionrecording.KubernetesAPIEventType, + Request: sessionrecording.Request{ + Method: "GET", + Path: "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", + Body: nil, + QueryParameters: url.Values{}, + }, + Kubernetes: sessionrecording.KubernetesRequestInfo{ + IsResourceRequest: true, + Path: "/api/v1/namespaces/default/services/foo:8080/proxy-subpath/more/segments", + Verb: "get", + APIPrefix: "api", + APIVersion: "v1", + Namespace: "default", + Resource: "services", + Subresource: "proxy-subpath", + Name: "foo:8080", + Parts: []string{"services", "foo:8080", "proxy-subpath", "more", "segments"}, + }, 
+ Source: defaultSource, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.setupSender() + + req := tt.req() + err := ap.recordRequestAsEvent(req, tt.who) + + if (err != nil) != tt.wantErr { + t.Fatalf("recordRequestAsEvent() error = %v, wantErr %v", err, tt.wantErr) + } + + if sender.calls != tt.wantNumCalls { + t.Fatalf("expected %d calls to sender, got %d", tt.wantNumCalls, sender.calls) + } + + if tt.wantEvent != nil { + for _, sentData := range sender.sent { + var got sessionrecording.Event + if err := json.Unmarshal(sentData, &got); err != nil { + t.Fatalf("failed to unmarshal sent event: %v", err) + } + + got.Timestamp = 0 + tt.wantEvent.Timestamp = got.Timestamp + + got.UserAgent = "" + tt.wantEvent.UserAgent = "" + + if !bytes.Equal(got.Request.Body, tt.wantEvent.Request.Body) { + t.Errorf("sent event body does not match wanted event body.\nGot: %s\nWant: %s", string(got.Request.Body), string(tt.wantEvent.Request.Body)) + } + got.Request.Body = nil + tt.wantEvent.Request.Body = nil + + if !reflect.DeepEqual(&got, tt.wantEvent) { + t.Errorf("sent event does not match wanted event.\nGot: %#v\nWant: %#v", &got, tt.wantEvent) + } + } + } + }) + } +} diff --git a/k8s-operator/api.md b/k8s-operator/api.md index 79c8469e11bbc..979d199cb0783 100644 --- a/k8s-operator/api.md +++ b/k8s-operator/api.md @@ -213,7 +213,6 @@ NB: if you want cluster workloads to be able to refer to Tailscale Ingress using its MagicDNS name, you must also annotate the Ingress resource with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to ensure that the proxy created for the Ingress listens on its Pod IP address. -NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. @@ -444,6 +443,8 @@ _Appears in:_ | --- | --- | --- | --- | | `image` _[NameserverImage](#nameserverimage)_ | Nameserver image. Defaults to tailscale/k8s-nameserver:unstable. | | | | `service` _[NameserverService](#nameserverservice)_ | Service configuration. | | | +| `pod` _[NameserverPod](#nameserverpod)_ | Pod configuration. | | | +| `replicas` _integer_ | Replicas specifies how many Pods to create. Defaults to 1. | | Minimum: 0
| #### NameserverImage @@ -463,6 +464,22 @@ _Appears in:_ | `tag` _string_ | Tag defaults to unstable. | | | +#### NameserverPod + + + + + + + +_Appears in:_ +- [Nameserver](#nameserver) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | If specified, applies tolerations to the pods deployed by the DNSConfig resource. | | | + + #### NameserverService @@ -537,6 +554,8 @@ _Appears in:_ | `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#toleration-v1-core) array_ | Proxy Pod's tolerations.
By default Tailscale Kubernetes operator does not apply any
tolerations.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | | `topologySpreadConstraints` _[TopologySpreadConstraint](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#topologyspreadconstraint-v1-core) array_ | Proxy Pod's topology spread constraints.
By default Tailscale Kubernetes operator does not apply any topology spread constraints.
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | | | `priorityClassName` _string_ | PriorityClassName for the proxy Pod.
By default Tailscale Kubernetes operator does not apply any priority class.
https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling | | | +| `dnsPolicy` _[DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#dnspolicy-v1-core)_ | DNSPolicy defines how DNS will be configured for the proxy Pod.
By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default).
https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy | | Enum: [ClusterFirstWithHostNet ClusterFirst Default None]
| +| `dnsConfig` _[PodDNSConfig](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.3/#poddnsconfig-v1-core)_ | DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy.
When DNSPolicy is set to "None", DNSConfig must be specified.
https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config | | | #### PortRange diff --git a/k8s-operator/apis/v1alpha1/types_proxyclass.go b/k8s-operator/apis/v1alpha1/types_proxyclass.go index ea4e6a27c49de..4026f90848ef1 100644 --- a/k8s-operator/apis/v1alpha1/types_proxyclass.go +++ b/k8s-operator/apis/v1alpha1/types_proxyclass.go @@ -303,6 +303,17 @@ type Pod struct { // https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling // +optional PriorityClassName string `json:"priorityClassName,omitempty"` + // DNSPolicy defines how DNS will be configured for the proxy Pod. + // By default the Tailscale Kubernetes Operator does not set a DNS policy (uses cluster default). + // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + // +kubebuilder:validation:Enum=ClusterFirstWithHostNet;ClusterFirst;Default;None + // +optional + DNSPolicy *corev1.DNSPolicy `json:"dnsPolicy,omitempty"` + // DNSConfig defines DNS parameters for the proxy Pod in addition to those generated from DNSPolicy. + // When DNSPolicy is set to "None", DNSConfig must be specified. + // https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config + // +optional + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` } // +kubebuilder:validation:XValidation:rule="!(has(self.serviceMonitor) && self.serviceMonitor.enable && !self.enable)",message="ServiceMonitor can only be enabled if metrics are enabled" diff --git a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go index 0e26ee6476d7a..7991003b82dff 100644 --- a/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go +++ b/k8s-operator/apis/v1alpha1/types_tsdnsconfig.go @@ -6,6 +6,7 @@ package v1alpha1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -45,7 +46,6 @@ var DNSConfigKind = "DNSConfig" // using its MagicDNS name, you must also annotate the Ingress resource with // tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation to // ensure that the proxy created for the Ingress listens on its Pod IP address. -// NB: Clusters where Pods get assigned IPv6 addresses only are currently not supported. type DNSConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -85,6 +85,13 @@ type Nameserver struct { // Service configuration. // +optional Service *NameserverService `json:"service,omitempty"` + // Pod configuration. + // +optional + Pod *NameserverPod `json:"pod,omitempty"` + // Replicas specifies how many Pods to create. Defaults to 1. + // +optional + // +kubebuilder:validation:Minimum=0 + Replicas *int32 `json:"replicas,omitempty"` } type NameserverImage struct { @@ -102,6 +109,12 @@ type NameserverService struct { ClusterIP string `json:"clusterIP,omitempty"` } +type NameserverPod struct { + // If specified, applies tolerations to the pods deployed by the DNSConfig resource. 
+ // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + type DNSConfigStatus struct { // +listType=map // +listMapKey=type diff --git a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go index d7a90ad0fd895..7492f1e547395 100644 --- a/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go +++ b/k8s-operator/apis/v1alpha1/zz_generated.deepcopy.go @@ -422,6 +422,16 @@ func (in *Nameserver) DeepCopyInto(out *Nameserver) { *out = new(NameserverService) **out = **in } + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(NameserverPod) + (*in).DeepCopyInto(*out) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Nameserver. @@ -449,6 +459,28 @@ func (in *NameserverImage) DeepCopy() *NameserverImage { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameserverPod) DeepCopyInto(out *NameserverPod) { + *out = *in + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameserverPod. +func (in *NameserverPod) DeepCopy() *NameserverPod { + if in == nil { + return nil + } + out := new(NameserverPod) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameserverService) DeepCopyInto(out *NameserverService) { *out = *in @@ -569,6 +601,16 @@ func (in *Pod) DeepCopyInto(out *Pod) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DNSPolicy != nil { + in, out := &in.DNSPolicy, &out.DNSPolicy + *out = new(corev1.DNSPolicy) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(corev1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. diff --git a/k8s-operator/sessionrecording/hijacker.go b/k8s-operator/sessionrecording/hijacker.go index 789a9fdb9f6a3..2d6c94710e866 100644 --- a/k8s-operator/sessionrecording/hijacker.go +++ b/k8s-operator/sessionrecording/hijacker.go @@ -11,6 +11,7 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io" "net" @@ -19,7 +20,6 @@ import ( "net/netip" "strings" - "github.com/pkg/errors" "go.uber.org/zap" "tailscale.com/client/tailscale/apitype" "tailscale.com/k8s-operator/sessionrecording/spdy" @@ -31,7 +31,6 @@ import ( "tailscale.com/tsnet" "tailscale.com/tstime" "tailscale.com/util/clientmetric" - "tailscale.com/util/multierr" ) const ( @@ -123,7 +122,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { return nil, nil, fmt.Errorf("error hijacking connection: %w", err) } - conn, err := h.setUpRecording(h.req.Context(), reqConn) + conn, err := h.setUpRecording(reqConn) if err != nil { return nil, nil, fmt.Errorf("error setting up session recording: %w", err) } @@ -134,7 +133,7 @@ func (h *Hijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { // spdyHijacker.addrs. Returns conn from provided opts, wrapped in recording // logic. 
If connecting to the recorder fails or an error is received during the // session and spdyHijacker.failOpen is false, connection will be closed. -func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) { +func (h *Hijacker) setUpRecording(conn net.Conn) (_ net.Conn, retErr error) { const ( // https://docs.asciinema.org/manual/asciicast/v2/ asciicastv2 = 2 @@ -148,6 +147,14 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, errChan <-chan error ) h.log.Infof("kubectl %s session will be recorded, recorders: %v, fail open policy: %t", h.sessionType, h.addrs, h.failOpen) + // NOTE: (ChaosInTheCRD) we want to use a dedicated context here, rather than the context from the request, + // otherwise the context can be cancelled by the client (kubectl) while we are still streaming to tsrecorder. + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + if retErr != nil { + cancel() + } + }() qp := h.req.URL.Query() container := strings.Join(qp[containerKey], "") var recorderAddr net.Addr @@ -166,7 +173,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } msg = msg + "; failure mode is 'fail closed'; closing connection." if err := closeConnWithWarning(conn, msg); err != nil { - return nil, multierr.New(errors.New(msg), err) + return nil, errors.Join(errors.New(msg), err) } return nil, errors.New(msg) } else { @@ -214,6 +221,7 @@ func (h *Hijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, } go func() { + defer cancel() var err error select { case <-ctx.Done(): @@ -245,7 +253,7 @@ func closeConnWithWarning(conn net.Conn, msg string) error { b := io.NopCloser(bytes.NewBuffer([]byte(msg))) resp := http.Response{Status: http.StatusText(http.StatusForbidden), StatusCode: http.StatusForbidden, Body: b} if err := resp.Write(conn); err != nil { - return multierr.New(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) + return errors.Join(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) } return conn.Close() } diff --git a/k8s-operator/sessionrecording/hijacker_test.go b/k8s-operator/sessionrecording/hijacker_test.go index cac6f55c7c7d7..fb45820a71b86 100644 --- a/k8s-operator/sessionrecording/hijacker_test.go +++ b/k8s-operator/sessionrecording/hijacker_test.go @@ -95,7 +95,7 @@ func Test_Hijacker(t *testing.T) { proto: tt.proto, } ctx := context.Background() - _, err := h.setUpRecording(ctx, tc) + _, err := h.setUpRecording(tc) if (err != nil) != tt.wantsSetupErr { t.Errorf("spdyHijacker.setupRecording() error = %v, wantErr %v", err, tt.wantsSetupErr) return diff --git a/k8s-operator/sessionrecording/ws/conn.go b/k8s-operator/sessionrecording/ws/conn.go index a34379658caa2..a618f85fb7822 100644 --- a/k8s-operator/sessionrecording/ws/conn.go +++ b/k8s-operator/sessionrecording/ws/conn.go @@ -21,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/util/remotecommand" "tailscale.com/k8s-operator/sessionrecording/tsrecorder" "tailscale.com/sessionrecording" - "tailscale.com/util/multierr" ) // New wraps the provided network connection and returns a connection whose reads and writes will get triggered as data is received on the hijacked connection. 
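The util/multierr to errors.Join migration in the hijacker.go hunks above and the ws/conn.go and kube/kubeclient hunks below leans on the stdlib semantics: nil errors are dropped, joining only nils yields nil, and the joined error still unwraps for errors.Is/As, so Close()-style call sites keep returning nil when nothing failed. A small stand-alone illustration (the values are hypothetical, not taken from this change):

package main

import (
	"errors"
	"fmt"
)

func main() {
	var connCloseErr, recCloseErr error
	fmt.Println(errors.Join(connCloseErr, recCloseErr) == nil) // true: joining only nils yields nil

	recCloseErr = errors.New("recorder: close failed")
	err := errors.Join(connCloseErr, recCloseErr)
	fmt.Println(err)                         // recorder: close failed
	fmt.Println(errors.Is(err, recCloseErr)) // true: the joined error unwraps to its parts
}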
@@ -316,7 +315,7 @@ func (c *conn) Close() error { c.closed = true connCloseErr := c.Conn.Close() recCloseErr := c.rec.Close() - return multierr.New(connCloseErr, recCloseErr) + return errors.Join(connCloseErr, recCloseErr) } // writeBufHasIncompleteFragment returns true if the latest data message diff --git a/k8s-operator/utils.go b/k8s-operator/utils.go index 420d7e49c7ec2..2acbf338dbdd3 100644 --- a/k8s-operator/utils.go +++ b/k8s-operator/utils.go @@ -27,6 +27,11 @@ type Records struct { Version string `json:"version"` // IP4 contains a mapping of DNS names to IPv4 address(es). IP4 map[string][]string `json:"ip4"` + // IP6 contains a mapping of DNS names to IPv6 address(es). + // This field is optional and will be omitted from JSON if empty. + // It enables dual-stack DNS support in Kubernetes clusters. + // +optional + IP6 map[string][]string `json:"ip6,omitempty"` } // TailscaledConfigFileName returns a tailscaled config file name in diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index 332b21106ecfb..0ed960f4ddcd4 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -15,6 +15,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" + "errors" "fmt" "io" "log" @@ -29,7 +30,6 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/tstime" - "tailscale.com/util/multierr" ) const ( @@ -397,7 +397,7 @@ func (c *client) CheckSecretPermissions(ctx context.Context, secretName string) } } if len(errs) > 0 { - return false, false, multierr.New(errs...) + return false, false, errors.Join(errs...) } canPatch, err = c.checkPermission(ctx, "patch", TypeSecrets, secretName) if err != nil { diff --git a/license_test.go b/license_test.go index ec452a6e36be7..9b62c48ed218e 100644 --- a/license_test.go +++ b/license_test.go @@ -34,7 +34,7 @@ func TestLicenseHeaders(t *testing.T) { // WireGuard copyright "cmd/tailscale/cli/authenticode_windows.go", - "wgengine/router/ifconfig_windows.go", + "wgengine/router/osrouter/ifconfig_windows.go", // noiseexplorer.com copyright "control/controlbase/noiseexplorer_test.go", diff --git a/licenses/android.md b/licenses/android.md index 881f3ed3df9ea..f578c17cb19e8 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -24,7 +24,6 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.17.11/zstd/internal/xxhash/LICENSE.txt)) - [github.com/kortschak/wol](https://pkg.go.dev/github.com/kortschak/wol) ([BSD-3-Clause](https://github.com/kortschak/wol/blob/da482cc4850a/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) - [github.com/tailscale/goupnp](https://pkg.go.dev/github.com/tailscale/goupnp) ([BSD-2-Clause](https://github.com/tailscale/goupnp/blob/c64d0f06ea05/LICENSE)) - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 91ba966981785..4c50e95595742 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -33,7 +33,7 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/cc2cfa0554c3/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) @@ -53,7 +53,6 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/sdnotify](https://pkg.go.dev/github.com/mdlayher/sdnotify) ([MIT](https://github.com/mdlayher/sdnotify/blob/v1.0.0/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.22/LICENSE)) - [github.com/prometheus-community/pro-bing](https://pkg.go.dev/github.com/prometheus-community/pro-bing) ([MIT](https://github.com/prometheus-community/pro-bing/blob/v0.4.0/LICENSE)) @@ -68,13 +67,13 @@ See also the dependencies in the [Tailscale CLI][]. - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.29.0:LICENSE)) - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) 
([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.12.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 6feb85aafcea6..0ef5bcf61d5f8 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -65,7 +65,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/mattn/go-colorable](https://pkg.go.dev/github.com/mattn/go-colorable) ([MIT](https://github.com/mattn/go-colorable/blob/v0.1.13/LICENSE)) - [github.com/mattn/go-isatty](https://pkg.go.dev/github.com/mattn/go-isatty) ([MIT](https://github.com/mattn/go-isatty/blob/v0.0.20/LICENSE)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.58/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/peterbourgon/ff/v3](https://pkg.go.dev/github.com/peterbourgon/ff/v3) ([Apache-2.0](https://github.com/peterbourgon/ff/blob/v3.4.0/LICENSE)) - [github.com/pierrec/lz4/v4](https://pkg.go.dev/github.com/pierrec/lz4/v4) ([BSD-3-Clause](https://github.com/pierrec/lz4/blob/v4.1.21/LICENSE)) @@ -74,7 +73,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/certstore](https://pkg.go.dev/github.com/tailscale/certstore) ([MIT](https://github.com/tailscale/certstore/blob/d3fa0460f47e/LICENSE.md)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - - [github.com/tailscale/peercred](https://pkg.go.dev/github.com/tailscale/peercred) ([BSD-3-Clause](https://github.com/tailscale/peercred/blob/35a0c7bd7edc/LICENSE)) - [github.com/tailscale/web-client-prebuilt](https://pkg.go.dev/github.com/tailscale/web-client-prebuilt) ([BSD-3-Clause](https://github.com/tailscale/web-client-prebuilt/blob/d4cd19a26976/LICENSE)) - [github.com/tailscale/wf](https://pkg.go.dev/github.com/tailscale/wf) ([BSD-3-Clause](https://github.com/tailscale/wf/blob/6fbb0a674ee6/LICENSE)) - [github.com/tailscale/wireguard-go](https://pkg.go.dev/github.com/tailscale/wireguard-go) ([MIT](https://github.com/tailscale/wireguard-go/blob/1d0488a3d7da/LICENSE)) @@ -101,7 +99,6 @@ Some packages may only be included on certain architectures or operating systems - [k8s.io/client-go/util/homedir](https://pkg.go.dev/k8s.io/client-go/util/homedir) ([Apache-2.0](https://github.com/kubernetes/client-go/blob/v0.32.0/LICENSE)) - [sigs.k8s.io/yaml](https://pkg.go.dev/sigs.k8s.io/yaml) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/LICENSE)) - [sigs.k8s.io/yaml/goyaml.v2](https://pkg.go.dev/sigs.k8s.io/yaml/goyaml.v2) ([Apache-2.0](https://github.com/kubernetes-sigs/yaml/blob/v1.4.0/goyaml.v2/LICENSE)) - - [software.sslmate.com/src/go-pkcs12](https://pkg.go.dev/software.sslmate.com/src/go-pkcs12) 
([BSD-3-Clause](https://github.com/SSLMate/go-pkcs12/blob/v0.4.0/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) - [tailscale.com/tempfork/gliderlabs/ssh](https://pkg.go.dev/tailscale.com/tempfork/gliderlabs/ssh) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/gliderlabs/ssh/LICENSE)) - [tailscale.com/tempfork/spf13/cobra](https://pkg.go.dev/tailscale.com/tempfork/spf13/cobra) ([Apache-2.0](https://github.com/tailscale/tailscale/blob/HEAD/tempfork/spf13/cobra/LICENSE.txt)) diff --git a/licenses/windows.md b/licenses/windows.md index aff149d4d4ba4..b284aa1361f5d 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -10,49 +10,28 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/alexbrainman/sspi](https://pkg.go.dev/github.com/alexbrainman/sspi) ([BSD-3-Clause](https://github.com/alexbrainman/sspi/blob/1a75b4708caa/LICENSE)) - [github.com/apenwarr/fixconsole](https://pkg.go.dev/github.com/apenwarr/fixconsole) ([Apache-2.0](https://github.com/apenwarr/fixconsole/blob/5a9f6489cc29/LICENSE)) - [github.com/apenwarr/w32](https://pkg.go.dev/github.com/apenwarr/w32) ([BSD-3-Clause](https://github.com/apenwarr/w32/blob/aa00fece76ab/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.45.0/service/ssm/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/beorn7/perks/quantile](https://pkg.go.dev/github.com/beorn7/perks/quantile) ([MIT](https://github.com/beorn7/perks/blob/v1.0.1/LICENSE)) - [github.com/cespare/xxhash/v2](https://pkg.go.dev/github.com/cespare/xxhash/v2) ([MIT](https://github.com/cespare/xxhash/blob/v2.3.0/LICENSE.txt)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) ([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/ebf49471dced/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/cc2cfa0554c3/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/2c02b8208cf8/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/go-cmp/cmp](https://pkg.go.dev/github.com/google/go-cmp/cmp) ([BSD-3-Clause](https://github.com/google/go-cmp/blob/v0.7.0/LICENSE)) - - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) 
([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/gregjones/httpcache](https://pkg.go.dev/github.com/gregjones/httpcache) ([MIT](https://github.com/gregjones/httpcache/blob/901d90724c79/LICENSE.txt)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) - [github.com/jsimonetti/rtnetlink](https://pkg.go.dev/github.com/jsimonetti/rtnetlink) ([MIT](https://github.com/jsimonetti/rtnetlink/blob/v1.4.1/LICENSE.md)) - [github.com/klauspost/compress](https://pkg.go.dev/github.com/klauspost/compress) ([Apache-2.0](https://github.com/klauspost/compress/blob/v1.18.0/LICENSE)) - [github.com/klauspost/compress/internal/snapref](https://pkg.go.dev/github.com/klauspost/compress/internal/snapref) ([BSD-3-Clause](https://github.com/klauspost/compress/blob/v1.18.0/internal/snapref/LICENSE)) - [github.com/klauspost/compress/zstd/internal/xxhash](https://pkg.go.dev/github.com/klauspost/compress/zstd/internal/xxhash) ([MIT](https://github.com/klauspost/compress/blob/v1.18.0/zstd/internal/xxhash/LICENSE.txt)) - [github.com/mdlayher/netlink](https://pkg.go.dev/github.com/mdlayher/netlink) ([MIT](https://github.com/mdlayher/netlink/blob/fbb4dce95f42/LICENSE.md)) - [github.com/mdlayher/socket](https://pkg.go.dev/github.com/mdlayher/socket) ([MIT](https://github.com/mdlayher/socket/blob/v0.5.0/LICENSE.md)) - - [github.com/miekg/dns](https://pkg.go.dev/github.com/miekg/dns) ([BSD-3-Clause](https://github.com/miekg/dns/blob/v1.1.65/LICENSE)) - [github.com/mitchellh/go-ps](https://pkg.go.dev/github.com/mitchellh/go-ps) ([MIT](https://github.com/mitchellh/go-ps/blob/v1.0.0/LICENSE.md)) - [github.com/munnerz/goautoneg](https://pkg.go.dev/github.com/munnerz/goautoneg) ([BSD-3-Clause](https://github.com/munnerz/goautoneg/blob/a7dc8b61c822/LICENSE)) - [github.com/nfnt/resize](https://pkg.go.dev/github.com/nfnt/resize) ([ISC](https://github.com/nfnt/resize/blob/83c6a9932646/LICENSE)) @@ -63,24 +42,21 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/skip2/go-qrcode](https://pkg.go.dev/github.com/skip2/go-qrcode) ([MIT](https://github.com/skip2/go-qrcode/blob/da1b6568686e/LICENSE)) - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/992244df8c5a/LICENSE)) - - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/6376defdac3f/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/963e260a8227/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/f4da2b8ee071/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.5/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.41.0:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/645b1fa8:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.42.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/df929982:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.27.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.26.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.43.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.16.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.35.0:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.34.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.28.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) 
([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.28.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.44.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.17.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.36.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.35.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.36.7/LICENSE)) diff --git a/log/sockstatlog/logger.go b/log/sockstatlog/logger.go index 3cc27c22d8af7..e0744de0f089a 100644 --- a/log/sockstatlog/logger.go +++ b/log/sockstatlog/logger.go @@ -17,6 +17,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" @@ -25,6 +26,7 @@ import ( "tailscale.com/net/sockstats" "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" ) @@ -96,8 +98,8 @@ func SockstatLogID(logID logid.PublicID) logid.PrivateID { // // The netMon parameter is optional. It should be specified in environments where // Tailscaled is manipulating the routing table. 
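The hunk below adds an *eventbus.Bus as a new final parameter to sockstatlog.NewLogger (and the function now returns nil, nil when logtail is compiled out). A minimal call-site sketch, assuming eventbus.New is the bus constructor; the log directory is hypothetical, and netmon.NewStatic and health.NewTracker(bus) are the same helpers the updated tests in this patch use:

package main

import (
    "log"

    "tailscale.com/health"
    "tailscale.com/log/sockstatlog"
    "tailscale.com/net/netmon"
    "tailscale.com/types/logid"
    "tailscale.com/util/eventbus"
)

func main() {
    bus := eventbus.New() // assumed constructor
    var id logid.PublicID // zero value for illustration; real callers derive it from the log policy
    lg, err := sockstatlog.NewLogger("/tmp/sockstats", log.Printf, id, netmon.NewStatic(), health.NewTracker(bus), bus)
    if err != nil {
        log.Fatal(err)
    }
    if lg == nil {
        // sockstats unavailable or logtail compiled out; nothing to do.
        log.Printf("sockstats logging unavailable in this build")
    }
}
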
-func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker) (*Logger, error) { - if !sockstats.IsAvailable { +func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (*Logger, error) { + if !sockstats.IsAvailable || !buildfeatures.HasLogTail { return nil, nil } if netMon == nil { @@ -126,6 +128,7 @@ func NewLogger(logdir string, logf logger.Logf, logID logid.PublicID, netMon *ne PrivateID: SockstatLogID(logID), Collection: "sockstats.log.tailscale.io", Buffer: filch, + Bus: bus, CompressLogs: true, FlushDelayFn: func() time.Duration { // set flush delay to 100 years so it never flushes automatically diff --git a/log/sockstatlog/logger_test.go b/log/sockstatlog/logger_test.go index 31fb17e460141..e5c2feb2986d8 100644 --- a/log/sockstatlog/logger_test.go +++ b/log/sockstatlog/logger_test.go @@ -24,7 +24,7 @@ func TestResourceCleanup(t *testing.T) { if err != nil { t.Fatal(err) } - lg, err := NewLogger(td, logger.Discard, id.Public(), nil, nil) + lg, err := NewLogger(td, logger.Discard, id.Public(), nil, nil, nil) if err != nil { t.Fatal(err) } diff --git a/logpolicy/logpolicy.go b/logpolicy/logpolicy.go index 587b421f3c4cc..9c7e62ab0da11 100644 --- a/logpolicy/logpolicy.go +++ b/logpolicy/logpolicy.go @@ -31,6 +31,8 @@ import ( "golang.org/x/term" "tailscale.com/atomicfile" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/log/filelogger" @@ -43,12 +45,12 @@ import ( "tailscale.com/net/netns" "tailscale.com/net/netx" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/paths" "tailscale.com/safesocket" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/syspolicy/pkey" @@ -106,6 +108,7 @@ type Policy struct { // Logtail is the logger. Logtail *logtail.Logger // PublicID is the logger's instance identifier. + // It may be the zero value if logging is not in use. PublicID logid.PublicID // Logf is where to write informational messages about this Logger. Logf logger.Logf @@ -464,18 +467,6 @@ func New(collection string, netMon *netmon.Monitor, health *health.Tracker, logf }.New() } -// Deprecated: Use [Options.New] instead. -func NewWithConfigPath(collection, dir, cmdName string, netMon *netmon.Monitor, health *health.Tracker, logf logger.Logf) *Policy { - return Options{ - Collection: collection, - Dir: dir, - CmdName: cmdName, - NetMon: netMon, - Health: health, - Logf: logf, - }.New() -} - // Options is used to construct a [Policy]. type Options struct { // Collection is a required collection to upload logs under. @@ -499,6 +490,11 @@ type Options struct { // If non-nil, it's used to construct the default HTTP client. Health *health.Tracker + // Bus is an optional parameter for communication on the eventbus. + // If non-nil, it's passed to logtail for use in interface monitoring. + // TODO(cmol): Make this non-optional when it's plumbed in by the clients. + Bus *eventbus.Bus + // Logf is an optional logger to use. // If nil, [log.Printf] will be used instead. 
Logf logger.Logf @@ -625,6 +621,7 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { Stderr: logWriter{console}, CompressLogs: true, MaxUploadSize: opts.MaxUploadSize, + Bus: opts.Bus, } if opts.Collection == logtail.CollectionNode { conf.MetricsDelta = clientmetric.EncodeLogTailMetricsDelta @@ -694,7 +691,7 @@ func (opts Options) init(disableLogging bool) (*logtail.Config, *Policy) { // New returns a new log policy (a logger and its instance ID). func (opts Options) New() *Policy { - disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" + disableLogging := envknob.NoLogsNoSupport() || testenv.InTest() || runtime.GOOS == "plan9" || !buildfeatures.HasLogTail _, policy := opts.init(disableLogging) return policy } @@ -868,7 +865,7 @@ type TransportOptions struct { // New returns an HTTP Transport particularly suited to uploading logs // to the given host name. See [DialContext] for details on how it works. func (opts TransportOptions) New() http.RoundTripper { - if testenv.InTest() { + if testenv.InTest() || envknob.NoLogsNoSupport() { return noopPretendSuccessTransport{} } if opts.NetMon == nil { @@ -880,8 +877,12 @@ func (opts TransportOptions) New() http.RoundTripper { tr.TLSClientConfig = opts.TLSClientConfig.Clone() } - tr.Proxy = tshttpproxy.ProxyFromEnvironment - tshttpproxy.SetTransportGetProxyConnectHeader(tr) + if buildfeatures.HasUseProxy { + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() + if set, ok := feature.HookProxySetTransportGetProxyConnectHeader.GetOk(); ok { + set(tr) + } + } // We do our own zstd compression on uploads, and responses never contain any payload, // so don't send "Accept-Encoding: gzip" to save a few bytes on the wire, since there diff --git a/logtail/buffer.go b/logtail/buffer.go index c9f2e1ad02e0a..d14d8fbf6ae51 100644 --- a/logtail/buffer.go +++ b/logtail/buffer.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + package logtail import ( diff --git a/logtail/config.go b/logtail/config.go new file mode 100644 index 0000000000000..bf47dd8aa7b52 --- /dev/null +++ b/logtail/config.go @@ -0,0 +1,67 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package logtail + +import ( + "io" + "net/http" + "time" + + "tailscale.com/tstime" + "tailscale.com/types/logid" + "tailscale.com/util/eventbus" +) + +// DefaultHost is the default host name to upload logs to when +// Config.BaseURL isn't provided. +const DefaultHost = "log.tailscale.com" + +const defaultFlushDelay = 2 * time.Second + +const ( + // CollectionNode is the name of a logtail Config.Collection + // for tailscaled (or equivalent: IPNExtension, Android app). 
+ CollectionNode = "tailnode.log.tailscale.io" +) + +type Config struct { + Collection string // collection name, a domain name + PrivateID logid.PrivateID // private ID for the primary log stream + CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream + BaseURL string // if empty defaults to "https://log.tailscale.com" + HTTPC *http.Client // if empty defaults to http.DefaultClient + SkipClientTime bool // if true, client_time is not written to logs + LowMemory bool // if true, logtail minimizes memory use + Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now + Stderr io.Writer // if set, logs are sent here instead of os.Stderr + Bus *eventbus.Bus // if set, uses the eventbus for awaitInternetUp instead of callback + StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only + Buffer Buffer // temp storage, if nil a MemoryBuffer + CompressLogs bool // whether to compress the log uploads + MaxUploadSize int // maximum upload size; 0 means using the default + + // MetricsDelta, if non-nil, is a func that returns an encoding + // delta in clientmetrics to upload alongside existing logs. + // It can return either an empty string (for nothing) or a string + // that's safe to embed in a JSON string literal without further escaping. + MetricsDelta func() string + + // FlushDelayFn, if non-nil is a func that returns how long to wait to + // accumulate logs before uploading them. 0 or negative means to upload + // immediately. + // + // If nil, a default value is used. (currently 2 seconds) + FlushDelayFn func() time.Duration + + // IncludeProcID, if true, results in an ephemeral process identifier being + // included in logs. The ID is random and not guaranteed to be globally + // unique, but it can be used to distinguish between different instances + // running with same PrivateID. + IncludeProcID bool + + // IncludeProcSequence, if true, results in an ephemeral sequence number + // being included in the logs. The sequence number is incremented for each + // log message sent, but is not persisted across process restarts. + IncludeProcSequence bool +} diff --git a/logtail/logtail.go b/logtail/logtail.go index b355addd20b82..52823fedf4309 100644 --- a/logtail/logtail.go +++ b/logtail/logtail.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_logtail + // Package logtail sends logs to log.tailscale.com. package logtail @@ -30,6 +32,7 @@ import ( "tailscale.com/tstime" tslogger "tailscale.com/types/logger" "tailscale.com/types/logid" + "tailscale.com/util/eventbus" "tailscale.com/util/set" "tailscale.com/util/truncate" "tailscale.com/util/zstdframe" @@ -51,58 +54,6 @@ const lowMemRatio = 4 // but not too large to be a notable waste of memory if retained forever. const bufferSize = 4 << 10 -// DefaultHost is the default host name to upload logs to when -// Config.BaseURL isn't provided. -const DefaultHost = "log.tailscale.com" - -const defaultFlushDelay = 2 * time.Second - -const ( - // CollectionNode is the name of a logtail Config.Collection - // for tailscaled (or equivalent: IPNExtension, Android app). 
- CollectionNode = "tailnode.log.tailscale.io" -) - -type Config struct { - Collection string // collection name, a domain name - PrivateID logid.PrivateID // private ID for the primary log stream - CopyPrivateID logid.PrivateID // private ID for a log stream that is a superset of this log stream - BaseURL string // if empty defaults to "https://log.tailscale.com" - HTTPC *http.Client // if empty defaults to http.DefaultClient - SkipClientTime bool // if true, client_time is not written to logs - LowMemory bool // if true, logtail minimizes memory use - Clock tstime.Clock // if set, Clock.Now substitutes uses of time.Now - Stderr io.Writer // if set, logs are sent here instead of os.Stderr - StderrLevel int // max verbosity level to write to stderr; 0 means the non-verbose messages only - Buffer Buffer // temp storage, if nil a MemoryBuffer - CompressLogs bool // whether to compress the log uploads - MaxUploadSize int // maximum upload size; 0 means using the default - - // MetricsDelta, if non-nil, is a func that returns an encoding - // delta in clientmetrics to upload alongside existing logs. - // It can return either an empty string (for nothing) or a string - // that's safe to embed in a JSON string literal without further escaping. - MetricsDelta func() string - - // FlushDelayFn, if non-nil is a func that returns how long to wait to - // accumulate logs before uploading them. 0 or negative means to upload - // immediately. - // - // If nil, a default value is used. (currently 2 seconds) - FlushDelayFn func() time.Duration - - // IncludeProcID, if true, results in an ephemeral process identifier being - // included in logs. The ID is random and not guaranteed to be globally - // unique, but it can be used to distinguish between different instances - // running with same PrivateID. - IncludeProcID bool - - // IncludeProcSequence, if true, results in an ephemeral sequence number - // being included in the logs. The sequence number is incremented for each - // log message sent, but is not persisted across process restarts. 
- IncludeProcSequence bool -} - func NewLogger(cfg Config, logf tslogger.Logf) *Logger { if cfg.BaseURL == "" { cfg.BaseURL = "https://" + DefaultHost @@ -170,6 +121,11 @@ func NewLogger(cfg Config, logf tslogger.Logf) *Logger { shutdownStart: make(chan struct{}), shutdownDone: make(chan struct{}), } + + if cfg.Bus != nil { + l.eventClient = cfg.Bus.Client("logtail.Logger") + l.changeDeltaSub = eventbus.Subscribe[netmon.ChangeDelta](l.eventClient) + } l.SetSockstatsLabel(sockstats.LabelLogtailLogger) l.compressLogs = cfg.CompressLogs @@ -206,6 +162,8 @@ type Logger struct { privateID logid.PrivateID httpDoCalls atomic.Int32 sockstatsLabel atomicSocktatsLabel + eventClient *eventbus.Client + changeDeltaSub *eventbus.Subscriber[netmon.ChangeDelta] procID uint32 includeProcSequence bool @@ -271,6 +229,9 @@ func (l *Logger) Shutdown(ctx context.Context) error { l.httpc.CloseIdleConnections() }() + if l.eventClient != nil { + l.eventClient.Close() + } l.shutdownStartMu.Lock() select { case <-l.shutdownStart: @@ -467,6 +428,25 @@ func (l *Logger) internetUp() bool { } func (l *Logger) awaitInternetUp(ctx context.Context) { + if l.eventClient != nil { + for { + if l.internetUp() { + return + } + select { + case <-ctx.Done(): + return // give up + case <-l.changeDeltaSub.Done(): + return // give up (closing down) + case delta := <-l.changeDeltaSub.Events(): + if delta.New.AnyInterfaceUp() || l.internetUp() { + fmt.Fprintf(l.stderr, "logtail: internet back up\n") + return + } + fmt.Fprintf(l.stderr, "logtail: network changed, but is not up") + } + } + } upc := make(chan bool, 1) defer l.netMonitor.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { if delta.New.AnyInterfaceUp() { @@ -708,11 +688,6 @@ func appendTruncatedString(dst, src []byte, n int) []byte { return dst } -func (l *Logger) AppendTextOrJSONLocked(dst, src []byte) []byte { - l.clock = tstime.StdClock{} - return l.appendTextOrJSONLocked(dst, src, 0) -} - // appendTextOrJSONLocked appends a raw text message or a raw JSON object // in the Tailscale JSON log format. func (l *Logger) appendTextOrJSONLocked(dst, src []byte, level int) []byte { diff --git a/logtail/logtail_omit.go b/logtail/logtail_omit.go new file mode 100644 index 0000000000000..814fd3be90d8e --- /dev/null +++ b/logtail/logtail_omit.go @@ -0,0 +1,44 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_logtail + +package logtail + +import ( + "context" + + tslogger "tailscale.com/types/logger" + "tailscale.com/types/logid" +) + +// Noop implementations of everything when ts_omit_logtail is set. 
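With the Bus plumbed from logpolicy.Options through logtail.Config, awaitInternetUp above now blocks on an eventbus.Subscriber[netmon.ChangeDelta] instead of registering a netmon change callback. A condensed sketch of that subscribe-and-select pattern, using only the calls the patch itself makes; the package, function, and client names here are illustrative:

package example

import (
    "context"

    "tailscale.com/net/netmon"
    "tailscale.com/util/eventbus"
)

// awaitUp waits until some network interface reports as up, the context is
// canceled, or the bus shuts down.
func awaitUp(ctx context.Context, bus *eventbus.Bus) {
    client := bus.Client("example.Waiter") // illustrative client name
    defer client.Close()
    sub := eventbus.Subscribe[netmon.ChangeDelta](client)
    for {
        select {
        case <-ctx.Done():
            return // caller gave up
        case <-sub.Done():
            return // bus is shutting down
        case delta := <-sub.Events():
            if delta.New.AnyInterfaceUp() {
                return // network came back
            }
            // otherwise keep waiting for the next change event
        }
    }
}
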
+ +type Logger struct{} + +type Buffer any + +func Disable() {} + +func NewLogger(cfg Config, logf tslogger.Logf) *Logger { + return &Logger{} +} + +func (*Logger) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func (*Logger) Logf(format string, args ...any) {} +func (*Logger) Shutdown(ctx context.Context) error { return nil } +func (*Logger) SetVerbosityLevel(level int) {} + +func (l *Logger) SetSockstatsLabel(label any) {} + +func (l *Logger) PrivateID() logid.PrivateID { return logid.PrivateID{} } +func (l *Logger) StartFlush() {} + +func RegisterLogTap(dst chan<- string) (unregister func()) { + return func() {} +} + +func (*Logger) SetNetMon(any) {} diff --git a/logtail/logtail_test.go b/logtail/logtail_test.go index b8c46c44840bc..a92f88b4bb03e 100644 --- a/logtail/logtail_test.go +++ b/logtail/logtail_test.go @@ -17,6 +17,7 @@ import ( "github.com/go-json-experiment/json/jsontext" "tailscale.com/tstest" "tailscale.com/tstime" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" ) @@ -30,6 +31,7 @@ func TestFastShutdown(t *testing.T) { l := NewLogger(Config{ BaseURL: testServ.URL, + Bus: eventbustest.NewBus(t), }, t.Logf) err := l.Shutdown(ctx) if err != nil { @@ -62,7 +64,10 @@ func NewLogtailTestHarness(t *testing.T) (*LogtailTestServer, *Logger) { t.Cleanup(ts.srv.Close) - l := NewLogger(Config{BaseURL: ts.srv.URL}, t.Logf) + l := NewLogger(Config{ + BaseURL: ts.srv.URL, + Bus: eventbustest.NewBus(t), + }, t.Logf) // There is always an initial "logtail started" message body := <-ts.uploaded diff --git a/net/ace/ace.go b/net/ace/ace.go new file mode 100644 index 0000000000000..47e780313cadd --- /dev/null +++ b/net/ace/ace.go @@ -0,0 +1,125 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package ace implements a Dialer that dials via a Tailscale ACE (CONNECT) +// proxy. +// +// TODO: document this more, when it's more done. As of 2025-09-17, it's in +// development. +package ace + +import ( + "bufio" + "cmp" + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/netip" + "sync/atomic" +) + +// Dialer is an HTTP CONNECT proxy dialer to dial the control plane via an ACE +// proxy. +type Dialer struct { + ACEHost string + ACEHostIP netip.Addr // optional; if non-zero, use this IP instead of DNS + ACEPort int // zero means 443 + + // NetDialer optionally specifies the underlying dialer to use to reach the + // ACEHost. If nil, net.Dialer.DialContext is used. + NetDialer func(ctx context.Context, network, address string) (net.Conn, error) +} + +func (d *Dialer) netDialer() func(ctx context.Context, network, address string) (net.Conn, error) { + if d.NetDialer != nil { + return d.NetDialer + } + var std net.Dialer + return std.DialContext +} + +func (d *Dialer) acePort() int { return cmp.Or(d.ACEPort, 443) } + +func (d *Dialer) Dial(ctx context.Context, network, address string) (_ net.Conn, err error) { + if network != "tcp" { + return nil, errors.New("only TCP is supported") + } + + var targetHost string + if d.ACEHostIP.IsValid() { + targetHost = d.ACEHostIP.String() + } else { + targetHost = d.ACEHost + } + + cc, err := d.netDialer()(ctx, "tcp", net.JoinHostPort(targetHost, fmt.Sprint(d.acePort()))) + if err != nil { + return nil, err + } + + // Now that we've dialed, we're about to do three potentially blocking + // operations: the TLS handshake, the CONNECT write, and the HTTP response + // read. 
To make our context work over all that, we use a context.AfterFunc + // to start a goroutine that'll tear down the underlying connection if the + // context expires. + // + // To prevent races, we use an atomic.Bool to guard access to the underlying + // connection being either good or bad. Only one goroutine (the success path + // in this goroutine after the ReadResponse or the AfterFunc's failure + // goroutine) will compare-and-swap it from false to true. + var done atomic.Bool + stop := context.AfterFunc(ctx, func() { + if done.CompareAndSwap(false, true) { + cc.Close() + } + }) + defer func() { + if err != nil { + if ctx.Err() != nil { + // Prefer the context error. The other error is likely a side + // effect of the context expiring and our tearing down of the + // underlying connection, and is thus probably something like + // "use of closed network connection", which isn't useful (and + // actually misleading) for the caller. + err = ctx.Err() + } + stop() + cc.Close() + } + }() + + tc := tls.Client(cc, &tls.Config{ServerName: d.ACEHost}) + if err := tc.Handshake(); err != nil { + return nil, err + } + + // TODO(tailscale/corp#32484): send proxy-auth header + if _, err := fmt.Fprintf(tc, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", address, d.ACEHost); err != nil { + return nil, err + } + + br := bufio.NewReader(tc) + connRes, err := http.ReadResponse(br, &http.Request{Method: "CONNECT"}) + if err != nil { + return nil, fmt.Errorf("reading CONNECT response: %w", err) + } + + // Now that we're done with blocking operations, mark the connection + // as good, to prevent the context's AfterFunc from closing it. + if !stop() || !done.CompareAndSwap(false, true) { + // We lost a race and the context expired. + return nil, ctx.Err() + } + + if connRes.StatusCode != http.StatusOK { + return nil, fmt.Errorf("ACE CONNECT response: %s", connRes.Status) + } + + if br.Buffered() > 0 { + return nil, fmt.Errorf("unexpected %d bytes of buffered data after ACE CONNECT", br.Buffered()) + } + return tc, nil +} diff --git a/net/captivedetection/captivedetection_test.go b/net/captivedetection/captivedetection_test.go index 064a86c8c35e5..0778e07df393a 100644 --- a/net/captivedetection/captivedetection_test.go +++ b/net/captivedetection/captivedetection_test.go @@ -15,7 +15,7 @@ import ( "testing" "time" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/syncs" "tailscale.com/tstest/nettest" @@ -136,7 +136,7 @@ func TestAgainstDERPHandler(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - s := httptest.NewServer(http.HandlerFunc(derphttp.ServeNoContent)) + s := httptest.NewServer(http.HandlerFunc(derpserver.ServeNoContent)) defer s.Close() e := Endpoint{ URL: must.Get(url.Parse(s.URL + "/generate_204")), diff --git a/net/dns/config.go b/net/dns/config.go index b2c7c428593ff..2425b304dffd8 100644 --- a/net/dns/config.go +++ b/net/dns/config.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:generate go run tailscale.com/cmd/viewer --type=Config --clonefunc + // Package dns contains code to configure and manage DNS settings. 
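The new net/ace package above exports only the Dialer type and its Dial method. A hypothetical usage sketch follows; the proxy and target host names are made up, and a zero ACEPort already defaults to 443:

package main

import (
    "context"
    "log"
    "time"

    "tailscale.com/net/ace"
)

func main() {
    d := &ace.Dialer{
        ACEHost: "ace-proxy.example.com", // hypothetical ACE proxy
    }
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    // Only "tcp" is supported; on success the returned conn is the TLS stream
    // to the proxy with the CONNECT tunnel to the target already established.
    conn, err := d.Dial(ctx, "tcp", "controlplane.example.com:443") // hypothetical target
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
    log.Printf("tunneled connection established to %v", conn.RemoteAddr())
}
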
package dns @@ -8,6 +10,7 @@ import ( "bufio" "fmt" "net/netip" + "reflect" "slices" "sort" @@ -188,3 +191,10 @@ func sameResolverNames(a, b []*dnstype.Resolver) bool { } return true } + +func (c *Config) Equal(o *Config) bool { + if c == nil || o == nil { + return c == o + } + return reflect.DeepEqual(c, o) +} diff --git a/net/dns/dbus.go b/net/dns/dbus.go new file mode 100644 index 0000000000000..c53e8b7205949 --- /dev/null +++ b/net/dns/dbus.go @@ -0,0 +1,59 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && !android && !ts_omit_dbus + +package dns + +import ( + "context" + "time" + + "github.com/godbus/dbus/v5" +) + +func init() { + optDBusPing.Set(dbusPing) + optDBusReadString.Set(dbusReadString) +} + +func dbusPing(name, objectPath string) error { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + obj := conn.Object(name, dbus.ObjectPath(objectPath)) + call := obj.CallWithContext(ctx, "org.freedesktop.DBus.Peer.Ping", 0) + return call.Err +} + +// dbusReadString reads a string property from the provided name and object +// path. property must be in "interface.member" notation. +func dbusReadString(name, objectPath, iface, member string) (string, error) { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return "", err + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + obj := conn.Object(name, dbus.ObjectPath(objectPath)) + + var result dbus.Variant + err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, iface, member).Store(&result) + if err != nil { + return "", err + } + + if s, ok := result.Value().(string); ok { + return s, nil + } + return result.String(), nil +} diff --git a/net/dns/direct.go b/net/dns/direct.go index f23723d9a1515..59eb0696498e8 100644 --- a/net/dns/direct.go +++ b/net/dns/direct.go @@ -23,6 +23,7 @@ import ( "sync" "time" + "tailscale.com/feature" "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/tsaddr" @@ -415,6 +416,73 @@ func (m *directManager) GetBaseConfig() (OSConfig, error) { return oscfg, nil } +// HookWatchFile is a hook for watching file changes, for platforms that support it. +// The function is called with a directory and filename to watch, and a callback +// to call when the file changes. It returns an error if the watch could not be set up. +var HookWatchFile feature.Hook[func(ctx context.Context, dir, filename string, cb func()) error] + +func (m *directManager) runFileWatcher() { + watchFile, ok := HookWatchFile.GetOk() + if !ok { + return + } + if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { + // This is all best effort for now, so surface warnings to users. + m.logf("dns: inotify: %s", err) + } +} + +var resolvTrampleWarnable = health.Register(&health.Warnable{ + Code: "resolv-conf-overwritten", + Severity: health.SeverityMedium, + Title: "DNS configuration issue", + Text: health.StaticMessage("System DNS config not ideal. /etc/resolv.conf overwritten. See https://tailscale.com/s/dns-fight"), +}) + +// checkForFileTrample checks whether /etc/resolv.conf has been trampled +// by another program on the system. (e.g. 
a DHCP client) +func (m *directManager) checkForFileTrample() { + m.mu.Lock() + want := m.wantResolvConf + lastWarn := m.lastWarnContents + m.mu.Unlock() + + if want == nil { + return + } + + cur, err := m.fs.ReadFile(resolvConf) + if err != nil { + m.logf("trample: read error: %v", err) + return + } + if bytes.Equal(cur, want) { + m.health.SetHealthy(resolvTrampleWarnable) + if lastWarn != nil { + m.mu.Lock() + m.lastWarnContents = nil + m.mu.Unlock() + m.logf("trample: resolv.conf again matches expected content") + } + return + } + if bytes.Equal(cur, lastWarn) { + // We already logged about this, so not worth doing it again. + return + } + + m.mu.Lock() + m.lastWarnContents = cur + m.mu.Unlock() + + show := cur + if len(show) > 1024 { + show = show[:1024] + } + m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? current contents: %q", show) + m.health.SetUnhealthy(resolvTrampleWarnable, nil) +} + func (m *directManager) Close() error { m.ctxClose() diff --git a/net/dns/direct_linux.go b/net/dns/direct_linux.go deleted file mode 100644 index 0558f0f51b253..0000000000000 --- a/net/dns/direct_linux.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build linux && !android - -package dns - -import ( - "bytes" - "context" - "fmt" - - "github.com/illarion/gonotify/v3" - "tailscale.com/health" -) - -func (m *directManager) runFileWatcher() { - if err := watchFile(m.ctx, "/etc/", resolvConf, m.checkForFileTrample); err != nil { - // This is all best effort for now, so surface warnings to users. - m.logf("dns: inotify: %s", err) - } -} - -// watchFile sets up an inotify watch for a given directory and -// calls the callback function every time a particular file is changed. -// The filename should be located in the provided directory. -func watchFile(ctx context.Context, dir, filename string, cb func()) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - const events = gonotify.IN_ATTRIB | - gonotify.IN_CLOSE_WRITE | - gonotify.IN_CREATE | - gonotify.IN_DELETE | - gonotify.IN_MODIFY | - gonotify.IN_MOVE - - watcher, err := gonotify.NewDirWatcher(ctx, events, dir) - if err != nil { - return fmt.Errorf("NewDirWatcher: %w", err) - } - - for { - select { - case event := <-watcher.C: - if event.Name == filename { - cb() - } - case <-ctx.Done(): - return ctx.Err() - } - } -} - -var resolvTrampleWarnable = health.Register(&health.Warnable{ - Code: "resolv-conf-overwritten", - Severity: health.SeverityMedium, - Title: "Linux DNS configuration issue", - Text: health.StaticMessage("Linux DNS config not ideal. /etc/resolv.conf overwritten. See https://tailscale.com/s/dns-fight"), -}) - -// checkForFileTrample checks whether /etc/resolv.conf has been trampled -// by another program on the system. (e.g. a DHCP client) -func (m *directManager) checkForFileTrample() { - m.mu.Lock() - want := m.wantResolvConf - lastWarn := m.lastWarnContents - m.mu.Unlock() - - if want == nil { - return - } - - cur, err := m.fs.ReadFile(resolvConf) - if err != nil { - m.logf("trample: read error: %v", err) - return - } - if bytes.Equal(cur, want) { - m.health.SetHealthy(resolvTrampleWarnable) - if lastWarn != nil { - m.mu.Lock() - m.lastWarnContents = nil - m.mu.Unlock() - m.logf("trample: resolv.conf again matches expected content") - } - return - } - if bytes.Equal(cur, lastWarn) { - // We already logged about this, so not worth doing it again. 
- return - } - - m.mu.Lock() - m.lastWarnContents = cur - m.mu.Unlock() - - show := cur - if len(show) > 1024 { - show = show[:1024] - } - m.logf("trample: resolv.conf changed from what we expected. did some other program interfere? current contents: %q", show) - m.health.SetUnhealthy(resolvTrampleWarnable, nil) -} diff --git a/net/dns/direct_notlinux.go b/net/dns/direct_notlinux.go deleted file mode 100644 index a73a35e5ead2b..0000000000000 --- a/net/dns/direct_notlinux.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux && !android && !ios - -package dns - -func (m *directManager) runFileWatcher() { - // Not implemented on other platforms. Maybe it could resort to polling. -} diff --git a/net/dns/dns_clone.go b/net/dns/dns_clone.go new file mode 100644 index 0000000000000..807bfce23df8b --- /dev/null +++ b/net/dns/dns_clone.go @@ -0,0 +1,74 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale.com/cmd/cloner; DO NOT EDIT. + +package dns + +import ( + "net/netip" + + "tailscale.com/types/dnstype" + "tailscale.com/util/dnsname" +) + +// Clone makes a deep copy of Config. +// The result aliases no memory with the original. +func (src *Config) Clone() *Config { + if src == nil { + return nil + } + dst := new(Config) + *dst = *src + if src.DefaultResolvers != nil { + dst.DefaultResolvers = make([]*dnstype.Resolver, len(src.DefaultResolvers)) + for i := range dst.DefaultResolvers { + if src.DefaultResolvers[i] == nil { + dst.DefaultResolvers[i] = nil + } else { + dst.DefaultResolvers[i] = src.DefaultResolvers[i].Clone() + } + } + } + if dst.Routes != nil { + dst.Routes = map[dnsname.FQDN][]*dnstype.Resolver{} + for k := range src.Routes { + dst.Routes[k] = append([]*dnstype.Resolver{}, src.Routes[k]...) + } + } + dst.SearchDomains = append(src.SearchDomains[:0:0], src.SearchDomains...) + if dst.Hosts != nil { + dst.Hosts = map[dnsname.FQDN][]netip.Addr{} + for k := range src.Hosts { + dst.Hosts[k] = append([]netip.Addr{}, src.Hosts[k]...) + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ConfigCloneNeedsRegeneration = Config(struct { + DefaultResolvers []*dnstype.Resolver + Routes map[dnsname.FQDN][]*dnstype.Resolver + SearchDomains []dnsname.FQDN + Hosts map[dnsname.FQDN][]netip.Addr + OnlyIPv6 bool +}{}) + +// Clone duplicates src into dst and reports whether it succeeded. +// To succeed, must be of types <*T, *T> or <*T, **T>, +// where T is one of Config. +func Clone(dst, src any) bool { + switch src := src.(type) { + case *Config: + switch dst := dst.(type) { + case *Config: + *dst = *src.Clone() + return true + case **Config: + *dst = src.Clone() + return true + } + } + return false +} diff --git a/net/dns/dns_view.go b/net/dns/dns_view.go new file mode 100644 index 0000000000000..c7ce376cba8db --- /dev/null +++ b/net/dns/dns_view.go @@ -0,0 +1,138 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Code generated by tailscale/cmd/viewer; DO NOT EDIT. 
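config.go now carries the viewer go:generate directive, and the two generated files (dns_clone.go above, dns_view.go below) give dns.Config a deep Clone and a read-only ConfigView, alongside the hand-written Equal added to config.go. A small consumption sketch; the resolver address is illustrative:

package main

import (
    "fmt"

    "tailscale.com/net/dns"
    "tailscale.com/types/dnstype"
)

func main() {
    cfg := &dns.Config{
        DefaultResolvers: []*dnstype.Resolver{{Addr: "1.1.1.1"}}, // illustrative resolver
    }

    clone := cfg.Clone() // deep copy; aliases no memory with cfg
    clone.DefaultResolvers[0].Addr = "9.9.9.9"

    v := cfg.View() // read-only view over the original
    fmt.Println(v.Valid(), v.OnlyIPv6(), cfg.Equal(clone)) // true false false
}
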
+ +package dns + +import ( + jsonv1 "encoding/json" + "errors" + "net/netip" + + jsonv2 "github.com/go-json-experiment/json" + "github.com/go-json-experiment/json/jsontext" + "tailscale.com/types/dnstype" + "tailscale.com/types/views" + "tailscale.com/util/dnsname" +) + +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=Config + +// View returns a read-only view of Config. +func (p *Config) View() ConfigView { + return ConfigView{ж: p} +} + +// ConfigView provides a read-only view over Config. +// +// Its methods should only be called if `Valid()` returns true. +type ConfigView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *Config +} + +// Valid reports whether v's underlying value is non-nil. +func (v ConfigView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v ConfigView) AsStruct() *Config { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v ConfigView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v ConfigView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *ConfigView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x Config + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *ConfigView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x Config + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// DefaultResolvers are the DNS resolvers to use for DNS names +// which aren't covered by more specific per-domain routes below. +// If empty, the OS's default resolvers (the ones that predate +// Tailscale altering the configuration) are used. +func (v ConfigView) DefaultResolvers() views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { + return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](v.ж.DefaultResolvers) +} + +// Routes maps a DNS suffix to the resolvers that should be used +// for queries that fall within that suffix. +// If a query doesn't match any entry in Routes, the +// DefaultResolvers are used. +// A Routes entry with no resolvers means the route should be +// authoritatively answered using the contents of Hosts. +func (v ConfigView) Routes() views.MapFn[dnsname.FQDN, []*dnstype.Resolver, views.SliceView[*dnstype.Resolver, dnstype.ResolverView]] { + return views.MapFnOf(v.ж.Routes, func(t []*dnstype.Resolver) views.SliceView[*dnstype.Resolver, dnstype.ResolverView] { + return views.SliceOfViews[*dnstype.Resolver, dnstype.ResolverView](t) + }) +} + +// SearchDomains are DNS suffixes to try when expanding +// single-label queries. +func (v ConfigView) SearchDomains() views.Slice[dnsname.FQDN] { + return views.SliceOf(v.ж.SearchDomains) +} + +// Hosts maps DNS FQDNs to their IPs, which can be a mix of IPv4 +// and IPv6. 
+// Queries matching entries in Hosts are resolved locally by +// 100.100.100.100 without leaving the machine. +// Adding an entry to Hosts merely creates the record. If you want +// it to resolve, you also need to add appropriate routes to +// Routes. +func (v ConfigView) Hosts() views.MapSlice[dnsname.FQDN, netip.Addr] { + return views.MapSliceOf(v.ж.Hosts) +} + +// OnlyIPv6, if true, uses the IPv6 service IP (for MagicDNS) +// instead of the IPv4 version (100.100.100.100). +func (v ConfigView) OnlyIPv6() bool { return v.ж.OnlyIPv6 } +func (v ConfigView) Equal(v2 ConfigView) bool { return v.ж.Equal(v2.ж) } + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _ConfigViewNeedsRegeneration = Config(struct { + DefaultResolvers []*dnstype.Resolver + Routes map[dnsname.FQDN][]*dnstype.Resolver + SearchDomains []dnsname.FQDN + Hosts map[dnsname.FQDN][]netip.Addr + OnlyIPv6 bool +}{}) diff --git a/net/dns/manager.go b/net/dns/manager.go index 4a5c4925cf092..de99fe646f786 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -20,6 +20,7 @@ import ( "time" "tailscale.com/control/controlknobs" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/resolver" "tailscale.com/net/netmon" @@ -29,6 +30,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus" "tailscale.com/util/slicesx" "tailscale.com/util/syspolicy/policyclient" ) @@ -71,6 +73,9 @@ type Manager struct { // // knobs may be nil. func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, dialer *tsdial.Dialer, linkSel resolver.ForwardLinkSelector, knobs *controlknobs.Knobs, goos string) *Manager { + if !buildfeatures.HasDNS { + return nil + } if dialer == nil { panic("nil Dialer") } @@ -97,7 +102,12 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, } // Resolver returns the Manager's DNS Resolver. -func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } +func (m *Manager) Resolver() *resolver.Resolver { + if !buildfeatures.HasDNS { + return nil + } + return m.resolver +} // RecompileDNSConfig recompiles the last attempted DNS configuration, which has // the side effect of re-querying the OS's interface nameservers. This should be used @@ -111,6 +121,9 @@ func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } // // It returns [ErrNoDNSConfig] if [Manager.Set] has never been called. func (m *Manager) RecompileDNSConfig() error { + if !buildfeatures.HasDNS { + return nil + } m.mu.Lock() defer m.mu.Unlock() if m.config != nil { @@ -120,6 +133,9 @@ func (m *Manager) RecompileDNSConfig() error { } func (m *Manager) Set(cfg Config) error { + if !buildfeatures.HasDNS { + return nil + } m.mu.Lock() defer m.mu.Unlock() return m.setLocked(cfg) @@ -127,6 +143,9 @@ func (m *Manager) Set(cfg Config) error { // GetBaseConfig returns the current base OS DNS configuration as provided by the OSConfigurator. 
func (m *Manager) GetBaseConfig() (OSConfig, error) { + if !buildfeatures.HasDNS { + panic("unreachable") + } return m.os.GetBaseConfig() } @@ -559,6 +578,9 @@ func (m *Manager) HandleTCPConn(conn net.Conn, srcAddr netip.AddrPort) { } func (m *Manager) Down() error { + if !buildfeatures.HasDNS { + return nil + } m.ctxCancel() if err := m.os.Close(); err != nil { return err @@ -568,6 +590,9 @@ func (m *Manager) Down() error { } func (m *Manager) FlushCaches() error { + if !buildfeatures.HasDNS { + return nil + } return flushCaches() } @@ -576,7 +601,10 @@ func (m *Manager) FlushCaches() error { // No other state needs to be instantiated before this runs. // // health must not be nil -func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, interfaceName string) { +func CleanUp(logf logger.Logf, netMon *netmon.Monitor, bus *eventbus.Bus, health *health.Tracker, interfaceName string) { + if !buildfeatures.HasDNS { + return + } oscfg, err := NewOSConfigurator(logf, health, policyclient.Get(), nil, interfaceName) if err != nil { logf("creating dns cleanup: %v", err) @@ -584,6 +612,7 @@ func CleanUp(logf logger.Logf, netMon *netmon.Monitor, health *health.Tracker, i } d := &tsdial.Dialer{Logf: logf} d.SetNetMon(netMon) + d.SetBus(bus) dns := NewManager(logf, oscfg, health, d, nil, nil, runtime.GOOS) if err := dns.Down(); err != nil { logf("dns down: %v", err) diff --git a/net/dns/manager_linux.go b/net/dns/manager_linux.go index 8b66ac3a685e3..4304df2616e98 100644 --- a/net/dns/manager_linux.go +++ b/net/dns/manager_linux.go @@ -7,7 +7,6 @@ package dns import ( "bytes" - "context" "errors" "fmt" "os" @@ -15,13 +14,13 @@ import ( "sync" "time" - "github.com/godbus/dbus/v5" "tailscale.com/control/controlknobs" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/netaddr" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" - "tailscale.com/util/cmpver" "tailscale.com/util/syspolicy/policyclient" "tailscale.com/version/distro" ) @@ -36,22 +35,59 @@ func (kv kv) String() string { var publishOnce sync.Once +// reconfigTimeout is the time interval within which Manager.{Up,Down} should complete. +// +// This is particularly useful because certain conditions can cause indefinite hangs +// (such as improper dbus auth followed by contextless dbus.Object.Call). +// Such operations should be wrapped in a timeout context. +const reconfigTimeout = time.Second + +// Set unless ts_omit_networkmanager +var ( + optNewNMManager feature.Hook[func(ifName string) (OSConfigurator, error)] + optNMIsUsingResolved feature.Hook[func() error] + optNMVersionBetween feature.Hook[func(v1, v2 string) (bool, error)] +) + +// Set unless ts_omit_resolved +var ( + optNewResolvedManager feature.Hook[func(logf logger.Logf, health *health.Tracker, interfaceName string) (OSConfigurator, error)] +) + +// Set unless ts_omit_dbus +var ( + optDBusPing feature.Hook[func(name, objectPath string) error] + optDBusReadString feature.Hook[func(name, objectPath, iface, member string) (string, error)] +) + // NewOSConfigurator created a new OS configurator. // // The health tracker may be nil; the knobs may be nil and are ignored on this platform. 
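manager_linux.go now reaches D-Bus, NetworkManager, and systemd-resolved only through the opt* feature.Hook variables above, which the tag-guarded files (net/dns/dbus.go, nm.go, the resolved manager) register from init. A condensed sketch of that register-and-fall-back pattern; the hook name and the no-op implementation here are illustrative, not the real ones:

package example

import (
    "errors"

    "tailscale.com/feature"
)

// Declared in the always-compiled file (compare optDBusPing above).
var optPing feature.Hook[func(name, objectPath string) error]

// In a file guarded by a build tag such as !ts_omit_dbus, the optional
// implementation registers itself (compare the init func in net/dns/dbus.go).
func init() {
    optPing.Set(func(name, objectPath string) error { return nil }) // illustrative no-op
}

// ping shows how callers consume the hook: use it when present, otherwise fall
// back to a graceful "not supported" error, much as NewOSConfigurator does below.
func ping(name, objectPath string) error {
    if f, ok := optPing.GetOk(); ok {
        return f(name, objectPath)
    }
    return errors.ErrUnsupported
}
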
func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient.Client, _ *controlknobs.Knobs, interfaceName string) (ret OSConfigurator, err error) { - if distro.Get() == distro.JetKVM { + if !buildfeatures.HasDNS || distro.Get() == distro.JetKVM { return NewNoopManager() } env := newOSConfigEnv{ - fs: directFS{}, - dbusPing: dbusPing, - dbusReadString: dbusReadString, - nmIsUsingResolved: nmIsUsingResolved, - nmVersionBetween: nmVersionBetween, - resolvconfStyle: resolvconfStyle, + fs: directFS{}, + resolvconfStyle: resolvconfStyle, } + if f, ok := optDBusPing.GetOk(); ok { + env.dbusPing = f + } else { + env.dbusPing = func(_, _ string) error { return errors.ErrUnsupported } + } + if f, ok := optDBusReadString.GetOk(); ok { + env.dbusReadString = f + } else { + env.dbusReadString = func(_, _, _, _ string) (string, error) { return "", errors.ErrUnsupported } + } + if f, ok := optNMIsUsingResolved.GetOk(); ok { + env.nmIsUsingResolved = f + } else { + env.nmIsUsingResolved = func() error { return errors.ErrUnsupported } + } + env.nmVersionBetween, _ = optNMVersionBetween.GetOk() // GetOk to not panic if nil; unused if optNMIsUsingResolved returns an error mode, err := dnsMode(logf, health, env) if err != nil { return nil, err @@ -66,17 +102,24 @@ func NewOSConfigurator(logf logger.Logf, health *health.Tracker, _ policyclient. case "direct": return newDirectManagerOnFS(logf, health, env.fs), nil case "systemd-resolved": - return newResolvedManager(logf, health, interfaceName) + if f, ok := optNewResolvedManager.GetOk(); ok { + return f(logf, health, interfaceName) + } + return nil, fmt.Errorf("tailscaled was built without DNS %q support", mode) case "network-manager": - return newNMManager(interfaceName) + if f, ok := optNewNMManager.GetOk(); ok { + return f(interfaceName) + } + return nil, fmt.Errorf("tailscaled was built without DNS %q support", mode) case "debian-resolvconf": return newDebianResolvconfManager(logf) case "openresolv": return newOpenresolvManager(logf) default: logf("[unexpected] detected unknown DNS mode %q, using direct manager as last resort", mode) - return newDirectManagerOnFS(logf, health, env.fs), nil } + + return newDirectManagerOnFS(logf, health, env.fs), nil } // newOSConfigEnv are the funcs newOSConfigurator needs, pulled out for testing. @@ -292,50 +335,6 @@ func dnsMode(logf logger.Logf, health *health.Tracker, env newOSConfigEnv) (ret } } -func nmVersionBetween(first, last string) (bool, error) { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return false, err - } - - nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager")) - v, err := nm.GetProperty("org.freedesktop.NetworkManager.Version") - if err != nil { - return false, err - } - - version, ok := v.Value().(string) - if !ok { - return false, fmt.Errorf("unexpected type %T for NM version", v.Value()) - } - - outside := cmpver.Compare(version, first) < 0 || cmpver.Compare(version, last) > 0 - return !outside, nil -} - -func nmIsUsingResolved() error { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. 
- return err - } - - nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager/DnsManager")) - v, err := nm.GetProperty("org.freedesktop.NetworkManager.DnsManager.Mode") - if err != nil { - return fmt.Errorf("getting NM mode: %w", err) - } - mode, ok := v.Value().(string) - if !ok { - return fmt.Errorf("unexpected type %T for NM DNS mode", v.Value()) - } - if mode != "systemd-resolved" { - return errors.New("NetworkManager is not using systemd-resolved for DNS") - } - return nil -} - // resolvedIsActuallyResolver reports whether the system is using // systemd-resolved as the resolver. There are two different ways to // use systemd-resolved: @@ -396,44 +395,3 @@ func isLibnssResolveUsed(env newOSConfigEnv) error { } return fmt.Errorf("libnss_resolve not used") } - -func dbusPing(name, objectPath string) error { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - obj := conn.Object(name, dbus.ObjectPath(objectPath)) - call := obj.CallWithContext(ctx, "org.freedesktop.DBus.Peer.Ping", 0) - return call.Err -} - -// dbusReadString reads a string property from the provided name and object -// path. property must be in "interface.member" notation. -func dbusReadString(name, objectPath, iface, member string) (string, error) { - conn, err := dbus.SystemBus() - if err != nil { - // DBus probably not running. - return "", err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - obj := conn.Object(name, dbus.ObjectPath(objectPath)) - - var result dbus.Variant - err = obj.CallWithContext(ctx, "org.freedesktop.DBus.Properties.Get", 0, iface, member).Store(&result) - if err != nil { - return "", err - } - - if s, ok := result.Value().(string); ok { - return s, nil - } - return result.String(), nil -} diff --git a/net/dns/manager_tcp_test.go b/net/dns/manager_tcp_test.go index f4c42791e9b5b..dcdc88c7a22bf 100644 --- a/net/dns/manager_tcp_test.go +++ b/net/dns/manager_tcp_test.go @@ -20,6 +20,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" ) func mkDNSRequest(domain dnsname.FQDN, tp dns.Type, modify func(*dns.Builder)) []byte { @@ -89,7 +90,10 @@ func TestDNSOverTCP(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts( @@ -174,7 +178,10 @@ func TestDNSOverTCP_TooLarge(t *testing.T) { SearchDomains: fqdns("coffee.shop"), }, } - m := NewManager(log, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(log, &f, health.NewTracker(bus), dialer, nil, nil, "") m.resolver.TestOnlySetHook(f.SetResolver) m.Set(Config{ Hosts: hosts("andrew.ts.com.", "1.2.3.4"), diff --git a/net/dns/manager_test.go b/net/dns/manager_test.go index 522f9636abefe..92b660007cdd2 100644 --- a/net/dns/manager_test.go +++ b/net/dns/manager_test.go @@ -19,6 +19,7 @@ import ( "tailscale.com/net/tsdial" "tailscale.com/types/dnstype" 
"tailscale.com/util/dnsname" + "tailscale.com/util/eventbus/eventbustest" ) type fakeOSConfigurator struct { @@ -932,7 +933,10 @@ func TestManager(t *testing.T) { goos = "linux" } knobs := &controlknobs.Knobs{} - m := NewManager(t.Logf, &f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, knobs, goos) + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, &f, health.NewTracker(bus), dialer, nil, knobs, goos) m.resolver.TestOnlySetHook(f.SetResolver) if err := m.Set(test.in); err != nil { @@ -1038,7 +1042,10 @@ func TestConfigRecompilation(t *testing.T) { SearchDomains: fqdns("foo.ts.net"), } - m := NewManager(t.Logf, f, new(health.Tracker), tsdial.NewDialer(netmon.NewStatic()), nil, nil, "darwin") + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) + m := NewManager(t.Logf, f, health.NewTracker(bus), dialer, nil, nil, "darwin") var managerConfig *resolver.Config m.resolver.TestOnlySetHook(func(cfg resolver.Config) { diff --git a/net/dns/nm.go b/net/dns/nm.go index 97557e33aa9bf..a88d29b374ebb 100644 --- a/net/dns/nm.go +++ b/net/dns/nm.go @@ -1,13 +1,14 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_omit_networkmanager package dns import ( "context" "encoding/binary" + "errors" "fmt" "net" "net/netip" @@ -16,6 +17,7 @@ import ( "github.com/godbus/dbus/v5" "tailscale.com/net/tsaddr" + "tailscale.com/util/cmpver" "tailscale.com/util/dnsname" ) @@ -25,13 +27,6 @@ const ( lowerPriority = int32(200) // lower than all builtin auto priorities ) -// reconfigTimeout is the time interval within which Manager.{Up,Down} should complete. -// -// This is particularly useful because certain conditions can cause indefinite hangs -// (such as improper dbus auth followed by contextless dbus.Object.Call). -// Such operations should be wrapped in a timeout context. -const reconfigTimeout = time.Second - // nmManager uses the NetworkManager DBus API. type nmManager struct { interfaceName string @@ -39,7 +34,13 @@ type nmManager struct { dnsManager dbus.BusObject } -func newNMManager(interfaceName string) (*nmManager, error) { +func init() { + optNewNMManager.Set(newNMManager) + optNMIsUsingResolved.Set(nmIsUsingResolved) + optNMVersionBetween.Set(nmVersionBetween) +} + +func newNMManager(interfaceName string) (OSConfigurator, error) { conn, err := dbus.SystemBus() if err != nil { return nil, err @@ -389,3 +390,47 @@ func (m *nmManager) Close() error { // settings when the tailscale interface goes away. return nil } + +func nmVersionBetween(first, last string) (bool, error) { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. + return false, err + } + + nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager")) + v, err := nm.GetProperty("org.freedesktop.NetworkManager.Version") + if err != nil { + return false, err + } + + version, ok := v.Value().(string) + if !ok { + return false, fmt.Errorf("unexpected type %T for NM version", v.Value()) + } + + outside := cmpver.Compare(version, first) < 0 || cmpver.Compare(version, last) > 0 + return !outside, nil +} + +func nmIsUsingResolved() error { + conn, err := dbus.SystemBus() + if err != nil { + // DBus probably not running. 
+ return err + } + + nm := conn.Object("org.freedesktop.NetworkManager", dbus.ObjectPath("/org/freedesktop/NetworkManager/DnsManager")) + v, err := nm.GetProperty("org.freedesktop.NetworkManager.DnsManager.Mode") + if err != nil { + return fmt.Errorf("getting NM mode: %w", err) + } + mode, ok := v.Value().(string) + if !ok { + return fmt.Errorf("unexpected type %T for NM DNS mode", v.Value()) + } + if mode != "systemd-resolved" { + return errors.New("NetworkManager is not using systemd-resolved for DNS") + } + return nil +} diff --git a/net/dns/osconfig.go b/net/dns/osconfig.go index 842c5ac607853..af4c0f01fc75b 100644 --- a/net/dns/osconfig.go +++ b/net/dns/osconfig.go @@ -11,6 +11,7 @@ import ( "slices" "strings" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/dnsname" ) @@ -158,6 +159,10 @@ func (a OSConfig) Equal(b OSConfig) bool { // Fixes https://github.com/tailscale/tailscale/issues/5669 func (a OSConfig) Format(f fmt.State, verb rune) { logger.ArgWriter(func(w *bufio.Writer) { + if !buildfeatures.HasDNS { + w.WriteString(`{DNS-unlinked}`) + return + } w.WriteString(`{Nameservers:[`) for i, ns := range a.Nameservers { if i != 0 { diff --git a/net/dns/publicdns/publicdns.go b/net/dns/publicdns/publicdns.go index 0dbd3ab8200f1..b8a7f88091617 100644 --- a/net/dns/publicdns/publicdns.go +++ b/net/dns/publicdns/publicdns.go @@ -17,6 +17,8 @@ import ( "strconv" "strings" "sync" + + "tailscale.com/feature/buildfeatures" ) // dohOfIP maps from public DNS IPs to their DoH base URL. @@ -163,6 +165,9 @@ const ( // populate is called once to initialize the knownDoH and dohIPsOfBase maps. func populate() { + if !buildfeatures.HasDNS { + return + } // Cloudflare // https://developers.cloudflare.com/1.1.1.1/ip-addresses/ addDoH("1.1.1.1", "https://cloudflare-dns.com/dns-query") diff --git a/net/dns/recursive/recursive.go b/net/dns/recursive/recursive.go deleted file mode 100644 index fd865e37ab737..0000000000000 --- a/net/dns/recursive/recursive.go +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package recursive implements a simple recursive DNS resolver. -package recursive - -import ( - "context" - "errors" - "fmt" - "net" - "net/netip" - "slices" - "strings" - "time" - - "github.com/miekg/dns" - "tailscale.com/envknob" - "tailscale.com/net/netns" - "tailscale.com/types/logger" - "tailscale.com/util/dnsname" - "tailscale.com/util/mak" - "tailscale.com/util/multierr" - "tailscale.com/util/slicesx" -) - -const ( - // maxDepth is how deep from the root nameservers we'll recurse when - // resolving; passing this limit will instead return an error. - // - // maxDepth must be at least 20 to resolve "console.aws.amazon.com", - // which is a domain with a moderately complicated DNS setup. The - // current value of 30 was chosen semi-arbitrarily to ensure that we - // have about 50% headroom. - maxDepth = 30 - // numStartingServers is the number of root nameservers that we use as - // initial candidates for our recursion. - numStartingServers = 3 - // udpQueryTimeout is the amount of time we wait for a UDP response - // from a nameserver before falling back to a TCP connection. - udpQueryTimeout = 5 * time.Second - - // These constants aren't typed in the DNS package, so we create typed - // versions here to avoid having to do repeated type casts. 
- qtypeA dns.Type = dns.Type(dns.TypeA) - qtypeAAAA dns.Type = dns.Type(dns.TypeAAAA) -) - -var ( - // ErrMaxDepth is returned when recursive resolving exceeds the maximum - // depth limit for this package. - ErrMaxDepth = fmt.Errorf("exceeded max depth %d when resolving", maxDepth) - - // ErrAuthoritativeNoResponses is the error returned when an - // authoritative nameserver indicates that there are no responses to - // the given query. - ErrAuthoritativeNoResponses = errors.New("authoritative server returned no responses") - - // ErrNoResponses is returned when our resolution process completes - // with no valid responses from any nameserver, but no authoritative - // server explicitly returned NXDOMAIN. - ErrNoResponses = errors.New("no responses to query") -) - -var rootServersV4 = []netip.Addr{ - netip.MustParseAddr("198.41.0.4"), // a.root-servers.net - netip.MustParseAddr("170.247.170.2"), // b.root-servers.net - netip.MustParseAddr("192.33.4.12"), // c.root-servers.net - netip.MustParseAddr("199.7.91.13"), // d.root-servers.net - netip.MustParseAddr("192.203.230.10"), // e.root-servers.net - netip.MustParseAddr("192.5.5.241"), // f.root-servers.net - netip.MustParseAddr("192.112.36.4"), // g.root-servers.net - netip.MustParseAddr("198.97.190.53"), // h.root-servers.net - netip.MustParseAddr("192.36.148.17"), // i.root-servers.net - netip.MustParseAddr("192.58.128.30"), // j.root-servers.net - netip.MustParseAddr("193.0.14.129"), // k.root-servers.net - netip.MustParseAddr("199.7.83.42"), // l.root-servers.net - netip.MustParseAddr("202.12.27.33"), // m.root-servers.net -} - -var rootServersV6 = []netip.Addr{ - netip.MustParseAddr("2001:503:ba3e::2:30"), // a.root-servers.net - netip.MustParseAddr("2801:1b8:10::b"), // b.root-servers.net - netip.MustParseAddr("2001:500:2::c"), // c.root-servers.net - netip.MustParseAddr("2001:500:2d::d"), // d.root-servers.net - netip.MustParseAddr("2001:500:a8::e"), // e.root-servers.net - netip.MustParseAddr("2001:500:2f::f"), // f.root-servers.net - netip.MustParseAddr("2001:500:12::d0d"), // g.root-servers.net - netip.MustParseAddr("2001:500:1::53"), // h.root-servers.net - netip.MustParseAddr("2001:7fe::53"), // i.root-servers.net - netip.MustParseAddr("2001:503:c27::2:30"), // j.root-servers.net - netip.MustParseAddr("2001:7fd::1"), // k.root-servers.net - netip.MustParseAddr("2001:500:9f::42"), // l.root-servers.net - netip.MustParseAddr("2001:dc3::35"), // m.root-servers.net -} - -var debug = envknob.RegisterBool("TS_DEBUG_RECURSIVE_DNS") - -// Resolver is a recursive DNS resolver that is designed for looking up A and AAAA records. -type Resolver struct { - // Dialer is used to create outbound connections. If nil, a zero - // net.Dialer will be used instead. - Dialer netns.Dialer - - // Logf is the logging function to use; if none is specified, then logs - // will be dropped. - Logf logger.Logf - - // NoIPv6, if set, will prevent this package from querying for AAAA - // records and will avoid contacting nameservers over IPv6. - NoIPv6 bool - - // Test mocks - testQueryHook func(name dnsname.FQDN, nameserver netip.Addr, protocol string, qtype dns.Type) (*dns.Msg, error) - testExchangeHook func(nameserver netip.Addr, network string, msg *dns.Msg) (*dns.Msg, error) - rootServers []netip.Addr - timeNow func() time.Time - - // Caching - // NOTE(andrew): if we make resolution parallel, this needs a mutex - queryCache map[dnsQuery]dnsMsgWithExpiry - - // Possible future additions: - // - Additional nameservers? From the system maybe? 
- // - NoIPv4 for IPv4 - // - DNS-over-HTTPS or DNS-over-TLS support -} - -// queryState stores all state during the course of a single query -type queryState struct { - // rootServers are the root nameservers to start from - rootServers []netip.Addr - - // TODO: metrics? -} - -type dnsQuery struct { - nameserver netip.Addr - name dnsname.FQDN - qtype dns.Type -} - -func (q dnsQuery) String() string { - return fmt.Sprintf("dnsQuery{nameserver:%q,name:%q,qtype:%v}", q.nameserver.String(), q.name, q.qtype) -} - -type dnsMsgWithExpiry struct { - *dns.Msg - expiresAt time.Time -} - -func (r *Resolver) now() time.Time { - if r.timeNow != nil { - return r.timeNow() - } - return time.Now() -} - -func (r *Resolver) logf(format string, args ...any) { - if r.Logf == nil { - return - } - r.Logf(format, args...) -} - -func (r *Resolver) depthlogf(depth int, format string, args ...any) { - if r.Logf == nil || !debug() { - return - } - prefix := fmt.Sprintf("[%d] %s", depth, strings.Repeat(" ", depth)) - r.Logf(prefix+format, args...) -} - -var defaultDialer net.Dialer - -func (r *Resolver) dialer() netns.Dialer { - if r.Dialer != nil { - return r.Dialer - } - - return &defaultDialer -} - -func (r *Resolver) newState() *queryState { - var rootServers []netip.Addr - if len(r.rootServers) > 0 { - rootServers = r.rootServers - } else { - // Select a random subset of root nameservers to start from, since if - // we don't get responses from those, something else has probably gone - // horribly wrong. - roots4 := slices.Clone(rootServersV4) - slicesx.Shuffle(roots4) - roots4 = roots4[:numStartingServers] - - var roots6 []netip.Addr - if !r.NoIPv6 { - roots6 = slices.Clone(rootServersV6) - slicesx.Shuffle(roots6) - roots6 = roots6[:numStartingServers] - } - - // Interleave the root servers so that we try to contact them over - // IPv4, then IPv6, IPv4, IPv6, etc. - rootServers = slicesx.Interleave(roots4, roots6) - } - - return &queryState{ - rootServers: rootServers, - } -} - -// Resolve will perform a recursive DNS resolution for the provided name, -// starting at a randomly-chosen root DNS server, and return the A and AAAA -// responses as a slice of netip.Addrs along with the minimum TTL for the -// returned records. -func (r *Resolver) Resolve(ctx context.Context, name string) (addrs []netip.Addr, minTTL time.Duration, err error) { - dnsName, err := dnsname.ToFQDN(name) - if err != nil { - return nil, 0, err - } - - qstate := r.newState() - - r.logf("querying IPv4 addresses for: %q", name) - addrs4, minTTL4, err4 := r.resolveRecursiveFromRoot(ctx, qstate, 0, dnsName, qtypeA) - - var ( - addrs6 []netip.Addr - minTTL6 time.Duration - err6 error - ) - if !r.NoIPv6 { - r.logf("querying IPv6 addresses for: %q", name) - addrs6, minTTL6, err6 = r.resolveRecursiveFromRoot(ctx, qstate, 0, dnsName, qtypeAAAA) - } - - if err4 != nil && err6 != nil { - if err4 == err6 { - return nil, 0, err4 - } - - return nil, 0, multierr.New(err4, err6) - } - if err4 != nil { - return addrs6, minTTL6, nil - } else if err6 != nil { - return addrs4, minTTL4, nil - } - - minTTL = minTTL4 - if minTTL6 < minTTL { - minTTL = minTTL6 - } - - addrs = append(addrs4, addrs6...) 
- if len(addrs) == 0 { - return nil, 0, ErrNoResponses - } - - slicesx.Shuffle(addrs) - return addrs, minTTL, nil -} - -func (r *Resolver) resolveRecursiveFromRoot( - ctx context.Context, - qstate *queryState, - depth int, - name dnsname.FQDN, // what we're querying - qtype dns.Type, -) ([]netip.Addr, time.Duration, error) { - r.depthlogf(depth, "resolving %q from root (type: %v)", name, qtype) - - var depthError bool - for _, server := range qstate.rootServers { - addrs, minTTL, err := r.resolveRecursive(ctx, qstate, depth, name, server, qtype) - if err == nil { - return addrs, minTTL, err - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - depthError = true - } - } - - if depthError { - return nil, 0, ErrMaxDepth - } - return nil, 0, ErrNoResponses -} - -func (r *Resolver) resolveRecursive( - ctx context.Context, - qstate *queryState, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, - qtype dns.Type, -) ([]netip.Addr, time.Duration, error) { - if depth == maxDepth { - r.depthlogf(depth, "not recursing past maximum depth") - return nil, 0, ErrMaxDepth - } - - // Ask this nameserver for an answer. - resp, err := r.queryNameserver(ctx, depth, name, nameserver, qtype) - if err != nil { - return nil, 0, err - } - - // If we get an actual answer from the nameserver, then return it. - var ( - answers []netip.Addr - cnames []dnsname.FQDN - minTTL = 24 * 60 * 60 // 24 hours in seconds - ) - for _, answer := range resp.Answer { - if crec, ok := answer.(*dns.CNAME); ok { - cnameFQDN, err := dnsname.ToFQDN(crec.Target) - if err != nil { - r.logf("bad CNAME %q returned: %v", crec.Target, err) - continue - } - - cnames = append(cnames, cnameFQDN) - continue - } - - addr := addrFromRecord(answer) - if !addr.IsValid() { - r.logf("[unexpected] invalid record in %T answer", answer) - } else if addr.Is4() && qtype != qtypeA { - r.logf("[unexpected] got IPv4 answer but qtype=%v", qtype) - } else if addr.Is6() && qtype != qtypeAAAA { - r.logf("[unexpected] got IPv6 answer but qtype=%v", qtype) - } else { - answers = append(answers, addr) - minTTL = min(minTTL, int(answer.Header().Ttl)) - } - } - - if len(answers) > 0 { - r.depthlogf(depth, "got answers for %q: %v", name, answers) - return answers, time.Duration(minTTL) * time.Second, nil - } - - r.depthlogf(depth, "no answers for %q", name) - - // If we have a non-zero number of CNAMEs, then try resolving those - // (from the root again) and return the first one that succeeds. - // - // TODO: return the union of all responses? - // TODO: parallelism? - if len(cnames) > 0 { - r.depthlogf(depth, "got CNAME responses for %q: %v", name, cnames) - } - var cnameDepthError bool - for _, cname := range cnames { - answers, minTTL, err := r.resolveRecursiveFromRoot(ctx, qstate, depth+1, cname, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - cnameDepthError = true - } - } - - // If this is an authoritative response, then we know that continuing - // to look further is not going to result in any answers and we should - // bail out. - if resp.MsgHdr.Authoritative { - // If we failed to recurse into a CNAME due to a depth limit, - // propagate that here. 
- if cnameDepthError { - return nil, 0, ErrMaxDepth - } - - r.depthlogf(depth, "got authoritative response with no answers; stopping") - return nil, 0, ErrAuthoritativeNoResponses - } - - r.depthlogf(depth, "got %d NS responses and %d ADDITIONAL responses for %q", len(resp.Ns), len(resp.Extra), name) - - // No CNAMEs and no answers; see if we got any AUTHORITY responses, - // which indicate which nameservers to query next. - var authorities []dnsname.FQDN - for _, rr := range resp.Ns { - ns, ok := rr.(*dns.NS) - if !ok { - continue - } - - nsName, err := dnsname.ToFQDN(ns.Ns) - if err != nil { - r.logf("unexpected bad NS name %q: %v", ns.Ns, err) - continue - } - - authorities = append(authorities, nsName) - } - - // Also check for "glue" records, which are IP addresses provided by - // the DNS server for authority responses; these are required when the - // authority server is a subdomain of what's being resolved. - glueRecords := make(map[dnsname.FQDN][]netip.Addr) - for _, rr := range resp.Extra { - name, err := dnsname.ToFQDN(rr.Header().Name) - if err != nil { - r.logf("unexpected bad Name %q in Extra addr: %v", rr.Header().Name, err) - continue - } - - if addr := addrFromRecord(rr); addr.IsValid() { - glueRecords[name] = append(glueRecords[name], addr) - } else { - r.logf("unexpected bad Extra %T addr", rr) - } - } - - // Try authorities with glue records first, to minimize the number of - // additional DNS queries that we need to make. - authoritiesGlue, authoritiesNoGlue := slicesx.Partition(authorities, func(aa dnsname.FQDN) bool { - return len(glueRecords[aa]) > 0 - }) - - authorityDepthError := false - - r.depthlogf(depth, "authorities with glue records for recursion: %v", authoritiesGlue) - for _, authority := range authoritiesGlue { - for _, nameserver := range glueRecords[authority] { - answers, minTTL, err := r.resolveRecursive(ctx, qstate, depth+1, name, nameserver, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - authorityDepthError = true - } - } - } - - r.depthlogf(depth, "authorities with no glue records for recursion: %v", authoritiesNoGlue) - for _, authority := range authoritiesNoGlue { - // First, resolve the IP for the authority server from the - // root, querying for both IPv4 and IPv6 addresses regardless - // of what the current question type is. - // - // TODO: check for infinite recursion; it'll get caught by our - // recursion depth, but we want to bail early. - for _, authorityQtype := range []dns.Type{qtypeAAAA, qtypeA} { - answers, _, err := r.resolveRecursiveFromRoot(ctx, qstate, depth+1, authority, authorityQtype) - if err != nil { - r.depthlogf(depth, "error querying authority %q: %v", authority, err) - continue - } - r.depthlogf(depth, "resolved authority %q (type %v) to: %v", authority, authorityQtype, answers) - - // Now, query this authority for the final address. 
- for _, nameserver := range answers { - answers, minTTL, err := r.resolveRecursive(ctx, qstate, depth+1, name, nameserver, qtype) - if err == nil { - return answers, minTTL, nil - } else if errors.Is(err, ErrAuthoritativeNoResponses) { - return nil, 0, ErrAuthoritativeNoResponses - } else if errors.Is(err, ErrMaxDepth) { - authorityDepthError = true - } - } - } - } - - if authorityDepthError { - return nil, 0, ErrMaxDepth - } - return nil, 0, ErrNoResponses -} - -// queryNameserver sends a query for "name" to the nameserver "nameserver" for -// records of type "qtype", trying both UDP and TCP connections as -// appropriate. -func (r *Resolver) queryNameserver( - ctx context.Context, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, // destination of query - qtype dns.Type, -) (*dns.Msg, error) { - // TODO(andrew): we should QNAME minimisation here to avoid sending the - // full name to intermediate/root nameservers. See: - // https://www.rfc-editor.org/rfc/rfc7816 - - // Handle the case where UDP is blocked by adding an explicit timeout - // for the UDP portion of this query. - udpCtx, udpCtxCancel := context.WithTimeout(ctx, udpQueryTimeout) - defer udpCtxCancel() - - msg, err := r.queryNameserverProto(udpCtx, depth, name, nameserver, "udp", qtype) - if err == nil { - return msg, nil - } - - msg, err2 := r.queryNameserverProto(ctx, depth, name, nameserver, "tcp", qtype) - if err2 == nil { - return msg, nil - } - - return nil, multierr.New(err, err2) -} - -// queryNameserverProto sends a query for "name" to the nameserver "nameserver" -// for records of type "qtype" over the provided protocol (either "udp" -// or "tcp"), and returns the DNS response or an error. -func (r *Resolver) queryNameserverProto( - ctx context.Context, - depth int, - name dnsname.FQDN, // what we're querying - nameserver netip.Addr, // destination of query - protocol string, - qtype dns.Type, -) (resp *dns.Msg, err error) { - if r.testQueryHook != nil { - return r.testQueryHook(name, nameserver, protocol, qtype) - } - - now := r.now() - nameserverStr := nameserver.String() - - cacheKey := dnsQuery{ - nameserver: nameserver, - name: name, - qtype: qtype, - } - cacheEntry, ok := r.queryCache[cacheKey] - if ok && cacheEntry.expiresAt.Before(now) { - r.depthlogf(depth, "using cached response from %s about %q (type: %v)", nameserverStr, name, qtype) - return cacheEntry.Msg, nil - } - - var network string - if nameserver.Is4() { - network = protocol + "4" - } else { - network = protocol + "6" - } - - // Prepare a message asking for an appropriately-typed record - // for the name we're querying. - m := new(dns.Msg) - m.SetEdns0(1232, false /* no DNSSEC */) - m.SetQuestion(name.WithTrailingDot(), uint16(qtype)) - - // Allow mocking out the network components with our exchange hook. - if r.testExchangeHook != nil { - resp, err = r.testExchangeHook(nameserver, network, m) - } else { - // Dial the current nameserver using our dialer. - var nconn net.Conn - nconn, err = r.dialer().DialContext(ctx, network, net.JoinHostPort(nameserverStr, "53")) - if err != nil { - return nil, err - } - - var c dns.Client // TODO: share? - conn := &dns.Conn{ - Conn: nconn, - UDPSize: c.UDPSize, - } - - // Send the DNS request to the current nameserver. 
- r.depthlogf(depth, "asking %s over %s about %q (type: %v)", nameserverStr, protocol, name, qtype) - resp, _, err = c.ExchangeWithConnContext(ctx, m, conn) - } - if err != nil { - return nil, err - } - - // If the message was truncated and we're using UDP, re-run with TCP. - if resp.MsgHdr.Truncated && protocol == "udp" { - r.depthlogf(depth, "response message truncated; re-running query with TCP") - resp, err = r.queryNameserverProto(ctx, depth, name, nameserver, "tcp", qtype) - if err != nil { - return nil, err - } - } - - // Find minimum expiry for all records in this message. - var minTTL int - for _, rr := range resp.Answer { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - for _, rr := range resp.Ns { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - for _, rr := range resp.Extra { - minTTL = min(minTTL, int(rr.Header().Ttl)) - } - - mak.Set(&r.queryCache, cacheKey, dnsMsgWithExpiry{ - Msg: resp, - expiresAt: now.Add(time.Duration(minTTL) * time.Second), - }) - return resp, nil -} - -func addrFromRecord(rr dns.RR) netip.Addr { - switch v := rr.(type) { - case *dns.A: - ip, ok := netip.AddrFromSlice(v.A) - if !ok || !ip.Is4() { - return netip.Addr{} - } - return ip - case *dns.AAAA: - ip, ok := netip.AddrFromSlice(v.AAAA) - if !ok || !ip.Is6() { - return netip.Addr{} - } - return ip - } - return netip.Addr{} -} diff --git a/net/dns/recursive/recursive_test.go b/net/dns/recursive/recursive_test.go deleted file mode 100644 index d47e4cebf70f2..0000000000000 --- a/net/dns/recursive/recursive_test.go +++ /dev/null @@ -1,742 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package recursive - -import ( - "context" - "errors" - "flag" - "fmt" - "net" - "net/netip" - "reflect" - "strings" - "testing" - "time" - - "slices" - - "github.com/miekg/dns" - "tailscale.com/envknob" - "tailscale.com/tstest" -) - -const testDomain = "tailscale.com" - -// Recursively resolving the AWS console requires being able to handle CNAMEs, -// glue records, falling back from UDP to TCP for oversize queries, and more; -// it's a great integration test for DNS resolution and they can handle the -// traffic :) -const complicatedTestDomain = "console.aws.amazon.com" - -var flagNetworkAccess = flag.Bool("enable-network-access", false, "run tests that need external network access") - -func init() { - envknob.Setenv("TS_DEBUG_RECURSIVE_DNS", "true") -} - -func newResolver(tb testing.TB) *Resolver { - clock := tstest.NewClock(tstest.ClockOpts{ - Step: 50 * time.Millisecond, - }) - return &Resolver{ - Logf: tb.Logf, - timeNow: clock.Now, - } -} - -func TestResolve(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - ctx := context.Background() - r := newResolver(t) - addrs, minTTL, err := r.Resolve(ctx, testDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - t.Logf("minTTL: %v", minTTL) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - if minTTL <= 10*time.Second || minTTL >= 24*time.Hour { - t.Errorf("invalid minimum TTL: %v", minTTL) - } - - var has4, has6 bool - for _, addr := range addrs { - has4 = has4 || addr.Is4() - has6 = has6 || addr.Is6() - } - - if !has4 { - t.Errorf("expected at least one IPv4 address") - } - if !has6 { - t.Errorf("expected at least one IPv6 address") - } -} - -func TestResolveComplicated(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - ctx := context.Background() - r := newResolver(t) - addrs, minTTL, err := r.Resolve(ctx, complicatedTestDomain) - if err != nil { - 
t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - t.Logf("minTTL: %v", minTTL) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - if minTTL <= 10*time.Second || minTTL >= 24*time.Hour { - t.Errorf("invalid minimum TTL: %v", minTTL) - } -} - -func TestResolveNoIPv6(t *testing.T) { - if !*flagNetworkAccess { - t.SkipNow() - } - - r := newResolver(t) - r.NoIPv6 = true - - addrs, _, err := r.Resolve(context.Background(), testDomain) - if err != nil { - t.Fatal(err) - } - - t.Logf("addrs: %+v", addrs) - if len(addrs) < 1 { - t.Fatalf("expected at least one address") - } - - for _, addr := range addrs { - if addr.Is6() { - t.Errorf("got unexpected IPv6 address: %v", addr) - } - } -} - -func TestResolveFallbackToTCP(t *testing.T) { - var udpCalls, tcpCalls int - hook := func(nameserver netip.Addr, network string, req *dns.Msg) (*dns.Msg, error) { - if strings.HasPrefix(network, "udp") { - t.Logf("got %q query; returning truncated result", network) - udpCalls++ - resp := &dns.Msg{} - resp.SetReply(req) - resp.Truncated = true - return resp, nil - } - - t.Logf("got %q query; returning real result", network) - tcpCalls++ - resp := &dns.Msg{} - resp.SetReply(req) - resp.Answer = append(resp.Answer, &dns.A{ - Hdr: dns.RR_Header{ - Name: req.Question[0].Name, - Rrtype: req.Question[0].Qtype, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IPv4(1, 2, 3, 4), - }) - return resp, nil - } - - r := newResolver(t) - r.testExchangeHook = hook - - ctx := context.Background() - resp, err := r.queryNameserverProto(ctx, 0, "tailscale.com", netip.MustParseAddr("9.9.9.9"), "udp", dns.Type(dns.TypeA)) - if err != nil { - t.Fatal(err) - } - - if len(resp.Answer) < 1 { - t.Fatalf("no answers in response: %v", resp) - } - rrA, ok := resp.Answer[0].(*dns.A) - if !ok { - t.Fatalf("invalid RR type: %T", resp.Answer[0]) - } - if !rrA.A.Equal(net.IPv4(1, 2, 3, 4)) { - t.Errorf("wanted A response 1.2.3.4, got: %v", rrA.A) - } - if tcpCalls != 1 { - t.Errorf("got %d, want 1 TCP calls", tcpCalls) - } - if udpCalls != 1 { - t.Errorf("got %d, want 1 UDP calls", udpCalls) - } - - // Verify that we're cached and re-run to fetch from the cache. - if len(r.queryCache) < 1 { - t.Errorf("wanted entries in the query cache") - } - - resp2, err := r.queryNameserverProto(ctx, 0, "tailscale.com", netip.MustParseAddr("9.9.9.9"), "udp", dns.Type(dns.TypeA)) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(resp, resp2) { - t.Errorf("expected equal responses; old=%+v new=%+v", resp, resp2) - } - - // We didn't make any more network requests since we loaded from the cache. 
- if tcpCalls != 1 { - t.Errorf("got %d, want 1 TCP calls", tcpCalls) - } - if udpCalls != 1 { - t.Errorf("got %d, want 1 UDP calls", udpCalls) - } -} - -func dnsIPRR(name string, addr netip.Addr) dns.RR { - if addr.Is4() { - return &dns.A{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IP(addr.AsSlice()), - } - } - - return &dns.AAAA{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeAAAA, - Class: dns.ClassINET, - Ttl: 300, - }, - AAAA: net.IP(addr.AsSlice()), - } -} - -func cnameRR(name, target string) dns.RR { - return &dns.CNAME{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeCNAME, - Class: dns.ClassINET, - Ttl: 300, - }, - Target: target, - } -} - -func nsRR(name, target string) dns.RR { - return &dns.NS{ - Hdr: dns.RR_Header{ - Name: name, - Rrtype: dns.TypeNS, - Class: dns.ClassINET, - Ttl: 300, - }, - Ns: target, - } -} - -type mockReply struct { - name string - qtype dns.Type - resp *dns.Msg -} - -type replyMock struct { - tb testing.TB - replies map[netip.Addr][]mockReply -} - -func (r *replyMock) exchangeHook(nameserver netip.Addr, network string, req *dns.Msg) (*dns.Msg, error) { - if len(req.Question) != 1 { - r.tb.Fatalf("unsupported multiple or empty question: %v", req.Question) - } - question := req.Question[0] - - replies := r.replies[nameserver] - if len(replies) == 0 { - r.tb.Fatalf("no configured replies for nameserver: %v", nameserver) - } - - for _, reply := range replies { - if reply.name == question.Name && reply.qtype == dns.Type(question.Qtype) { - return reply.resp.Copy(), nil - } - } - - r.tb.Fatalf("no replies found for query %q of type %v to %v", question.Name, question.Qtype, nameserver) - panic("unreachable") -} - -// responses for mocking, shared between the following tests -var ( - rootServerAddr = netip.MustParseAddr("198.41.0.4") // a.root-servers.net. - comNSAddr = netip.MustParseAddr("192.5.6.30") // a.gtld-servers.net. - - // DNS response from the root nameservers for a .com nameserver - comRecord = &dns.Msg{ - Ns: []dns.RR{nsRR("com.", "a.gtld-servers.net.")}, - Extra: []dns.RR{dnsIPRR("a.gtld-servers.net.", comNSAddr)}, - } - - // Random Amazon nameservers that we use in glue records - amazonNS = netip.MustParseAddr("205.251.192.197") - amazonNSv6 = netip.MustParseAddr("2600:9000:5306:1600::1") - - // Nameservers for the tailscale.com domain - tailscaleNameservers = &dns.Msg{ - Ns: []dns.RR{ - nsRR("tailscale.com.", "ns-197.awsdns-24.com."), - nsRR("tailscale.com.", "ns-557.awsdns-05.net."), - nsRR("tailscale.com.", "ns-1558.awsdns-02.co.uk."), - nsRR("tailscale.com.", "ns-1359.awsdns-41.org."), - }, - Extra: []dns.RR{ - dnsIPRR("ns-197.awsdns-24.com.", amazonNS), - }, - } -) - -func TestBasicRecursion(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver works. 
- amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{ - dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131")), - dnsIPRR("tailscale.com.", netip.MustParseAddr("76.223.15.28")), - }, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{ - dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b")), - dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5")), - }, - }}, - }, - }, - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for tailscale.com, verify we get the right responses - ctx := context.Background() - addrs, minTTL, err := r.Resolve(ctx, "tailscale.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("76.223.15.28"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestNoAnswers(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver returns no responses, authoritatively. 
- amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get the right responses - _, _, err := r.Resolve(context.Background(), "tailscale.com") - if err == nil { - t.Fatalf("got no error, want error") - } - if !errors.Is(err, ErrAuthoritativeNoResponses) { - t.Fatalf("got err=%v, want %v", err, ErrAuthoritativeNoResponses) - } -} - -func TestRecursionCNAME(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver works. 
- amazonNS: { - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR("subdomain.otherdomain.com.", "subdomain.tailscale.com.")}, - }}, - {name: "subdomain.otherdomain.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR("subdomain.otherdomain.com.", "subdomain.tailscale.com.")}, - }}, - - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131"))}, - }}, - {name: "subdomain.tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"))}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get the right responses - addrs, minTTL, err := r.Resolve(context.Background(), "subdomain.otherdomain.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestRecursionNoGlue(t *testing.T) { - coukNS := netip.MustParseAddr("213.248.216.1") - coukRecord := &dns.Msg{ - Ns: []dns.RR{nsRR("com.", "dns1.nic.uk.")}, - Extra: []dns.RR{dnsIPRR("dns1.nic.uk.", coukNS)}, - } - - intermediateNS := netip.MustParseAddr("205.251.193.66") // g-ns-322.awsdns-02.co.uk. - intermediateRecord := &dns.Msg{ - Ns: []dns.RR{nsRR("awsdns-02.co.uk.", "g-ns-322.awsdns-02.co.uk.")}, - Extra: []dns.RR{dnsIPRR("g-ns-322.awsdns-02.co.uk.", intermediateNS)}, - } - - const amazonNameserver = "ns-1558.awsdns-02.co.uk." - tailscaleNameservers := &dns.Msg{ - Ns: []dns.RR{ - nsRR("tailscale.com.", amazonNameserver), - }, - } - - tailscaleResponses := []mockReply{ - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("13.248.141.131"))}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR("tailscale.com.", netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"))}, - }}, - } - - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - rootServerAddr: { - // Query to the root server returns the .com server + a glue record - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - - // Querying the .co.uk nameserver returns the .co.uk nameserver + a glue record. 
- {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: coukRecord}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: coukRecord}, - }, - - // Queries to the ".com" server return the nameservers - // for tailscale.com, which don't contain a glue - // record. - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Queries to the ".co.uk" nameserver returns the - // address of the intermediate Amazon nameserver. - coukNS: { - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: intermediateRecord}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: intermediateRecord}, - }, - - // Queries to the intermediate nameserver returns an - // answer for the final Amazon nameserver. - intermediateNS: { - {name: amazonNameserver, qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR(amazonNameserver, amazonNS)}, - }}, - {name: amazonNameserver, qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{dnsIPRR(amazonNameserver, amazonNSv6)}, - }}, - }, - - // Queries to the actual nameserver work and return - // responses to the query. - amazonNS: tailscaleResponses, - amazonNSv6: tailscaleResponses, - }, - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for tailscale.com, verify we get the right responses - addrs, minTTL, err := r.Resolve(context.Background(), "tailscale.com") - if err != nil { - t.Fatal(err) - } - wantAddrs := []netip.Addr{ - netip.MustParseAddr("13.248.141.131"), - netip.MustParseAddr("2600:9000:a602:b1e6:86d:8165:5e8c:295b"), - } - slices.SortFunc(addrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - slices.SortFunc(wantAddrs, func(x, y netip.Addr) int { return strings.Compare(x.String(), y.String()) }) - - if !reflect.DeepEqual(addrs, wantAddrs) { - t.Errorf("got addrs=%+v; want %+v", addrs, wantAddrs) - } - - const wantMinTTL = 5 * time.Minute - if minTTL != wantMinTTL { - t.Errorf("got minTTL=%+v; want %+v", minTTL, wantMinTTL) - } -} - -func TestRecursionLimit(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{}, - } - - // Fill out a CNAME chain equal to our recursion limit; we won't get - // this far since each CNAME is more than 1 level "deep", but this - // ensures that we have more than the limit. - for i := range maxDepth + 1 { - curr := fmt.Sprintf("%d-tailscale.com.", i) - - tailscaleNameservers := &dns.Msg{ - Ns: []dns.RR{nsRR(curr, "ns-197.awsdns-24.com.")}, - Extra: []dns.RR{dnsIPRR("ns-197.awsdns-24.com.", amazonNS)}, - } - - // Query to the root server returns the .com server + a glue record - mock.replies[rootServerAddr] = append(mock.replies[rootServerAddr], - mockReply{name: curr, qtype: dns.Type(dns.TypeA), resp: comRecord}, - mockReply{name: curr, qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - ) - - // Query to the ".com" server return the nameservers for NN-tailscale.com - mock.replies[comNSAddr] = append(mock.replies[comNSAddr], - mockReply{name: curr, qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - mockReply{name: curr, qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - ) - - // Queries to the nameserver return a CNAME for the n+1th server. 
- next := fmt.Sprintf("%d-tailscale.com.", i+1) - mock.replies[amazonNS] = append(mock.replies[amazonNS], - mockReply{ - name: curr, - qtype: dns.Type(dns.TypeA), - resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR(curr, next)}, - }, - }, - mockReply{ - name: curr, - qtype: dns.Type(dns.TypeAAAA), - resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{cnameRR(curr, next)}, - }, - }, - ) - } - - r := newResolver(t) - r.testExchangeHook = mock.exchangeHook - r.rootServers = []netip.Addr{rootServerAddr} - - // Query for the first node in the chain, 0-tailscale.com, and verify - // we get a max-depth error. - ctx := context.Background() - _, _, err := r.Resolve(ctx, "0-tailscale.com") - if err == nil { - t.Fatal("expected error, got nil") - } else if !errors.Is(err, ErrMaxDepth) { - t.Fatalf("got err=%v, want ErrMaxDepth", err) - } -} - -func TestInvalidResponses(t *testing.T) { - mock := &replyMock{ - tb: t, - replies: map[netip.Addr][]mockReply{ - // Query to the root server returns the .com server + a glue record - rootServerAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: comRecord}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: comRecord}, - }, - - // Query to the ".com" server return the nameservers for tailscale.com - comNSAddr: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: tailscaleNameservers}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: tailscaleNameservers}, - }, - - // Query to the actual nameserver returns an invalid IP address - amazonNS: { - {name: "tailscale.com.", qtype: dns.Type(dns.TypeA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - Answer: []dns.RR{&dns.A{ - Hdr: dns.RR_Header{ - Name: "tailscale.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - // Note: this is an IPv6 addr in an IPv4 response - A: net.IP(netip.MustParseAddr("2600:9000:a51d:27c1:1530:b9ef:2a6:b9e5").AsSlice()), - }}, - }}, - {name: "tailscale.com.", qtype: dns.Type(dns.TypeAAAA), resp: &dns.Msg{ - MsgHdr: dns.MsgHdr{Authoritative: true}, - // This an IPv4 response to an IPv6 query - Answer: []dns.RR{&dns.A{ - Hdr: dns.RR_Header{ - Name: "tailscale.com.", - Rrtype: dns.TypeA, - Class: dns.ClassINET, - Ttl: 300, - }, - A: net.IP(netip.MustParseAddr("13.248.141.131").AsSlice()), - }}, - }}, - }, - }, - } - - r := &Resolver{ - Logf: t.Logf, - testExchangeHook: mock.exchangeHook, - rootServers: []netip.Addr{rootServerAddr}, - } - - // Query for tailscale.com, verify we get no responses since the - // addresses are invalid. 
- _, _, err := r.Resolve(context.Background(), "tailscale.com") - if err == nil { - t.Fatalf("got no error, want error") - } - if !errors.Is(err, ErrAuthoritativeNoResponses) { - t.Fatalf("got err=%v, want %v", err, ErrAuthoritativeNoResponses) - } -} - -// TODO(andrew): test for more edge cases that aren't currently covered: -// * Nameservers that cross between IPv4 and IPv6 -// * Authoritative no replies after following CNAME -// * Authoritative no replies after following non-glue NS record -// * Error querying non-glue NS record followed by success diff --git a/net/dns/resolved.go b/net/dns/resolved.go index 4f58f3f9cc080..d8f63c9d66006 100644 --- a/net/dns/resolved.go +++ b/net/dns/resolved.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build linux && !android +//go:build linux && !android && !ts_omit_resolved package dns @@ -15,8 +15,8 @@ import ( "github.com/godbus/dbus/v5" "golang.org/x/sys/unix" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/dnsname" ) @@ -70,7 +70,11 @@ type resolvedManager struct { configCR chan changeRequest // tracks OSConfigs changes and error responses } -func newResolvedManager(logf logger.Logf, health *health.Tracker, interfaceName string) (*resolvedManager, error) { +func init() { + optNewResolvedManager.Set(newResolvedManager) +} + +func newResolvedManager(logf logger.Logf, health *health.Tracker, interfaceName string) (OSConfigurator, error) { iface, err := net.InterfaceByName(interfaceName) if err != nil { return nil, err diff --git a/net/dns/resolver/debug.go b/net/dns/resolver/debug.go index da195d49d41e5..0f9b106bb2eb4 100644 --- a/net/dns/resolver/debug.go +++ b/net/dns/resolver/debug.go @@ -12,10 +12,14 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" ) func init() { + if !buildfeatures.HasDNS { + return + } health.RegisterDebugHandler("dnsfwd", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { n, _ := strconv.Atoi(r.FormValue("n")) if n <= 0 { diff --git a/net/dns/resolver/forwarder.go b/net/dns/resolver/forwarder.go index c87fbd5041a93..86f0f5b8c48c4 100644 --- a/net/dns/resolver/forwarder.go +++ b/net/dns/resolver/forwarder.go @@ -27,6 +27,8 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/publicdns" "tailscale.com/net/dnscache" @@ -217,11 +219,12 @@ type resolverAndDelay struct { // forwarder forwards DNS packets to a number of upstream nameservers. 
type forwarder struct { - logf logger.Logf - netMon *netmon.Monitor // always non-nil - linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it - dialer *tsdial.Dialer - health *health.Tracker // always non-nil + logf logger.Logf + netMon *netmon.Monitor // always non-nil + linkSel ForwardLinkSelector // TODO(bradfitz): remove this when tsdial.Dialer absorbs it + dialer *tsdial.Dialer + health *health.Tracker // always non-nil + verboseFwd bool // if true, log all DNS forwarding controlKnobs *controlknobs.Knobs // or nil @@ -248,6 +251,9 @@ type forwarder struct { } func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, health *health.Tracker, knobs *controlknobs.Knobs) *forwarder { + if !buildfeatures.HasDNS { + return nil + } if netMon == nil { panic("nil netMon") } @@ -258,6 +264,7 @@ func newForwarder(logf logger.Logf, netMon *netmon.Monitor, linkSel ForwardLinkS dialer: dialer, health: health, controlKnobs: knobs, + verboseFwd: verboseDNSForward(), } f.ctx, f.ctxCancel = context.WithCancel(context.Background()) return f @@ -515,7 +522,7 @@ var ( // // send expects the reply to have the same txid as txidOut. func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDelay) (ret []byte, err error) { - if verboseDNSForward() { + if f.verboseFwd { id := forwarderCount.Add(1) domain, typ, _ := nameFromQuery(fq.packet) f.logf("forwarder.send(%q, %d, %v, %d) [%d] ...", rr.name.Addr, fq.txid, typ, len(domain), id) @@ -524,6 +531,9 @@ func (f *forwarder) send(ctx context.Context, fq *forwardQuery, rr resolverAndDe }() } if strings.HasPrefix(rr.name.Addr, "http://") { + if !buildfeatures.HasPeerAPIClient { + return nil, feature.ErrUnavailable + } return f.sendDoH(ctx, rr.name.Addr, f.dialer.PeerAPIHTTPClient(), fq.packet) } if strings.HasPrefix(rr.name.Addr, "https://") { @@ -748,6 +758,9 @@ var optDNSForwardUseRoutes = envknob.RegisterOptBool("TS_DEBUG_DNS_FORWARD_USE_R // // See tailscale/tailscale#12027. 
func ShouldUseRoutes(knobs *controlknobs.Knobs) bool { + if !buildfeatures.HasDNS { + return false + } switch runtime.GOOS { case "android", "ios": // On mobile platforms with lower memory limits (e.g., 50MB on iOS), @@ -978,7 +991,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo } defer fq.closeOnCtxDone.Close() - if verboseDNSForward() { + if f.verboseFwd { domainSha256 := sha256.Sum256([]byte(domain)) domainSig := base64.RawStdEncoding.EncodeToString(domainSha256[:3]) f.logf("request(%d, %v, %d, %s) %d...", fq.txid, typ, len(domain), domainSig, len(fq.packet)) @@ -1023,7 +1036,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo metricDNSFwdErrorContext.Add(1) return fmt.Errorf("waiting to send response: %w", ctx.Err()) case responseChan <- packet{v, query.family, query.addr}: - if verboseDNSForward() { + if f.verboseFwd { f.logf("response(%d, %v, %d) = %d, nil", fq.txid, typ, len(domain), len(v)) } metricDNSFwdSuccess.Add(1) @@ -1053,7 +1066,7 @@ func (f *forwarder) forwardWithDestChan(ctx context.Context, query packet, respo } f.health.SetUnhealthy(dnsForwarderFailing, health.Args{health.ArgDNSServers: strings.Join(resolverAddrs, ",")}) case responseChan <- res: - if verboseDNSForward() { + if f.verboseFwd { f.logf("forwarder response(%d, %v, %d) = %d, %v", fq.txid, typ, len(domain), len(res.bs), firstErr) } return nil diff --git a/net/dns/resolver/forwarder_test.go b/net/dns/resolver/forwarder_test.go index f7cda15f6a000..ec491c581af99 100644 --- a/net/dns/resolver/forwarder_test.go +++ b/net/dns/resolver/forwarder_test.go @@ -12,7 +12,6 @@ import ( "io" "net" "net/netip" - "os" "reflect" "slices" "strings" @@ -23,13 +22,12 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" - "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsdial" "tailscale.com/tstest" "tailscale.com/types/dnstype" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) func (rr resolverAndDelay) String() string { @@ -124,7 +122,6 @@ func TestResolversWithDelays(t *testing.T) { } }) } - } func TestGetRCode(t *testing.T) { @@ -400,13 +397,6 @@ func runDNSServer(tb testing.TB, opts *testDNSServerOptions, response []byte, on return } -func enableDebug(tb testing.TB) { - const debugKnob = "TS_DEBUG_DNS_FORWARD_SEND" - oldVal := os.Getenv(debugKnob) - envknob.Setenv(debugKnob, "true") - tb.Cleanup(func() { envknob.Setenv(debugKnob, oldVal) }) -} - func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) { name := dns.MustNewName(domain) @@ -455,8 +445,7 @@ func makeLargeResponse(tb testing.TB, domain string) (request, response []byte) func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports ...uint16) ([]byte, error) { logf := tstest.WhileTestRunningLogger(tb) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(tb) netMon, err := netmon.New(bus, logf) if err != nil { tb.Fatal(err) @@ -464,8 +453,9 @@ func runTestQuery(tb testing.TB, request []byte, modify func(*forwarder), ports var dialer tsdial.Dialer dialer.SetNetMon(netMon) + dialer.SetBus(bus) - fwd := newForwarder(logf, netMon, nil, &dialer, new(health.Tracker), nil) + fwd := newForwarder(logf, netMon, nil, &dialer, health.NewTracker(bus), nil) if modify != nil { modify(fwd) } @@ -555,9 +545,11 @@ func mustRunTestQuery(tb testing.TB, request []byte, modify func(*forwarder), po return resp } -func TestForwarderTCPFallback(t 
*testing.T) { - enableDebug(t) +func beVerbose(f *forwarder) { + f.verboseFwd = true +} +func TestForwarderTCPFallback(t *testing.T) { const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -577,7 +569,7 @@ func TestForwarderTCPFallback(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, nil, port) + resp := mustRunTestQuery(t, request, beVerbose, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -593,8 +585,6 @@ func TestForwarderTCPFallback(t *testing.T) { // Test to ensure that if the UDP listener is unresponsive, we always make a // TCP request even if we never get a response. func TestForwarderTCPFallbackTimeout(t *testing.T) { - enableDebug(t) - const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -615,7 +605,7 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } }) - resp := mustRunTestQuery(t, request, nil, port) + resp := mustRunTestQuery(t, request, beVerbose, port) if !bytes.Equal(resp, largeResponse) { t.Errorf("invalid response\ngot: %+v\nwant: %+v", resp, largeResponse) } @@ -625,8 +615,6 @@ func TestForwarderTCPFallbackTimeout(t *testing.T) { } func TestForwarderTCPFallbackDisabled(t *testing.T) { - enableDebug(t) - const domain = "large-dns-response.tailscale.com." // Make a response that's very large, containing a bunch of localhost addresses. @@ -647,6 +635,7 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { }) resp := mustRunTestQuery(t, request, func(fwd *forwarder) { + fwd.verboseFwd = true // Disable retries for this test. fwd.controlKnobs = &controlknobs.Knobs{} fwd.controlKnobs.DisableDNSForwarderTCPRetries.Store(true) @@ -669,8 +658,6 @@ func TestForwarderTCPFallbackDisabled(t *testing.T) { // Test to ensure that we propagate DNS errors func TestForwarderTCPFallbackError(t *testing.T) { - enableDebug(t) - const domain = "error-response.tailscale.com." // Our response is a SERVFAIL @@ -687,7 +674,7 @@ func TestForwarderTCPFallbackError(t *testing.T) { } }) - resp, err := runTestQuery(t, request, nil, port) + resp, err := runTestQuery(t, request, beVerbose, port) if !sawRequest.Load() { t.Error("did not see DNS request") } @@ -707,8 +694,6 @@ func TestForwarderTCPFallbackError(t *testing.T) { // Test to ensure that if we have more than one resolver, and at least one of them // returns a successful response, we propagate it. func TestForwarderWithManyResolvers(t *testing.T) { - enableDebug(t) - const domain = "example.com." request := makeTestRequest(t, domain) @@ -811,7 +796,7 @@ func TestForwarderWithManyResolvers(t *testing.T) { for i := range tt.responses { ports[i] = runDNSServer(t, nil, tt.responses[i], func(isTCP bool, gotRequest []byte) {}) } - gotResponse, err := runTestQuery(t, request, nil, ports...) + gotResponse, err := runTestQuery(t, request, beVerbose, ports...) 
if err != nil { t.Fatalf("wanted nil, got %v", err) } @@ -870,7 +855,7 @@ func TestNXDOMAINIncludesQuestion(t *testing.T) { port := runDNSServer(t, nil, response, func(isTCP bool, gotRequest []byte) { }) - res, err := runTestQuery(t, request, nil, port) + res, err := runTestQuery(t, request, beVerbose, port) if err != nil { t.Fatal(err) } diff --git a/net/dns/resolver/tsdns.go b/net/dns/resolver/tsdns.go index 33fa9c3c07d4c..93cbf3839c923 100644 --- a/net/dns/resolver/tsdns.go +++ b/net/dns/resolver/tsdns.go @@ -25,6 +25,8 @@ import ( dns "golang.org/x/net/dns/dnsmessage" "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/dns/resolvconffile" "tailscale.com/net/netaddr" @@ -254,6 +256,9 @@ func New(logf logger.Logf, linkSel ForwardLinkSelector, dialer *tsdial.Dialer, h func (r *Resolver) TestOnlySetHook(hook func(Config)) { r.saveConfigForTests = hook } func (r *Resolver) SetConfig(cfg Config) error { + if !buildfeatures.HasDNS { + return nil + } if r.saveConfigForTests != nil { r.saveConfigForTests(cfg) } @@ -279,6 +284,9 @@ func (r *Resolver) SetConfig(cfg Config) error { // Close shuts down the resolver and ensures poll goroutines have exited. // The Resolver cannot be used again after Close is called. func (r *Resolver) Close() { + if !buildfeatures.HasDNS { + return + } select { case <-r.closed: return @@ -296,6 +304,9 @@ func (r *Resolver) Close() { const dnsQueryTimeout = 10 * time.Second func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from netip.AddrPort) ([]byte, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } metricDNSQueryLocal.Add(1) select { case <-r.closed: @@ -323,6 +334,9 @@ func (r *Resolver) Query(ctx context.Context, bs []byte, family string, from net // GetUpstreamResolvers returns the resolvers that would be used to resolve // the given FQDN. func (r *Resolver) GetUpstreamResolvers(name dnsname.FQDN) []*dnstype.Resolver { + if !buildfeatures.HasDNS { + return nil + } return r.forwarder.GetUpstreamResolvers(name) } @@ -351,6 +365,9 @@ func parseExitNodeQuery(q []byte) *response { // and a nil error. // TODO: figure out if we even need an error result. func (r *Resolver) HandlePeerDNSQuery(ctx context.Context, q []byte, from netip.AddrPort, allowName func(name string) bool) (res []byte, err error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } metricDNSExitProxyQuery.Add(1) ch := make(chan packet, 1) @@ -427,6 +444,9 @@ var debugExitNodeDNSNetPkg = envknob.RegisterBool("TS_DEBUG_EXIT_NODE_DNS_NET_PK // response contains the pre-serialized response, which notably // includes the original question and its header. func handleExitNodeDNSQueryWithNetPkg(ctx context.Context, logf logger.Logf, resolver *net.Resolver, resp *response) (res []byte, err error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } logf = logger.WithPrefix(logf, "exitNodeDNSQueryWithNetPkg: ") if resp.Question.Class != dns.ClassINET { return nil, errors.New("unsupported class") @@ -1247,6 +1267,9 @@ func (r *Resolver) respondReverse(query []byte, name dnsname.FQDN, resp *respons // respond returns a DNS response to query if it can be resolved locally. // Otherwise, it returns errNotOurName. 
func (r *Resolver) respond(query []byte) ([]byte, error) { + if !buildfeatures.HasDNS { + return nil, feature.ErrUnavailable + } parser := dnsParserPool.Get().(*dnsParser) defer dnsParserPool.Put(parser) diff --git a/net/dns/resolver/tsdns_test.go b/net/dns/resolver/tsdns_test.go index 4bbfd4d6a417e..f0dbb48b33f6e 100644 --- a/net/dns/resolver/tsdns_test.go +++ b/net/dns/resolver/tsdns_test.go @@ -31,7 +31,7 @@ import ( "tailscale.com/types/dnstype" "tailscale.com/types/logger" "tailscale.com/util/dnsname" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) var ( @@ -353,10 +353,13 @@ func TestRDNSNameToIPv6(t *testing.T) { } func newResolver(t testing.TB) *Resolver { + bus := eventbustest.NewBus(t) + dialer := tsdial.NewDialer(netmon.NewStatic()) + dialer.SetBus(bus) return New(t.Logf, nil, // no link selector - tsdial.NewDialer(netmon.NewStatic()), - new(health.Tracker), + dialer, + health.NewTracker(bus), nil, // no control knobs ) } @@ -1060,8 +1063,7 @@ func TestForwardLinkSelection(t *testing.T) { // routes differently. specialIP := netaddr.IPv4(1, 2, 3, 4) - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, ".... netmon: ")) if err != nil { @@ -1074,7 +1076,7 @@ func TestForwardLinkSelection(t *testing.T) { return "special" } return "" - }), new(tsdial.Dialer), new(health.Tracker), nil /* no control knobs */) + }), new(tsdial.Dialer), health.NewTracker(bus), nil /* no control knobs */) // Test non-special IP. if got, err := fwd.packetListener(netip.Addr{}); err != nil { diff --git a/net/dnscache/dnscache.go b/net/dnscache/dnscache.go index d60e92f0b8bbc..94d4bbee7955f 100644 --- a/net/dnscache/dnscache.go +++ b/net/dnscache/dnscache.go @@ -205,6 +205,9 @@ func (r *Resolver) LookupIP(ctx context.Context, host string) (ip, v6 netip.Addr } allIPs = append(allIPs, naIP) } + if !ip.IsValid() && v6.IsValid() { + ip = v6 + } r.dlogf("returning %d static results", len(allIPs)) return } diff --git a/net/dnscache/dnscache_test.go b/net/dnscache/dnscache_test.go index ef4249b7401f3..58bb6cd7f594c 100644 --- a/net/dnscache/dnscache_test.go +++ b/net/dnscache/dnscache_test.go @@ -11,6 +11,7 @@ import ( "net" "net/netip" "reflect" + "slices" "testing" "time" @@ -240,3 +241,60 @@ func TestShouldTryBootstrap(t *testing.T) { }) } } + +func TestSingleHostStaticResult(t *testing.T) { + v4 := netip.MustParseAddr("0.0.0.1") + v6 := netip.MustParseAddr("2001::a") + + tests := []struct { + name string + static []netip.Addr + wantIP netip.Addr + wantIP6 netip.Addr + wantAll []netip.Addr + }{ + { + name: "just-v6", + static: []netip.Addr{v6}, + wantIP: v6, + wantIP6: v6, + wantAll: []netip.Addr{v6}, + }, + { + name: "just-v4", + static: []netip.Addr{v4}, + wantIP: v4, + wantIP6: netip.Addr{}, + wantAll: []netip.Addr{v4}, + }, + { + name: "v6-then-v4", + static: []netip.Addr{v6, v4}, + wantIP: v4, + wantIP6: v6, + wantAll: []netip.Addr{v6, v4}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := &Resolver{ + SingleHost: "example.com", + SingleHostStaticResult: tt.static, + } + ip, ip6, all, err := r.LookupIP(context.Background(), "example.com") + if err != nil { + t.Fatal(err) + } + if ip != tt.wantIP { + t.Errorf("got ip %v; want %v", ip, tt.wantIP) + } + if ip6 != tt.wantIP6 { + t.Errorf("got ip6 %v; want %v", ip6, tt.wantIP6) + } + if !slices.Equal(all, tt.wantAll) { + t.Errorf("got all %v; want %v", all, tt.wantAll) + } + }) + } +} diff --git 
a/net/dnsfallback/dnsfallback.go b/net/dnsfallback/dnsfallback.go index 8e53c3b293cb4..74b625970302b 100644 --- a/net/dnsfallback/dnsfallback.go +++ b/net/dnsfallback/dnsfallback.go @@ -22,35 +22,20 @@ import ( "net/url" "os" "reflect" - "slices" "sync/atomic" "time" "tailscale.com/atomicfile" - "tailscale.com/envknob" + "tailscale.com/feature" "tailscale.com/health" - "tailscale.com/net/dns/recursive" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/tlsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/tailcfg" "tailscale.com/types/logger" - "tailscale.com/util/clientmetric" - "tailscale.com/util/singleflight" "tailscale.com/util/slicesx" ) -var ( - optRecursiveResolver = envknob.RegisterOptBool("TS_DNSFALLBACK_RECURSIVE_RESOLVER") - disableRecursiveResolver = envknob.RegisterBool("TS_DNSFALLBACK_DISABLE_RECURSIVE_RESOLVER") // legacy pre-1.52 env knob name -) - -type resolveResult struct { - addrs []netip.Addr - minTTL time.Duration -} - // MakeLookupFunc creates a function that can be used to resolve hostnames // (e.g. as a LookupIPFallback from dnscache.Resolver). // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. @@ -68,145 +53,13 @@ type fallbackResolver struct { logf logger.Logf netMon *netmon.Monitor // or nil healthTracker *health.Tracker // or nil - sf singleflight.Group[string, resolveResult] // for tests waitForCompare bool } func (fr *fallbackResolver) Lookup(ctx context.Context, host string) ([]netip.Addr, error) { - // If they've explicitly disabled the recursive resolver with the legacy - // TS_DNSFALLBACK_DISABLE_RECURSIVE_RESOLVER envknob or not set the - // newer TS_DNSFALLBACK_RECURSIVE_RESOLVER to true, then don't use the - // recursive resolver. (tailscale/corp#15261) In the future, we might - // change the default (the opt.Bool being unset) to mean enabled. - if disableRecursiveResolver() || !optRecursiveResolver().EqualBool(true) { - return lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) - } - - addrsCh := make(chan []netip.Addr, 1) - - // Run the recursive resolver in the background so we can - // compare the results. For tests, we also allow waiting for the - // comparison to complete; normally, we do this entirely asynchronously - // so as not to block the caller. - var done chan struct{} - if fr.waitForCompare { - done = make(chan struct{}) - go func() { - defer close(done) - fr.compareWithRecursive(ctx, addrsCh, host) - }() - } else { - go fr.compareWithRecursive(ctx, addrsCh, host) - } - - addrs, err := lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) - if err != nil { - addrsCh <- nil - return nil, err - } - - addrsCh <- slices.Clone(addrs) - if fr.waitForCompare { - select { - case <-done: - case <-ctx.Done(): - } - } - return addrs, nil -} - -// compareWithRecursive is responsible for comparing the DNS resolution -// performed via the "normal" path (bootstrap DNS requests to the DERP servers) -// with DNS resolution performed with our in-process recursive DNS resolver. -// -// It will select on addrsCh to read exactly one set of addrs (returned by the -// "normal" path) and compare against the results returned by the recursive -// resolver. If ctx is canceled, then it will abort. 
-func (fr *fallbackResolver) compareWithRecursive( - ctx context.Context, - addrsCh <-chan []netip.Addr, - host string, -) { - logf := logger.WithPrefix(fr.logf, "recursive: ") - - // Ensure that we catch panics while we're testing this - // code path; this should never panic, but we don't - // want to take down the process by having the panic - // propagate to the top of the goroutine's stack and - // then terminate. - defer func() { - if r := recover(); r != nil { - logf("bootstrap DNS: recovered panic: %v", r) - metricRecursiveErrors.Add(1) - } - }() - - // Don't resolve the same host multiple times - // concurrently; if we end up in a tight loop, this can - // take up a lot of CPU. - var didRun bool - result, err, _ := fr.sf.Do(host, func() (resolveResult, error) { - didRun = true - resolver := &recursive.Resolver{ - Dialer: netns.NewDialer(logf, fr.netMon), - Logf: logf, - } - addrs, minTTL, err := resolver.Resolve(ctx, host) - if err != nil { - logf("error using recursive resolver: %v", err) - metricRecursiveErrors.Add(1) - return resolveResult{}, err - } - return resolveResult{addrs, minTTL}, nil - }) - - // The singleflight function handled errors; return if - // there was one. Additionally, don't bother doing the - // comparison if we waited on another singleflight - // caller; the results are likely to be the same, so - // rather than spam the logs we can just exit and let - // the singleflight call that did execute do the - // comparison. - // - // Returning here is safe because the addrsCh channel - // is buffered, so the main function won't block even - // if we never read from it. - if err != nil || !didRun { - return - } - - addrs, minTTL := result.addrs, result.minTTL - compareAddr := func(a, b netip.Addr) int { return a.Compare(b) } - slices.SortFunc(addrs, compareAddr) - - // Wait for a response from the main function; try this once before we - // check whether the context is canceled since selects are - // nondeterministic. - var oldAddrs []netip.Addr - select { - case oldAddrs = <-addrsCh: - // All good; continue - default: - // Now block. - select { - case oldAddrs = <-addrsCh: - case <-ctx.Done(): - return - } - } - slices.SortFunc(oldAddrs, compareAddr) - - matches := slices.Equal(addrs, oldAddrs) - - logf("bootstrap DNS comparison: matches=%v oldAddrs=%v addrs=%v minTTL=%v", matches, oldAddrs, addrs, minTTL) - - if matches { - metricRecursiveMatches.Add(1) - } else { - metricRecursiveMismatches.Add(1) - } + return lookup(ctx, host, fr.logf, fr.healthTracker, fr.netMon) } func lookup(ctx context.Context, host string, logf logger.Logf, ht *health.Tracker, netMon *netmon.Monitor) ([]netip.Addr, error) { @@ -282,7 +135,7 @@ func bootstrapDNSMap(ctx context.Context, serverName string, serverIP netip.Addr dialer := netns.NewDialer(logf, netMon) tr := http.DefaultTransport.(*http.Transport).Clone() tr.DisableKeepAlives = true // This transport is meant to be used once. 
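	// The proxy func is taken from an optional feature hook just below; when
	// no proxy feature is linked into the build the hook is unset and Proxy
	// ends up nil, which net/http treats as "no proxy" for this transport.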
- tr.Proxy = tshttpproxy.ProxyFromEnvironment + tr.Proxy = feature.HookProxyFromEnvironment.GetOrNil() tr.DialContext = func(ctx context.Context, netw, addr string) (net.Conn, error) { return dialer.DialContext(ctx, "tcp", net.JoinHostPort(serverIP.String(), "443")) } @@ -428,9 +281,3 @@ func SetCachePath(path string, logf logger.Logf) { cachedDERPMap.Store(dm) logf("[v2] dnsfallback: SetCachePath loaded cached DERP map") } - -var ( - metricRecursiveMatches = clientmetric.NewCounter("dnsfallback_recursive_matches") - metricRecursiveMismatches = clientmetric.NewCounter("dnsfallback_recursive_mismatches") - metricRecursiveErrors = clientmetric.NewCounter("dnsfallback_recursive_errors") -) diff --git a/net/memnet/listener.go b/net/memnet/listener.go index d84a2e443cbff..202026e160b27 100644 --- a/net/memnet/listener.go +++ b/net/memnet/listener.go @@ -22,6 +22,7 @@ type Listener struct { ch chan Conn closeOnce sync.Once closed chan struct{} + onClose func() // or nil // NewConn, if non-nil, is called to create a new pair of connections // when dialing. If nil, NewConn is used. @@ -44,9 +45,14 @@ func (l *Listener) Addr() net.Addr { // Close closes the pipe listener. func (l *Listener) Close() error { + var cleanup func() l.closeOnce.Do(func() { + cleanup = l.onClose close(l.closed) }) + if cleanup != nil { + cleanup() + } return nil } diff --git a/net/memnet/memnet.go b/net/memnet/memnet.go index 7c2435684059e..1e43df2daaaae 100644 --- a/net/memnet/memnet.go +++ b/net/memnet/memnet.go @@ -61,6 +61,11 @@ func (m *Network) Listen(network, address string) (net.Listener, error) { } ln := Listen(key) m.lns[key] = ln + ln.onClose = func() { + m.mu.Lock() + delete(m.lns, key) + m.mu.Unlock() + } return ln, nil } } diff --git a/net/memnet/memnet_test.go b/net/memnet/memnet_test.go new file mode 100644 index 0000000000000..38086cec05f3c --- /dev/null +++ b/net/memnet/memnet_test.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package memnet + +import "testing" + +func TestListenAddressReuse(t *testing.T) { + var nw Network + ln1, err := nw.Listen("tcp", "127.0.0.1:80") + if err != nil { + t.Fatalf("listen failed: %v", err) + } + if _, err := nw.Listen("tcp", "127.0.0.1:80"); err == nil { + t.Errorf("listen on in-use address succeeded") + } + if err := ln1.Close(); err != nil { + t.Fatalf("close failed: %v", err) + } + if _, err := nw.Listen("tcp", "127.0.0.1:80"); err != nil { + t.Errorf("listen on same address after close failed: %v", err) + } +} diff --git a/net/netcheck/captiveportal.go b/net/netcheck/captiveportal.go new file mode 100644 index 0000000000000..ad11f19a05b6b --- /dev/null +++ b/net/netcheck/captiveportal.go @@ -0,0 +1,55 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_captiveportal + +package netcheck + +import ( + "context" + "time" + + "tailscale.com/net/captivedetection" + "tailscale.com/tailcfg" +) + +func init() { + hookStartCaptivePortalDetection.Set(startCaptivePortalDetection) +} + +func startCaptivePortalDetection(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (done <-chan struct{}, stop func()) { + c := rs.c + + // NOTE(andrew): we can't simply add this goroutine to the + // `NewWaitGroupChan` below, since we don't wait for that + // waitgroup to finish when exiting this function and thus get + // a data race. 
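	// ch is closed exactly once: by the timer callback when detection
	// finishes, or by stop() below if the timer was stopped before firing.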
+ ch := make(chan struct{}) + + tmr := time.AfterFunc(c.captivePortalDelay(), func() { + defer close(ch) + d := captivedetection.NewDetector(c.logf) + found := d.Detect(ctx, c.NetMon, dm, preferredDERP) + rs.report.CaptivePortal.Set(found) + }) + + stop = func() { + // Don't cancel our captive portal check if we're + // explicitly doing a verbose netcheck. + if c.Verbose { + return + } + + if tmr.Stop() { + // Stopped successfully; need to close the + // signal channel ourselves. + close(ch) + return + } + + // Did not stop; do nothing and it'll finish by itself + // and close the signal channel. + } + + return ch, stop +} diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index cb622a339944d..726221675fb03 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -26,14 +26,15 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" - "tailscale.com/net/captivedetection" "tailscale.com/net/dnscache" "tailscale.com/net/neterror" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/ping" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockstats" "tailscale.com/net/stun" "tailscale.com/syncs" @@ -215,7 +216,7 @@ type Client struct { // PortMapper, if non-nil, is used for portmap queries. // If nil, portmap discovery is not done. - PortMapper *portmapper.Client // lazily initialized on first use + PortMapper portmappertype.Client // UseDNSCache controls whether this client should use a // *dnscache.Resolver to resolve DERP hostnames, when no IP address is @@ -730,7 +731,7 @@ func (rs *reportState) probePortMapServices() { res, err := rs.c.PortMapper.Probe(context.Background()) if err != nil { - if !errors.Is(err, portmapper.ErrGatewayRange) { + if !errors.Is(err, portmappertype.ErrGatewayRange) { // "skipping portmap; gateway range likely lacks support" // is not very useful, and too spammy on cloud systems. // If there are other errors, we want to log those. @@ -786,6 +787,8 @@ func (c *Client) SetForcePreferredDERP(region int) { c.ForcePreferredDERP = region } +var hookStartCaptivePortalDetection feature.Hook[func(ctx context.Context, rs *reportState, dm *tailcfg.DERPMap, preferredDERP int) (<-chan struct{}, func())] + // GetReport gets a report. The 'opts' argument is optional and can be nil. // Callers are discouraged from passing a ctx with an arbitrary deadline as this // may cause GetReport to return prematurely before all reporting methods have @@ -910,38 +913,9 @@ func (c *Client) GetReport(ctx context.Context, dm *tailcfg.DERPMap, opts *GetRe // it's unnecessary. captivePortalDone := syncs.ClosedChan() captivePortalStop := func() {} - if !rs.incremental && !onlySTUN { - // NOTE(andrew): we can't simply add this goroutine to the - // `NewWaitGroupChan` below, since we don't wait for that - // waitgroup to finish when exiting this function and thus get - // a data race. - ch := make(chan struct{}) - captivePortalDone = ch - - tmr := time.AfterFunc(c.captivePortalDelay(), func() { - defer close(ch) - d := captivedetection.NewDetector(c.logf) - found := d.Detect(ctx, c.NetMon, dm, preferredDERP) - rs.report.CaptivePortal.Set(found) - }) - - captivePortalStop = func() { - // Don't cancel our captive portal check if we're - // explicitly doing a verbose netcheck. 
- if c.Verbose { - return - } - - if tmr.Stop() { - // Stopped successfully; need to close the - // signal channel ourselves. - close(ch) - return - } - - // Did not stop; do nothing and it'll finish by itself - // and close the signal channel. - } + if buildfeatures.HasCaptivePortal && !rs.incremental && !onlySTUN { + start := hookStartCaptivePortalDetection.Get() + captivePortalDone, captivePortalStop = start(ctx, rs, dm, preferredDERP) } wg := syncs.NewWaitGroupChan() @@ -1099,7 +1073,6 @@ func (c *Client) runHTTPOnlyChecks(ctx context.Context, last *Report, rs *report continue } wg.Add(1) - rg := rg go func() { defer wg.Done() node := rg.Nodes[0] diff --git a/net/netcheck/standalone.go b/net/netcheck/standalone.go index c72d7005f7c7e..b4523a832d463 100644 --- a/net/netcheck/standalone.go +++ b/net/netcheck/standalone.go @@ -13,7 +13,6 @@ import ( "tailscale.com/net/stun" "tailscale.com/types/logger" "tailscale.com/types/nettype" - "tailscale.com/util/multierr" ) // Standalone creates the necessary UDP sockets on the given bindAddr and starts @@ -62,7 +61,7 @@ func (c *Client) Standalone(ctx context.Context, bindAddr string) error { // If both v4 and v6 failed, report an error, otherwise let one succeed. if len(errs) == 2 { - return multierr.New(errs...) + return errors.Join(errs...) } return nil } diff --git a/net/netmon/interfaces_linux.go b/net/netmon/interfaces_linux.go index d0fb15ababe9e..a9b93c0a1ff49 100644 --- a/net/netmon/interfaces_linux.go +++ b/net/netmon/interfaces_linux.go @@ -22,6 +22,7 @@ import ( "github.com/mdlayher/netlink" "go4.org/mem" "golang.org/x/sys/unix" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/netaddr" "tailscale.com/util/lineiter" ) @@ -41,6 +42,9 @@ ens18 00000000 0100000A 0003 0 0 0 00000000 ens18 0000000A 00000000 0001 0 0 0 0000FFFF 0 0 0 */ func likelyHomeRouterIPLinux() (ret netip.Addr, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } if procNetRouteErr.Load() { // If we failed to read /proc/net/route previously, don't keep trying. return ret, myIP, false diff --git a/net/netmon/interfaces_windows.go b/net/netmon/interfaces_windows.go index 00b686e593b1e..d6625ead3cd05 100644 --- a/net/netmon/interfaces_windows.go +++ b/net/netmon/interfaces_windows.go @@ -13,6 +13,7 @@ import ( "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" + "tailscale.com/feature/buildfeatures" "tailscale.com/tsconst" ) @@ -22,7 +23,9 @@ const ( func init() { likelyHomeRouterIP = likelyHomeRouterIPWindows - getPAC = getPACWindows + if buildfeatures.HasUseProxy { + getPAC = getPACWindows + } } func likelyHomeRouterIPWindows() (ret netip.Addr, _ netip.Addr, ok bool) { @@ -244,6 +247,9 @@ const ( ) func getPACWindows() string { + if !buildfeatures.HasUseProxy { + return "" + } var res *uint16 r, _, e := detectAutoProxyConfigURL.Call( winHTTP_AUTO_DETECT_TYPE_DHCP|winHTTP_AUTO_DETECT_TYPE_DNS_A, diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go index 824faeef09b1c..2e28e8cda7895 100644 --- a/net/netmon/loghelper.go +++ b/net/netmon/loghelper.go @@ -4,24 +4,26 @@ package netmon import ( + "context" "sync" "tailscale.com/types/logger" + "tailscale.com/util/eventbus" ) // LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique // format string to the underlying logger only once per major LinkChange event. // -// The returned function should be called when the logger is no longer needed, -// to release resources from the Monitor. 
-func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregister func()) { +// The logger stops tracking seen format strings when the provided context is +// done. +func LinkChangeLogLimiter(ctx context.Context, logf logger.Logf, nm *Monitor) logger.Logf { var formatSeen sync.Map // map[string]bool - unregister = nm.RegisterChangeCallback(func(cd *ChangeDelta) { + nm.b.Monitor(nm.changeDeltaWatcher(nm.b, ctx, func(cd ChangeDelta) { // If we're in a major change or a time jump, clear the seen map. if cd.Major || cd.TimeJumped { formatSeen.Clear() } - }) + })) return func(format string, args ...any) { // We only store 'true' in the map, so if it's present then it @@ -38,5 +40,21 @@ func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregis } logf(format, args...) - }, unregister + } +} + +func (nm *Monitor) changeDeltaWatcher(ec *eventbus.Client, ctx context.Context, fn func(ChangeDelta)) func(*eventbus.Client) { + sub := eventbus.Subscribe[ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ctx.Done(): + return + case <-sub.Done(): + return + case change := <-sub.Events(): + fn(change) + } + } + } } diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go index 44aa46783de07..ca3b1284cfa0e 100644 --- a/net/netmon/loghelper_test.go +++ b/net/netmon/loghelper_test.go @@ -5,13 +5,18 @@ package netmon import ( "bytes" + "context" "fmt" "testing" + "testing/synctest" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" ) -func TestLinkChangeLogLimiter(t *testing.T) { +func TestLinkChangeLogLimiter(t *testing.T) { synctest.Test(t, syncTestLinkChangeLogLimiter) } + +func syncTestLinkChangeLogLimiter(t *testing.T) { bus := eventbus.New() defer bus.Close() mon, err := New(bus, t.Logf) @@ -30,8 +35,10 @@ func TestLinkChangeLogLimiter(t *testing.T) { fmt.Fprintf(&logBuffer, format, args...) } - logf, unregister := LinkChangeLogLimiter(logf, mon) - defer unregister() + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + logf = LinkChangeLogLimiter(ctx, logf, mon) // Log once, which should write to our log buffer. logf("hello %s", "world") @@ -55,25 +62,22 @@ func TestLinkChangeLogLimiter(t *testing.T) { // string cache and allow the next log to write to our log buffer. // // InjectEvent doesn't work because it's not a major event, so we - // instead reach into the netmon and grab the callback, and then call - // it ourselves. - mon.mu.Lock() - var cb func(*ChangeDelta) - for _, c := range mon.cbs { - cb = c - break - } - mon.mu.Unlock() - - cb(&ChangeDelta{Major: true}) + // instead inject the event ourselves. + injector := eventbustest.NewInjector(t, bus) + eventbustest.Inject(injector, ChangeDelta{Major: true}) + synctest.Wait() logf("hello %s", "world") - if got := logBuffer.String(); got != "hello world\nother message\nhello world\n" { - t.Errorf("unexpected log buffer contents: %q", got) + want := "hello world\nother message\nhello world\n" + if got := logBuffer.String(); got != want { + t.Errorf("unexpected log buffer contents, got: %q, want, %q", got, want) } - // Unregistering the callback should clear our 'cbs' set. - unregister() + // Canceling the context we passed to LinkChangeLogLimiter should + // unregister the callback from the netmon. 
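	// synctest.Wait gives the eventbus watcher goroutine a chance to observe
	// ctx.Done() and exit before the monitor's state is inspected below.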
+ cancel() + synctest.Wait() + mon.mu.Lock() if len(mon.cbs) != 0 { t.Errorf("expected no callbacks, got %v", mon.cbs) diff --git a/net/netmon/netmon.go b/net/netmon/netmon.go index b97b184d476f4..f7d1b1107e379 100644 --- a/net/netmon/netmon.go +++ b/net/netmon/netmon.go @@ -14,6 +14,7 @@ import ( "sync" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" @@ -53,7 +54,7 @@ type osMon interface { type Monitor struct { logf logger.Logf b *eventbus.Client - changed *eventbus.Publisher[*ChangeDelta] + changed *eventbus.Publisher[ChangeDelta] om osMon // nil means not supported on this platform change chan bool // send false to wake poller, true to also force ChangeDeltas be sent @@ -84,9 +85,6 @@ type ChangeFunc func(*ChangeDelta) // ChangeDelta describes the difference between two network states. type ChangeDelta struct { - // Monitor is the network monitor that sent this delta. - Monitor *Monitor - // Old is the old interface state, if known. // It's nil if the old state is unknown. // Do not mutate it. @@ -126,7 +124,7 @@ func New(bus *eventbus.Bus, logf logger.Logf) (*Monitor, error) { stop: make(chan struct{}), lastWall: wallTime(), } - m.changed = eventbus.Publish[*ChangeDelta](m.b) + m.changed = eventbus.Publish[ChangeDelta](m.b) st, err := m.interfaceStateUncached() if err != nil { return nil, err @@ -184,6 +182,9 @@ func (m *Monitor) SetTailscaleInterfaceName(ifName string) { // It's the same as interfaces.LikelyHomeRouterIP, but it caches the // result until the monitor detects a network change. func (m *Monitor) GatewayAndSelfIP() (gw, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } if m.static { return } @@ -401,8 +402,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { return } - delta := &ChangeDelta{ - Monitor: m, + delta := ChangeDelta{ Old: oldState, New: newState, TimeJumped: timeJumped, @@ -437,7 +437,7 @@ func (m *Monitor) handlePotentialChange(newState *State, forceCallbacks bool) { } m.changed.Publish(delta) for _, cb := range m.cbs { - go cb(delta) + go cb(&delta) } } diff --git a/net/netmon/netmon_test.go b/net/netmon/netmon_test.go index b8ec1b75f97ec..6a87cedb8e7ea 100644 --- a/net/netmon/netmon_test.go +++ b/net/netmon/netmon_test.go @@ -7,6 +7,7 @@ import ( "flag" "net" "net/netip" + "reflect" "sync/atomic" "testing" "time" @@ -81,7 +82,7 @@ func TestMonitorInjectEventOnBus(t *testing.T) { mon.Start() mon.InjectEvent() - if err := eventbustest.Expect(tw, eventbustest.Type[*ChangeDelta]()); err != nil { + if err := eventbustest.Expect(tw, eventbustest.Type[ChangeDelta]()); err != nil { t.Error(err) } } @@ -143,7 +144,7 @@ func TestMonitorMode(t *testing.T) { <-done t.Logf("%v callbacks", n) case "eventbus": - tw.TimeOut = *monitorDuration + time.AfterFunc(*monitorDuration, bus.Close) n := 0 mon.Start() eventbustest.Expect(tw, func(event *ChangeDelta) (bool, error) { @@ -267,6 +268,45 @@ func TestIsMajorChangeFrom(t *testing.T) { }) } } +func TestForeachInterface(t *testing.T) { + tests := []struct { + name string + addrs []net.Addr + want []string + }{ + { + name: "Mixed_IPv4_and_IPv6", + addrs: []net.Addr{ + &net.IPNet{IP: net.IPv4(1, 2, 3, 4), Mask: net.CIDRMask(24, 32)}, + &net.IPAddr{IP: net.IP{5, 6, 7, 8}, Zone: ""}, + &net.IPNet{IP: net.ParseIP("2001:db8::1"), Mask: net.CIDRMask(64, 128)}, + &net.IPAddr{IP: net.ParseIP("2001:db8::2"), Zone: ""}, + }, + want: []string{"1.2.3.4", "5.6.7.8", "2001:db8::1", 
"2001:db8::2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got []string + ifaces := InterfaceList{ + { + Interface: &net.Interface{Name: "eth0"}, + AltAddrs: tt.addrs, + }, + } + ifaces.ForeachInterface(func(iface Interface, prefixes []netip.Prefix) { + for _, prefix := range prefixes { + ip := prefix.Addr() + got = append(got, ip.String()) + } + }) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("got %q, want %q", got, tt.want) + } + }) + } +} type testOSMon struct { osMon diff --git a/net/netmon/state.go b/net/netmon/state.go index bd09607682bb4..27e3524e8d7c9 100644 --- a/net/netmon/state.go +++ b/net/netmon/state.go @@ -15,10 +15,11 @@ import ( "strings" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" - "tailscale.com/net/tshttpproxy" "tailscale.com/util/mak" ) @@ -182,6 +183,10 @@ func (ifaces InterfaceList) ForeachInterfaceAddress(fn func(Interface, netip.Pre if pfx, ok := netaddr.FromStdIPNet(v); ok { fn(iface, pfx) } + case *net.IPAddr: + if ip, ok := netip.AddrFromSlice(v.IP); ok { + fn(iface, netip.PrefixFrom(ip, ip.BitLen())) + } } } } @@ -214,6 +219,10 @@ func (ifaces InterfaceList) ForeachInterface(fn func(Interface, []netip.Prefix)) if pfx, ok := netaddr.FromStdIPNet(v); ok { pfxs = append(pfxs, pfx) } + case *net.IPAddr: + if ip, ok := netip.AddrFromSlice(v.IP); ok { + pfxs = append(pfxs, netip.PrefixFrom(ip, ip.BitLen())) + } } } sort.Slice(pfxs, func(i, j int) bool { @@ -501,13 +510,15 @@ func getState(optTSInterfaceName string) (*State, error) { } } - if s.AnyInterfaceUp() { + if buildfeatures.HasUseProxy && s.AnyInterfaceUp() { req, err := http.NewRequest("GET", LoginEndpointForProxyDetermination, nil) if err != nil { return nil, err } - if u, err := tshttpproxy.ProxyFromEnvironment(req); err == nil && u != nil { - s.HTTPProxy = u.String() + if proxyFromEnv, ok := feature.HookProxyFromEnvironment.GetOk(); ok { + if u, err := proxyFromEnv(req); err == nil && u != nil { + s.HTTPProxy = u.String() + } } if getPAC != nil { s.PAC = getPAC() @@ -570,6 +581,9 @@ var disableLikelyHomeRouterIPSelf = envknob.RegisterBool("TS_DEBUG_DISABLE_LIKEL // the LAN using that gateway. // This is used as the destination for UPnP, NAT-PMP, PCP, etc queries. func LikelyHomeRouterIP() (gateway, myIP netip.Addr, ok bool) { + if !buildfeatures.HasPortMapper { + return + } // If we don't have a way to get the home router IP, then we can't do // anything; just return. if likelyHomeRouterIP == nil { diff --git a/net/netns/netns_darwin.go b/net/netns/netns_darwin.go index f2ed16601b88e..1f30f00d2a870 100644 --- a/net/netns/netns_darwin.go +++ b/net/netns/netns_darwin.go @@ -33,10 +33,8 @@ var bindToInterfaceByRouteEnv = envknob.RegisterBool("TS_BIND_TO_INTERFACE_BY_RO var errInterfaceStateInvalid = errors.New("interface state invalid") -// controlLogf marks c as necessary to dial in a separate network namespace. -// -// It's intentionally the same signature as net.Dialer.Control -// and net.ListenConfig.Control. +// controlLogf binds c to a particular interface as necessary to dial the +// provided (network, address). func controlLogf(logf logger.Logf, netMon *netmon.Monitor, network, address string, c syscall.RawConn) error { if isLocalhost(address) { // Don't bind to an interface for localhost connections. 
diff --git a/net/netns/netns_linux.go b/net/netns/netns_linux.go index aaf6dab4a9d64..609f524b5cc01 100644 --- a/net/netns/netns_linux.go +++ b/net/netns/netns_linux.go @@ -15,8 +15,8 @@ import ( "golang.org/x/sys/unix" "tailscale.com/envknob" "tailscale.com/net/netmon" + "tailscale.com/tsconst" "tailscale.com/types/logger" - "tailscale.com/util/linuxfw" ) // socketMarkWorksOnce is the sync.Once & cached value for useSocketMark. @@ -111,7 +111,7 @@ func controlC(network, address string, c syscall.RawConn) error { } func setBypassMark(fd uintptr) error { - if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_MARK, linuxfw.TailscaleBypassMarkNum); err != nil { + if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_MARK, tsconst.LinuxBypassMarkNum); err != nil { return fmt.Errorf("setting SO_MARK bypass: %w", err) } return nil diff --git a/net/netns/socks.go b/net/netns/socks.go index ee8dfa20eec7f..9a137db7f5b18 100644 --- a/net/netns/socks.go +++ b/net/netns/socks.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios && !js && !android +//go:build !ios && !js && !android && !ts_omit_useproxy package netns diff --git a/net/packet/checksum/checksum.go b/net/packet/checksum/checksum.go index 547ea3a3577ed..4b5b82174a22f 100644 --- a/net/packet/checksum/checksum.go +++ b/net/packet/checksum/checksum.go @@ -8,8 +8,6 @@ import ( "encoding/binary" "net/netip" - "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/header" "tailscale.com/net/packet" "tailscale.com/types/ipproto" ) @@ -88,13 +86,13 @@ func updateV4PacketChecksums(p *packet.Parsed, old, new netip.Addr) { tr := p.Transport() switch p.IPProto { case ipproto.UDP, ipproto.DCCP: - if len(tr) < header.UDPMinimumSize { + if len(tr) < minUDPSize { // Not enough space for a UDP header. return } updateV4Checksum(tr[6:8], o4[:], n4[:]) case ipproto.TCP: - if len(tr) < header.TCPMinimumSize { + if len(tr) < minTCPSize { // Not enough space for a TCP header. return } @@ -112,34 +110,60 @@ func updateV4PacketChecksums(p *packet.Parsed, old, new netip.Addr) { } } +const ( + minUDPSize = 8 + minTCPSize = 20 + minICMPv6Size = 8 + minIPv6Header = 40 + + offsetICMPv6Checksum = 2 + offsetUDPChecksum = 6 + offsetTCPChecksum = 16 +) + // updateV6PacketChecksums updates the checksums in the packet buffer. // p is modified in place. // If p.IPProto is unknown, no checksums are updated. func updateV6PacketChecksums(p *packet.Parsed, old, new netip.Addr) { - if len(p.Buffer()) < 40 { + if len(p.Buffer()) < minIPv6Header { // Not enough space for an IPv6 header. return } - o6, n6 := tcpip.AddrFrom16Slice(old.AsSlice()), tcpip.AddrFrom16Slice(new.AsSlice()) + o6, n6 := old.As16(), new.As16() // Now update the transport layer checksums, where applicable. 
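	// Only the pseudo-header contribution of the old and new addresses
	// changes here, so each transport checksum below is adjusted
	// incrementally per RFC 1071 section 4 instead of being recomputed.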
tr := p.Transport() switch p.IPProto { case ipproto.ICMPv6: - if len(tr) < header.ICMPv6MinimumSize { + if len(tr) < minICMPv6Size { return } - header.ICMPv6(tr).UpdateChecksumPseudoHeaderAddress(o6, n6) + + ss := tr[offsetICMPv6Checksum:] + xsum := binary.BigEndian.Uint16(ss) + binary.BigEndian.PutUint16(ss, + ^checksumUpdate2ByteAlignedAddress(^xsum, o6, n6)) + case ipproto.UDP, ipproto.DCCP: - if len(tr) < header.UDPMinimumSize { + if len(tr) < minUDPSize { return } - header.UDP(tr).UpdateChecksumPseudoHeaderAddress(o6, n6, true) + ss := tr[offsetUDPChecksum:] + xsum := binary.BigEndian.Uint16(ss) + xsum = ^xsum + xsum = checksumUpdate2ByteAlignedAddress(xsum, o6, n6) + xsum = ^xsum + binary.BigEndian.PutUint16(ss, xsum) case ipproto.TCP: - if len(tr) < header.TCPMinimumSize { + if len(tr) < minTCPSize { return } - header.TCP(tr).UpdateChecksumPseudoHeaderAddress(o6, n6, true) + ss := tr[offsetTCPChecksum:] + xsum := binary.BigEndian.Uint16(ss) + xsum = ^xsum + xsum = checksumUpdate2ByteAlignedAddress(xsum, o6, n6) + xsum = ^xsum + binary.BigEndian.PutUint16(ss, xsum) case ipproto.SCTP: // No transport layer update required. } @@ -195,3 +219,77 @@ func updateV4Checksum(oldSum, old, new []byte) { hcPrime := ^uint16(cPrime) binary.BigEndian.PutUint16(oldSum, hcPrime) } + +// checksumUpdate2ByteAlignedAddress updates an address in a calculated +// checksum. +// +// The addresses must have the same length and must contain an even number +// of bytes. The address MUST begin at a 2-byte boundary in the original buffer. +// +// This implementation is copied from gVisor, but updated to use [16]byte. +func checksumUpdate2ByteAlignedAddress(xsum uint16, old, new [16]byte) uint16 { + const uint16Bytes = 2 + + oldAddr := old[:] + newAddr := new[:] + + // As per RFC 1071 page 4, + // (4) Incremental Update + // + // ... + // + // To update the checksum, simply add the differences of the + // sixteen bit integers that have been changed. To see why this + // works, observe that every 16-bit integer has an additive inverse + // and that addition is associative. From this it follows that + // given the original value m, the new value m', and the old + // checksum C, the new checksum C' is: + // + // C' = C + (-m) + m' = C + (m' - m) + for len(oldAddr) != 0 { + // Convert the 2 byte sequences to uint16 values then apply the increment + // update. + xsum = checksumUpdate2ByteAlignedUint16(xsum, (uint16(oldAddr[0])<<8)+uint16(oldAddr[1]), (uint16(newAddr[0])<<8)+uint16(newAddr[1])) + oldAddr = oldAddr[uint16Bytes:] + newAddr = newAddr[uint16Bytes:] + } + + return xsum +} + +// checksumUpdate2ByteAlignedUint16 updates a uint16 value in a calculated +// checksum. +// +// The value MUST begin at a 2-byte boundary in the original buffer. +// +// This implementation is copied from gVisor. +func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 { + // As per RFC 1071 page 4, + // (4) Incremental Update + // + // ... + // + // To update the checksum, simply add the differences of the + // sixteen bit integers that have been changed. To see why this + // works, observe that every 16-bit integer has an additive inverse + // and that addition is associative. From this it follows that + // given the original value m, the new value m', and the old + // checksum C, the new checksum C' is: + // + // C' = C + (-m) + m' = C + (m' - m) + if old == new { + return xsum + } + return checksumCombine(xsum, checksumCombine(new, ^old)) +} + +// checksumCombine combines the two uint16 to form their checksum. 
This is done +// by adding them and the carry. +// +// Note that checksum a must have been computed on an even number of bytes. +// +// This implementation is copied from gVisor. +func checksumCombine(a, b uint16) uint16 { + v := uint32(a) + uint32(b) + return uint16(v + v>>16) +} diff --git a/net/packet/tsmp.go b/net/packet/tsmp.go index d78d10d36d3bb..0ea321e84eb2a 100644 --- a/net/packet/tsmp.go +++ b/net/packet/tsmp.go @@ -15,7 +15,6 @@ import ( "fmt" "net/netip" - "tailscale.com/net/flowtrack" "tailscale.com/types/ipproto" ) @@ -58,10 +57,6 @@ type TailscaleRejectedHeader struct { const rejectFlagBitMaybeBroken = 0x1 -func (rh TailscaleRejectedHeader) Flow() flowtrack.Tuple { - return flowtrack.MakeTuple(rh.Proto, rh.Src, rh.Dst) -} - func (rh TailscaleRejectedHeader) String() string { return fmt.Sprintf("TSMP-reject-flow{%s %s > %s}: %s", rh.Proto, rh.Src, rh.Dst, rh.Reason) } diff --git a/net/ping/ping.go b/net/ping/ping.go index 01f3dcf2c4976..1ff3862dc65a1 100644 --- a/net/ping/ping.go +++ b/net/ping/ping.go @@ -10,6 +10,7 @@ import ( "context" "crypto/rand" "encoding/binary" + "errors" "fmt" "io" "log" @@ -24,7 +25,6 @@ import ( "golang.org/x/net/ipv6" "tailscale.com/types/logger" "tailscale.com/util/mak" - "tailscale.com/util/multierr" ) const ( @@ -157,17 +157,17 @@ func (p *Pinger) Close() error { p.conns = nil p.mu.Unlock() - var errors []error + var errs []error for _, c := range conns { if err := c.Close(); err != nil { - errors = append(errors, err) + errs = append(errs, err) } } p.wg.Wait() p.cleanupOutstanding() - return multierr.New(errors...) + return errors.Join(errs...) } func (p *Pinger) run(ctx context.Context, conn net.PacketConn, typ string) { diff --git a/net/portmapper/igd_test.go b/net/portmapper/igd_test.go index cca87e0b8238e..77015f5bfb189 100644 --- a/net/portmapper/igd_test.go +++ b/net/portmapper/igd_test.go @@ -14,7 +14,6 @@ import ( "sync/atomic" "testing" - "tailscale.com/control/controlknobs" "tailscale.com/net/netaddr" "tailscale.com/net/netmon" "tailscale.com/syncs" @@ -273,10 +272,9 @@ func newTestClient(t *testing.T, igd *TestIGD, bus *eventbus.Bus) *Client { } var c *Client c = NewClient(Config{ - Logf: tstest.WhileTestRunningLogger(t), - NetMon: netmon.NewStatic(), - ControlKnobs: new(controlknobs.Knobs), - EventBus: bus, + Logf: tstest.WhileTestRunningLogger(t), + NetMon: netmon.NewStatic(), + EventBus: bus, OnChange: func() { // TODO(creachadair): Remove. 
t.Logf("port map changed") t.Logf("have mapping: %v", c.HaveMapping()) diff --git a/net/portmapper/portmapper.go b/net/portmapper/portmapper.go index a1ab868155219..9368d1c4ee05b 100644 --- a/net/portmapper/portmapper.go +++ b/net/portmapper/portmapper.go @@ -8,7 +8,6 @@ package portmapper import ( "context" "encoding/binary" - "errors" "fmt" "io" "net" @@ -20,12 +19,13 @@ import ( "time" "go4.org/mem" - "tailscale.com/control/controlknobs" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/netaddr" "tailscale.com/net/neterror" "tailscale.com/net/netmon" "tailscale.com/net/netns" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockstats" "tailscale.com/syncs" "tailscale.com/types/logger" @@ -34,6 +34,13 @@ import ( "tailscale.com/util/eventbus" ) +var ( + ErrNoPortMappingServices = portmappertype.ErrNoPortMappingServices + ErrGatewayRange = portmappertype.ErrGatewayRange + ErrGatewayIPv6 = portmappertype.ErrGatewayIPv6 + ErrPortMappingDisabled = portmappertype.ErrPortMappingDisabled +) + var disablePortMapperEnv = envknob.RegisterBool("TS_DISABLE_PORTMAPPER") // DebugKnobs contains debug configuration that can be provided when creating a @@ -49,15 +56,33 @@ type DebugKnobs struct { LogHTTP bool // Disable* disables a specific service from mapping. - DisableUPnP bool - DisablePMP bool - DisablePCP bool + // If the funcs are nil or return false, the service is not disabled. + // Use the corresponding accessor methods without the "Func" suffix + // to check whether a service is disabled. + DisableUPnPFunc func() bool + DisablePMPFunc func() bool + DisablePCPFunc func() bool // DisableAll, if non-nil, is a func that reports whether all port // mapping attempts should be disabled. DisableAll func() bool } +// DisableUPnP reports whether UPnP is disabled. +func (k *DebugKnobs) DisableUPnP() bool { + return k != nil && k.DisableUPnPFunc != nil && k.DisableUPnPFunc() +} + +// DisablePMP reports whether NAT-PMP is disabled. +func (k *DebugKnobs) DisablePMP() bool { + return k != nil && k.DisablePMPFunc != nil && k.DisablePMPFunc() +} + +// DisablePCP reports whether PCP is disabled. +func (k *DebugKnobs) DisablePCP() bool { + return k != nil && k.DisablePCPFunc != nil && k.DisablePCPFunc() +} + func (k *DebugKnobs) disableAll() bool { if disablePortMapperEnv() { return true @@ -88,11 +113,10 @@ type Client struct { // The following two fields must both be non-nil. // Both are immutable after construction. pubClient *eventbus.Client - updates *eventbus.Publisher[Mapping] + updates *eventbus.Publisher[portmappertype.Mapping] logf logger.Logf netMon *netmon.Monitor // optional; nil means interfaces will be looked up on-demand - controlKnobs *controlknobs.Knobs ipAndGateway func() (gw, ip netip.Addr, ok bool) onChange func() // or nil debug DebugKnobs @@ -130,6 +154,8 @@ type Client struct { mapping mapping // non-nil if we have a mapping } +var _ portmappertype.Client = (*Client)(nil) + func (c *Client) vlogf(format string, args ...any) { if c.debug.VerboseLogs { c.logf(format, args...) @@ -159,7 +185,6 @@ type mapping interface { MappingDebug() string } -// HaveMapping reports whether we have a current valid mapping. func (c *Client) HaveMapping() bool { c.mu.Lock() defer c.mu.Unlock() @@ -223,10 +248,6 @@ type Config struct { // debugging. If nil, a sensible set of defaults will be used. DebugKnobs *DebugKnobs - // ControlKnobs, if non-nil, specifies knobs from the control plane that - // might disable port mapping. 
- ControlKnobs *controlknobs.Knobs - // OnChange is called to run in a new goroutine whenever the port mapping // status has changed. If nil, no callback is issued. OnChange func() @@ -242,14 +263,16 @@ func NewClient(c Config) *Client { panic("nil EventBus") } ret := &Client{ - logf: c.Logf, - netMon: c.NetMon, - ipAndGateway: netmon.LikelyHomeRouterIP, // TODO(bradfitz): move this to method on netMon - onChange: c.OnChange, - controlKnobs: c.ControlKnobs, + logf: c.Logf, + netMon: c.NetMon, + onChange: c.OnChange, + } + if buildfeatures.HasPortMapper { + // TODO(bradfitz): move this to method on netMon + ret.ipAndGateway = netmon.LikelyHomeRouterIP } ret.pubClient = c.EventBus.Client("portmapper") - ret.updates = eventbus.Publish[Mapping](ret.pubClient) + ret.updates = eventbus.Publish[portmappertype.Mapping](ret.pubClient) if ret.logf == nil { ret.logf = logger.Discard } @@ -448,13 +471,6 @@ func IsNoMappingError(err error) bool { return ok } -var ( - ErrNoPortMappingServices = errors.New("no port mapping services were found") - ErrGatewayRange = errors.New("skipping portmap; gateway range likely lacks support") - ErrGatewayIPv6 = errors.New("skipping portmap; no IPv6 support for portmapping") - ErrPortMappingDisabled = errors.New("port mapping is disabled") -) - // GetCachedMappingOrStartCreatingOne quickly returns with our current cached portmapping, if any. // If there's not one, it starts up a background goroutine to create one. // If the background goroutine ends up creating one, the onChange hook registered with the @@ -512,7 +528,7 @@ func (c *Client) createMapping() { // the control flow to eliminate that possibility. Meanwhile, this // mitigates a panic downstream, cf. #16662. } - c.updates.Publish(Mapping{ + c.updates.Publish(portmappertype.Mapping{ External: mapping.External(), Type: mapping.MappingType(), GoodUntil: mapping.GoodUntil(), @@ -524,15 +540,6 @@ func (c *Client) createMapping() { } } -// Mapping is an event recording the allocation of a port mapping. -type Mapping struct { - External netip.AddrPort - Type string - GoodUntil time.Time - - // TODO(creachadair): Record whether we reused an existing mapping? -} - // wildcardIP is used when the previous external IP is not known for PCP port mapping. var wildcardIP = netip.MustParseAddr("0.0.0.0") @@ -545,7 +552,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter if c.debug.disableAll() { return nil, netip.AddrPort{}, NoMappingError{ErrPortMappingDisabled} } - if c.debug.DisableUPnP && c.debug.DisablePCP && c.debug.DisablePMP { + if c.debug.DisableUPnP() && c.debug.DisablePCP() && c.debug.DisablePMP() { return nil, netip.AddrPort{}, NoMappingError{ErrNoPortMappingServices} } gw, myIP, ok := c.gatewayAndSelfIP() @@ -624,7 +631,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter prevPort = m.External().Port() } - if c.debug.DisablePCP && c.debug.DisablePMP { + if c.debug.DisablePCP() && c.debug.DisablePMP() { c.mu.Unlock() if external, ok := c.getUPnPPortMapping(ctx, gw, internalAddr, prevPort); ok { return nil, external, nil @@ -675,7 +682,7 @@ func (c *Client) createOrGetMapping(ctx context.Context) (mapping mapping, exter pxpAddr := netip.AddrPortFrom(gw, c.pxpPort()) - preferPCP := !c.debug.DisablePCP && (c.debug.DisablePMP || (!haveRecentPMP && haveRecentPCP)) + preferPCP := !c.debug.DisablePCP() && (c.debug.DisablePMP() || (!haveRecentPMP && haveRecentPCP)) // Create a mapping, defaulting to PMP unless only PCP was seen recently. 
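	// That is: PCP is not disabled, and either PMP is disabled or only PCP
	// (not PMP) has been seen recently.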
if preferPCP { @@ -860,19 +867,13 @@ func parsePMPResponse(pkt []byte) (res pmpResponse, ok bool) { return res, true } -type ProbeResult struct { - PCP bool - PMP bool - UPnP bool -} - // Probe returns a summary of which port mapping services are // available on the network. // // If a probe has run recently and there haven't been any network changes since, // the returned result might be server from the Client's cache, without // sending any network traffic. -func (c *Client) Probe(ctx context.Context) (res ProbeResult, err error) { +func (c *Client) Probe(ctx context.Context) (res portmappertype.ProbeResult, err error) { if c.debug.disableAll() { return res, ErrPortMappingDisabled } @@ -907,19 +908,19 @@ func (c *Client) Probe(ctx context.Context) (res ProbeResult, err error) { // https://github.com/tailscale/tailscale/issues/1001 if c.sawPMPRecently() { res.PMP = true - } else if !c.debug.DisablePMP { + } else if !c.debug.DisablePMP() { metricPMPSent.Add(1) uc.WriteToUDPAddrPort(pmpReqExternalAddrPacket, pxpAddr) } if c.sawPCPRecently() { res.PCP = true - } else if !c.debug.DisablePCP { + } else if !c.debug.DisablePCP() { metricPCPSent.Add(1) uc.WriteToUDPAddrPort(pcpAnnounceRequest(myIP), pxpAddr) } if c.sawUPnPRecently() { res.UPnP = true - } else if !c.debug.DisableUPnP { + } else if !c.debug.DisableUPnP() { // Strictly speaking, you discover UPnP services by sending an // SSDP query (which uPnPPacket is) to udp/1900 on the SSDP // multicast address, and then get a flood of responses back diff --git a/net/portmapper/portmapper_test.go b/net/portmapper/portmapper_test.go index e66d3c159eccb..a697a39089635 100644 --- a/net/portmapper/portmapper_test.go +++ b/net/portmapper/portmapper_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "tailscale.com/control/controlknobs" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/util/eventbus/eventbustest" ) @@ -19,7 +19,7 @@ func TestCreateOrGetMapping(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() c.SetLocalPort(1234) for i := range 2 { @@ -35,7 +35,7 @@ func TestClientProbe(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() for i := range 3 { if i > 0 { @@ -50,7 +50,7 @@ func TestClientProbeThenMap(t *testing.T) { if v, _ := strconv.ParseBool(os.Getenv("HIT_NETWORK")); !v { t.Skip("skipping test without HIT_NETWORK=1") } - c := NewClient(Config{Logf: t.Logf, ControlKnobs: new(controlknobs.Knobs)}) + c := NewClient(Config{Logf: t.Logf}) defer c.Close() c.debug.VerboseLogs = true c.SetLocalPort(1234) @@ -150,7 +150,7 @@ func TestUpdateEvent(t *testing.T) { t.Fatalf("Probe failed: %v", err) } c.GetCachedMappingOrStartCreatingOne() - if err := eventbustest.Expect(tw, eventbustest.Type[Mapping]()); err != nil { + if err := eventbustest.Expect(tw, eventbustest.Type[portmappertype.Mapping]()); err != nil { t.Error(err.Error()) } } diff --git a/net/portmapper/portmappertype/portmappertype.go b/net/portmapper/portmappertype/portmappertype.go new file mode 100644 index 0000000000000..cc8358a4aed12 --- /dev/null +++ b/net/portmapper/portmappertype/portmappertype.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// 
SPDX-License-Identifier: BSD-3-Clause + +// Package portmappertype defines the net/portmapper interface, which may or may not be +// linked into the binary. +package portmappertype + +import ( + "context" + "errors" + "net/netip" + "time" + + "tailscale.com/feature" + "tailscale.com/net/netmon" + "tailscale.com/types/logger" + "tailscale.com/util/eventbus" +) + +// HookNewPortMapper is a hook to install the portmapper creation function. +// It must be set by an init function when buildfeatures.HasPortmapper is true. +var HookNewPortMapper feature.Hook[func(logf logger.Logf, + bus *eventbus.Bus, + netMon *netmon.Monitor, + disableUPnPOrNil, + onlyTCP443OrNil func() bool) Client] + +var ( + ErrNoPortMappingServices = errors.New("no port mapping services were found") + ErrGatewayRange = errors.New("skipping portmap; gateway range likely lacks support") + ErrGatewayIPv6 = errors.New("skipping portmap; no IPv6 support for portmapping") + ErrPortMappingDisabled = errors.New("port mapping is disabled") +) + +// ProbeResult is the result of a portmapper probe, saying +// which port mapping protocols were discovered. +type ProbeResult struct { + PCP bool + PMP bool + UPnP bool +} + +// Client is the interface implemented by a portmapper client. +type Client interface { + // Probe returns a summary of which port mapping services are available on + // the network. + // + // If a probe has run recently and there haven't been any network changes + // since, the returned result might be server from the Client's cache, + // without sending any network traffic. + Probe(context.Context) (ProbeResult, error) + + // HaveMapping reports whether we have a current valid mapping. + HaveMapping() bool + + // SetGatewayLookupFunc set the func that returns the machine's default + // gateway IP, and the primary IP address for that gateway. It must be + // called before the client is used. If not called, + // interfaces.LikelyHomeRouterIP is used. + SetGatewayLookupFunc(f func() (gw, myIP netip.Addr, ok bool)) + + // NoteNetworkDown should be called when the network has transitioned to a down state. + // It's too late to release port mappings at this point (the user might've just turned off + // their wifi), but we can make sure we invalidate mappings for later when the network + // comes back. + NoteNetworkDown() + + // GetCachedMappingOrStartCreatingOne quickly returns with our current cached portmapping, if any. + // If there's not one, it starts up a background goroutine to create one. + // If the background goroutine ends up creating one, the onChange hook registered with the + // NewClient constructor (if any) will fire. + GetCachedMappingOrStartCreatingOne() (external netip.AddrPort, ok bool) + + // SetLocalPort updates the local port number to which we want to port + // map UDP traffic + SetLocalPort(localPort uint16) + + Close() error +} + +// Mapping is an event recording the allocation of a port mapping. +type Mapping struct { + External netip.AddrPort + Type string + GoodUntil time.Time + + // TODO(creachadair): Record whether we reused an existing mapping? +} diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index 13418313597f0..d65d6e94d70fd 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -209,7 +209,7 @@ func addAnyPortMapping( // The meta is the most recently parsed UDP discovery packet response // from the Internet Gateway Device. 
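// It returns all-nil results when UPnP has been disabled via the debug knobs.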
func getUPnPRootDevice(ctx context.Context, logf logger.Logf, debug DebugKnobs, gw netip.Addr, meta uPnPDiscoResponse) (rootDev *goupnp.RootDevice, loc *url.URL, err error) { - if debug.DisableUPnP { + if debug.DisableUPnP() { return nil, nil, nil } @@ -434,7 +434,7 @@ func (c *Client) getUPnPPortMapping( internal netip.AddrPort, prevPort uint16, ) (external netip.AddrPort, ok bool) { - if disableUPnpEnv() || c.debug.DisableUPnP || (c.controlKnobs != nil && c.controlKnobs.DisableUPnP.Load()) { + if disableUPnpEnv() || c.debug.DisableUPnP() { return netip.AddrPort{}, false } diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index c07ec020813ed..a954b2beac094 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -18,6 +18,7 @@ import ( "sync/atomic" "testing" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/tstest" ) @@ -1039,7 +1040,7 @@ func (u *upnpServer) handleControl(w http.ResponseWriter, r *http.Request, handl } } -func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) ProbeResult { +func mustProbeUPnP(tb testing.TB, ctx context.Context, c *Client) portmappertype.ProbeResult { tb.Helper() res, err := c.Probe(ctx) if err != nil { diff --git a/net/speedtest/speedtest.go b/net/speedtest/speedtest.go index 7ab0881cc22f9..a462dbeece42b 100644 --- a/net/speedtest/speedtest.go +++ b/net/speedtest/speedtest.go @@ -24,7 +24,7 @@ const ( // conduct the test. type config struct { Version int `json:"version"` - TestDuration time.Duration `json:"time"` + TestDuration time.Duration `json:"time,format:nano"` Direction Direction `json:"direction"` } diff --git a/net/speedtest/speedtest_test.go b/net/speedtest/speedtest_test.go index 55dcbeea1abdf..69fdb6b5685c0 100644 --- a/net/speedtest/speedtest_test.go +++ b/net/speedtest/speedtest_test.go @@ -4,12 +4,22 @@ package speedtest import ( + "flag" "net" "testing" "time" + + "tailscale.com/cmd/testwrapper/flakytest" ) +var manualTest = flag.Bool("do-speedtest", false, "if true, run the speedtest TestDownload test. Otherwise skip it because it's slow and flaky; see https://github.com/tailscale/tailscale/issues/17338") + func TestDownload(t *testing.T) { + if !*manualTest { + t.Skip("skipping slow test without --do-speedtest") + } + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17338") + // start a listener and find the port where the server will be listening. l, err := net.Listen("tcp", ":0") if err != nil { diff --git a/net/tlsdial/blockblame/blockblame.go b/net/tlsdial/blockblame/blockblame.go index 57dc7a6e6d885..5b48dc009b980 100644 --- a/net/tlsdial/blockblame/blockblame.go +++ b/net/tlsdial/blockblame/blockblame.go @@ -9,13 +9,19 @@ package blockblame import ( "crypto/x509" "strings" + "sync" + + "tailscale.com/feature/buildfeatures" ) // VerifyCertificate checks if the given certificate c is issued by a firewall manufacturer // that is known to block Tailscale connections. It returns true and the Manufacturer of // the equipment if it is, or false and nil if it is not. 
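// In builds that omit the debug feature it always reports (nil, false).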
func VerifyCertificate(c *x509.Certificate) (m *Manufacturer, ok bool) { - for _, m := range Manufacturers { + if !buildfeatures.HasDebug { + return nil, false + } + for _, m := range manufacturers() { if m.match != nil && m.match(c) { return m, true } @@ -33,46 +39,56 @@ type Manufacturer struct { match matchFunc } -var Manufacturers = []*Manufacturer{ - { - Name: "Aruba Networks", - match: issuerContains("Aruba"), - }, - { - Name: "Cisco", - match: issuerContains("Cisco"), - }, - { - Name: "Fortinet", - match: matchAny( - issuerContains("Fortinet"), - certEmail("support@fortinet.com"), - ), - }, - { - Name: "Huawei", - match: certEmail("mobile@huawei.com"), - }, - { - Name: "Palo Alto Networks", - match: matchAny( - issuerContains("Palo Alto Networks"), - issuerContains("PAN-FW"), - ), - }, - { - Name: "Sophos", - match: issuerContains("Sophos"), - }, - { - Name: "Ubiquiti", - match: matchAny( - issuerContains("UniFi"), - issuerContains("Ubiquiti"), - ), - }, +func manufacturers() []*Manufacturer { + manufacturersOnce.Do(func() { + manufacturersList = []*Manufacturer{ + { + Name: "Aruba Networks", + match: issuerContains("Aruba"), + }, + { + Name: "Cisco", + match: issuerContains("Cisco"), + }, + { + Name: "Fortinet", + match: matchAny( + issuerContains("Fortinet"), + certEmail("support@fortinet.com"), + ), + }, + { + Name: "Huawei", + match: certEmail("mobile@huawei.com"), + }, + { + Name: "Palo Alto Networks", + match: matchAny( + issuerContains("Palo Alto Networks"), + issuerContains("PAN-FW"), + ), + }, + { + Name: "Sophos", + match: issuerContains("Sophos"), + }, + { + Name: "Ubiquiti", + match: matchAny( + issuerContains("UniFi"), + issuerContains("Ubiquiti"), + ), + }, + } + }) + return manufacturersList } +var ( + manufacturersOnce sync.Once + manufacturersList []*Manufacturer +) + type matchFunc func(*x509.Certificate) bool func issuerContains(s string) matchFunc { diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 80f3bfc06c4e8..ee4771d8db613 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -28,6 +28,7 @@ import ( "tailscale.com/derp/derpconst" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/net/bakedroots" @@ -36,12 +37,6 @@ import ( var counterFallbackOK int32 // atomic -// If SSLKEYLOGFILE is set, it's a file to which we write our TLS private keys -// in a way that WireShark can read. -// -// See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format -var sslKeyLogFile = os.Getenv("SSLKEYLOGFILE") - var debug = envknob.RegisterBool("TS_DEBUG_TLS_DIAL") // tlsdialWarningPrinted tracks whether we've printed a warning about a given @@ -80,13 +75,19 @@ func Config(ht *health.Tracker, base *tls.Config) *tls.Config { // the real TCP connection) because host is the ultimate hostname, but this // tls.Config is used for both the proxy and the ultimate target. - if n := sslKeyLogFile; n != "" { - f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - log.Fatal(err) + if buildfeatures.HasDebug { + // If SSLKEYLOGFILE is set, it's a file to which we write our TLS private keys + // in a way that WireShark can read. 
+ // + // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format + if n := os.Getenv("SSLKEYLOGFILE"); n != "" { + f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + log.Fatal(err) + } + log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n) + conf.KeyLogWriter = f } - log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n) - conf.KeyLogWriter = f } if conf.InsecureSkipVerify { @@ -164,10 +165,12 @@ func Config(ht *health.Tracker, base *tls.Config) *tls.Config { if debug() { log.Printf("tlsdial(sys %q): %v", dialedHost, errSys) } + if !buildfeatures.HasBakedRoots || (errSys == nil && !debug()) { + return errSys + } - // Always verify with our baked-in Let's Encrypt certificate, - // so we can log an informational message. This is useful for - // detecting SSL MiTM. + // If we have baked-in LetsEncrypt roots and we either failed above, or + // debug logging is enabled, also verify with LetsEncrypt. opts.Roots = bakedroots.Get() _, bakedErr := cs.PeerCertificates[0].Verify(opts) if debug() { @@ -239,8 +242,8 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { if debug() { log.Printf("tlsdial(sys %q/%q): %v", c.ServerName, certDNSName, errSys) } - if errSys == nil { - return nil + if !buildfeatures.HasBakedRoots || errSys == nil { + return errSys } opts.Roots = bakedroots.Get() _, err := certs[0].Verify(opts) diff --git a/net/tlsdial/tlsdial_test.go b/net/tlsdial/tlsdial_test.go index e2c4cdd4f51cb..a288d765306e1 100644 --- a/net/tlsdial/tlsdial_test.go +++ b/net/tlsdial/tlsdial_test.go @@ -16,6 +16,7 @@ import ( "tailscale.com/health" "tailscale.com/net/bakedroots" + "tailscale.com/util/eventbus/eventbustest" ) func TestFallbackRootWorks(t *testing.T) { @@ -85,7 +86,7 @@ func TestFallbackRootWorks(t *testing.T) { }, DisableKeepAlives: true, // for test cleanup ease } - ht := new(health.Tracker) + ht := health.NewTracker(eventbustest.NewBus(t)) tr.TLSClientConfig = Config(ht, tr.TLSClientConfig) c := &http.Client{Transport: tr} diff --git a/net/tsdial/tsdial.go b/net/tsdial/tsdial.go index e4e4e9e8b0f92..c7483a125a07a 100644 --- a/net/tsdial/tsdial.go +++ b/net/tsdial/tsdial.go @@ -19,6 +19,8 @@ import ( "time" "github.com/gaissmai/bart" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/dnscache" "tailscale.com/net/netknob" "tailscale.com/net/netmon" @@ -28,6 +30,7 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/util/clientmetric" + "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/testenv" "tailscale.com/version" @@ -44,6 +47,13 @@ func NewDialer(netMon *netmon.Monitor) *Dialer { return d } +// NewFromFuncForDebug is like NewDialer but takes a netx.DialFunc +// and no netMon. It's meant exclusively for the "tailscale debug ts2021" +// debug command, and perhaps tests. +func NewFromFuncForDebug(logf logger.Logf, dial netx.DialFunc) *Dialer { + return &Dialer{sysDialForTest: dial, Logf: logf} +} + // Dialer dials out of tailscaled, while taking care of details while // handling the dozens of edge cases depending on the server mode // (TUN, netstack), the OS network sandboxing style (macOS/iOS @@ -86,6 +96,9 @@ type Dialer struct { dnsCache *dnscache.MessageCache // nil until first non-empty SetExitDNSDoH nextSysConnID int activeSysConns map[int]net.Conn // active connections not yet closed + bus *eventbus.Bus // only used for comparison with already set bus. 
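	// eventClient and eventBusSubs are non-nil once SetBus has been called;
	// Close (and SetNetMon) shuts the watcher down.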
+ eventClient *eventbus.Client + eventBusSubs eventbus.Monitor } // sysConn wraps a net.Conn that was created using d.SystemDial. @@ -125,6 +138,9 @@ func (d *Dialer) TUNName() string { // // For example, "http://100.68.82.120:47830/dns-query". func (d *Dialer) SetExitDNSDoH(doh string) { + if !buildfeatures.HasUseExitNode { + return + } d.mu.Lock() defer d.mu.Unlock() if d.exitDNSDoHBase == doh { @@ -158,6 +174,9 @@ func (d *Dialer) SetRoutes(routes, localRoutes []netip.Prefix) { } func (d *Dialer) Close() error { + if d.eventClient != nil { + d.eventBusSubs.Close() + } d.mu.Lock() defer d.mu.Unlock() d.closed = true @@ -186,6 +205,14 @@ func (d *Dialer) SetNetMon(netMon *netmon.Monitor) { d.netMonUnregister = nil } d.netMon = netMon + // Having multiple watchers could lead to problems, + // so remove the eventClient if it exists. + // This should really not happen, but better checking for it than not. + // TODO(cmol): Should this just be a panic? + if d.eventClient != nil { + d.eventBusSubs.Close() + d.eventClient = nil + } d.netMonUnregister = d.netMon.RegisterChangeCallback(d.linkChanged) } @@ -197,6 +224,38 @@ func (d *Dialer) NetMon() *netmon.Monitor { return d.netMon } +func (d *Dialer) SetBus(bus *eventbus.Bus) { + d.mu.Lock() + defer d.mu.Unlock() + if d.bus == bus { + return + } else if d.bus != nil { + panic("different eventbus has already been set") + } + // Having multiple watchers could lead to problems, + // so unregister the callback if it exists. + if d.netMonUnregister != nil { + d.netMonUnregister() + } + d.bus = bus + d.eventClient = bus.Client("tsdial.Dialer") + d.eventBusSubs = d.eventClient.Monitor(d.linkChangeWatcher(d.eventClient)) +} + +func (d *Dialer) linkChangeWatcher(ec *eventbus.Client) func(*eventbus.Client) { + linkChangeSub := eventbus.Subscribe[netmon.ChangeDelta](ec) + return func(ec *eventbus.Client) { + for { + select { + case <-ec.Done(): + return + case cd := <-linkChangeSub.Events(): + d.linkChanged(&cd) + } + } + } +} + var ( metricLinkChangeConnClosed = clientmetric.NewCounter("tsdial_linkchange_closes") metricChangeDeltaNoDefaultRoute = clientmetric.NewCounter("tsdial_changedelta_no_default_route") @@ -322,7 +381,7 @@ func (d *Dialer) userDialResolve(ctx context.Context, network, addr string) (net } var r net.Resolver - if exitDNSDoH != "" { + if buildfeatures.HasUseExitNode && buildfeatures.HasPeerAPIClient && exitDNSDoH != "" { r.PreferGo = true r.Dial = func(ctx context.Context, network, address string) (net.Conn, error) { return &dohConn{ @@ -377,7 +436,7 @@ func (d *Dialer) SetSystemDialerForTest(fn netx.DialFunc) { // Control and (in the future, as of 2022-04-27) DERPs.. func (d *Dialer) SystemDial(ctx context.Context, network, addr string) (net.Conn, error) { d.mu.Lock() - if d.netMon == nil { + if d.netMon == nil && d.sysDialForTest == nil { d.mu.Unlock() if testenv.InTest() { panic("SystemDial requires a netmon.Monitor; call SetNetMon first") @@ -459,6 +518,9 @@ func (d *Dialer) UserDial(ctx context.Context, network, addr string) (net.Conn, // network must a "tcp" type, and addr must be an ip:port. Name resolution // is not supported. func (d *Dialer) dialPeerAPI(ctx context.Context, network, addr string) (net.Conn, error) { + if !buildfeatures.HasPeerAPIClient { + return nil, feature.ErrUnavailable + } switch network { case "tcp", "tcp6", "tcp4": default: @@ -501,6 +563,9 @@ func (d *Dialer) getPeerDialer() *net.Dialer { // The returned Client must not be mutated; it's owned by the Dialer // and shared by callers. 
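// It panics if the binary was built without the PeerAPI client feature.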
func (d *Dialer) PeerAPIHTTPClient() *http.Client { + if !buildfeatures.HasPeerAPIClient { + panic("unreachable") + } d.peerClientOnce.Do(func() { t := http.DefaultTransport.(*http.Transport).Clone() t.Dial = nil diff --git a/net/tshttpproxy/tshttpproxy_linux.go b/net/tshttpproxy/tshttpproxy_linux.go index b241c256d4798..7e086e4929bc7 100644 --- a/net/tshttpproxy/tshttpproxy_linux.go +++ b/net/tshttpproxy/tshttpproxy_linux.go @@ -9,6 +9,7 @@ import ( "net/http" "net/url" + "tailscale.com/feature/buildfeatures" "tailscale.com/version/distro" ) @@ -17,7 +18,7 @@ func init() { } func linuxSysProxyFromEnv(req *http.Request) (*url.URL, error) { - if distro.Get() == distro.Synology { + if buildfeatures.HasSynology && distro.Get() == distro.Synology { return synologyProxyFromConfigCached(req) } return nil, nil diff --git a/net/tstun/linkattrs_notlinux.go b/net/tstun/linkattrs_notlinux.go deleted file mode 100644 index 77d227934083e..0000000000000 --- a/net/tstun/linkattrs_notlinux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || android - -package tstun - -import "github.com/tailscale/wireguard-go/tun" - -func setLinkAttrs(iface tun.Device) error { - return nil -} diff --git a/net/tstun/netstack_disabled.go b/net/tstun/netstack_disabled.go new file mode 100644 index 0000000000000..c1266b30559d4 --- /dev/null +++ b/net/tstun/netstack_disabled.go @@ -0,0 +1,69 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netstack + +package tstun + +type netstack_PacketBuffer struct { + GSOOptions netstack_GSO +} + +func (*netstack_PacketBuffer) DecRef() { panic("unreachable") } +func (*netstack_PacketBuffer) Size() int { panic("unreachable") } + +type netstack_GSOType int + +const ( + netstack_GSONone netstack_GSOType = iota + netstack_GSOTCPv4 + netstack_GSOTCPv6 + netstack_GSOGvisor +) + +type netstack_GSO struct { + // Type is one of GSONone, GSOTCPv4, etc. + Type netstack_GSOType + // NeedsCsum is set if the checksum offload is enabled. + NeedsCsum bool + // CsumOffset is offset after that to place checksum. + CsumOffset uint16 + + // Mss is maximum segment size. + MSS uint16 + // L3Len is L3 (IP) header length. + L3HdrLen uint16 + + // MaxSize is maximum GSO packet size. 
+ MaxSize uint32 +} + +func (p *netstack_PacketBuffer) NetworkHeader() slicer { + panic("unreachable") +} + +func (p *netstack_PacketBuffer) TransportHeader() slicer { + panic("unreachable") +} + +func (p *netstack_PacketBuffer) ToBuffer() netstack_Buffer { panic("unreachable") } + +func (p *netstack_PacketBuffer) Data() asRanger { + panic("unreachable") +} + +type asRanger struct{} + +func (asRanger) AsRange() toSlicer { panic("unreachable") } + +type toSlicer struct{} + +func (toSlicer) ToSlice() []byte { panic("unreachable") } + +type slicer struct{} + +func (s slicer) Slice() []byte { panic("unreachable") } + +type netstack_Buffer struct{} + +func (netstack_Buffer) Flatten() []byte { panic("unreachable") } diff --git a/net/tstun/netstack_enabled.go b/net/tstun/netstack_enabled.go new file mode 100644 index 0000000000000..8fc1a2e20e35a --- /dev/null +++ b/net/tstun/netstack_enabled.go @@ -0,0 +1,22 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_netstack + +package tstun + +import ( + "gvisor.dev/gvisor/pkg/tcpip/stack" +) + +type ( + netstack_PacketBuffer = stack.PacketBuffer + netstack_GSO = stack.GSO +) + +const ( + netstack_GSONone = stack.GSONone + netstack_GSOTCPv4 = stack.GSOTCPv4 + netstack_GSOTCPv6 = stack.GSOTCPv6 + netstack_GSOGvisor = stack.GSOGvisor +) diff --git a/net/tstun/tun.go b/net/tstun/tun.go index bfdaddf58b283..19b0a53f5be6c 100644 --- a/net/tstun/tun.go +++ b/net/tstun/tun.go @@ -18,12 +18,16 @@ import ( "github.com/tailscale/wireguard-go/tun" "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" ) -// CrateTAP is the hook set by feature/tap. +// CreateTAP is the hook maybe set by feature/tap. var CreateTAP feature.Hook[func(logf logger.Logf, tapName, bridgeName string) (tun.Device, error)] +// HookSetLinkAttrs is the hook maybe set by feature/linkspeed. +var HookSetLinkAttrs feature.Hook[func(tun.Device) error] + // modprobeTunHook is a Linux-specific hook to run "/sbin/modprobe tun". var modprobeTunHook feature.Hook[func() error] @@ -78,8 +82,12 @@ func New(logf logger.Logf, tunName string) (tun.Device, string, error) { dev.Close() return nil, "", err } - if err := setLinkAttrs(dev); err != nil { - logf("setting link attributes: %v", err) + if buildfeatures.HasLinkSpeed { + if f, ok := HookSetLinkAttrs.GetOk(); ok { + if err := f(dev); err != nil { + logf("setting link attributes: %v", err) + } + } } name, err := interfaceName(dev) if err != nil { diff --git a/net/tstun/wrap.go b/net/tstun/wrap.go index 442184065aa92..70cc7118ac208 100644 --- a/net/tstun/wrap.go +++ b/net/tstun/wrap.go @@ -22,10 +22,8 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "go4.org/mem" - "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" - tsmetrics "tailscale.com/metrics" - "tailscale.com/net/connstats" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/packet" "tailscale.com/net/packet/checksum" "tailscale.com/net/tsaddr" @@ -34,6 +32,7 @@ import ( "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/netlogfunc" "tailscale.com/util/clientmetric" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" @@ -204,8 +203,8 @@ type Wrapper struct { // disableTSMPRejected disables TSMP rejected responses. For tests. disableTSMPRejected bool - // stats maintains per-connection counters. 
- stats atomic.Pointer[connstats.Statistics] + // connCounter maintains per-connection counters. + connCounter syncs.AtomicValue[netlogfunc.ConnectionCounter] captureHook syncs.AtomicValue[packet.CaptureCallback] @@ -213,8 +212,8 @@ type Wrapper struct { } type metrics struct { - inboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] - outboundDroppedPacketsTotal *tsmetrics.MultiLabelMap[usermetric.DropLabels] + inboundDroppedPacketsTotal *usermetric.MultiLabelMap[usermetric.DropLabels] + outboundDroppedPacketsTotal *usermetric.MultiLabelMap[usermetric.DropLabels] } func registerMetrics(reg *usermetric.Registry) *metrics { @@ -228,7 +227,7 @@ func registerMetrics(reg *usermetric.Registry) *metrics { type tunInjectedRead struct { // Only one of packet or data should be set, and are read in that order of // precedence. - packet *stack.PacketBuffer + packet *netstack_PacketBuffer data []byte } @@ -312,7 +311,9 @@ func (t *Wrapper) now() time.Time { // // The map ownership passes to the Wrapper. It must be non-nil. func (t *Wrapper) SetDestIPActivityFuncs(m map[netip.Addr]func()) { - t.destIPActivity.Store(m) + if buildfeatures.HasLazyWG { + t.destIPActivity.Store(m) + } } // SetDiscoKey sets the current discovery key. @@ -948,12 +949,14 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { for _, data := range res.data { p.Decode(data[res.dataOffset:]) - if m := t.destIPActivity.Load(); m != nil { - if fn := m[p.Dst.Addr()]; fn != nil { - fn() + if buildfeatures.HasLazyWG { + if m := t.destIPActivity.Load(); m != nil { + if fn := m[p.Dst.Addr()]; fn != nil { + fn() + } } } - if captHook != nil { + if buildfeatures.HasCapture && captHook != nil { captHook(packet.FromLocal, t.now(), p.Buffer(), p.CaptureMeta) } if !t.disableFilter { @@ -973,8 +976,10 @@ func (t *Wrapper) Read(buffs [][]byte, sizes []int, offset int) (int, error) { panic(fmt.Sprintf("short copy: %d != %d", n, len(data)-res.dataOffset)) } sizes[buffsPos] = n - if stats := t.stats.Load(); stats != nil { - stats.UpdateTxVirtual(p.Buffer()) + if buildfeatures.HasNetLog { + if update := t.connCounter.Load(); update != nil { + updateConnCounter(update, p.Buffer(), false) + } } buffsPos++ } @@ -998,7 +1003,10 @@ const ( minTCPHeaderSize = 20 ) -func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { +func stackGSOToTunGSO(pkt []byte, gso netstack_GSO) (tun.GSOOptions, error) { + if !buildfeatures.HasNetstack { + panic("unreachable") + } options := tun.GSOOptions{ CsumStart: gso.L3HdrLen, CsumOffset: gso.CsumOffset, @@ -1006,12 +1014,12 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { NeedsCsum: gso.NeedsCsum, } switch gso.Type { - case stack.GSONone: + case netstack_GSONone: options.GSOType = tun.GSONone return options, nil - case stack.GSOTCPv4: + case netstack_GSOTCPv4: options.GSOType = tun.GSOTCPv4 - case stack.GSOTCPv6: + case netstack_GSOTCPv6: options.GSOType = tun.GSOTCPv6 default: return tun.GSOOptions{}, fmt.Errorf("unsupported gVisor GSOType: %v", gso.Type) @@ -1034,7 +1042,10 @@ func stackGSOToTunGSO(pkt []byte, gso stack.GSO) (tun.GSOOptions, error) { // both before and after partial checksum updates where later checksum // offloading still expects a partial checksum. // TODO(jwhited): plumb partial checksum awareness into net/packet/checksum. 
-func invertGSOChecksum(pkt []byte, gso stack.GSO) { +func invertGSOChecksum(pkt []byte, gso netstack_GSO) { + if !buildfeatures.HasNetstack { + panic("unreachable") + } if gso.NeedsCsum != true { return } @@ -1048,10 +1059,13 @@ func invertGSOChecksum(pkt []byte, gso stack.GSO) { // injectedRead handles injected reads, which bypass filters. func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []int, offset int) (n int, err error) { - var gso stack.GSO + var gso netstack_GSO pkt := outBuffs[0][offset:] if res.packet != nil { + if !buildfeatures.HasNetstack { + panic("unreachable") + } bufN := copy(pkt, res.packet.NetworkHeader().Slice()) bufN += copy(pkt[bufN:], res.packet.TransportHeader().Slice()) bufN += copy(pkt[bufN:], res.packet.Data().AsRange().ToSlice()) @@ -1074,9 +1088,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i pc.snat(p) invertGSOChecksum(pkt, gso) - if m := t.destIPActivity.Load(); m != nil { - if fn := m[p.Dst.Addr()]; fn != nil { - fn() + if buildfeatures.HasLazyWG { + if m := t.destIPActivity.Load(); m != nil { + if fn := m[p.Dst.Addr()]; fn != nil { + fn() + } } } @@ -1089,9 +1105,11 @@ func (t *Wrapper) injectedRead(res tunInjectedRead, outBuffs [][]byte, sizes []i n, err = tun.GSOSplit(pkt, gsoOptions, outBuffs, sizes, offset) } - if stats := t.stats.Load(); stats != nil { - for i := 0; i < n; i++ { - stats.UpdateTxVirtual(outBuffs[i][offset : offset+sizes[i]]) + if buildfeatures.HasNetLog { + if update := t.connCounter.Load(); update != nil { + for i := 0; i < n; i++ { + updateConnCounter(update, outBuffs[i][offset:offset+sizes[i]], false) + } } } @@ -1257,9 +1275,11 @@ func (t *Wrapper) Write(buffs [][]byte, offset int) (int, error) { } func (t *Wrapper) tdevWrite(buffs [][]byte, offset int) (int, error) { - if stats := t.stats.Load(); stats != nil { - for i := range buffs { - stats.UpdateRxVirtual((buffs)[i][offset:]) + if buildfeatures.HasNetLog { + if update := t.connCounter.Load(); update != nil { + for i := range buffs { + updateConnCounter(update, buffs[i][offset:], true) + } } } return t.tdev.Write(buffs, offset) @@ -1297,7 +1317,10 @@ func (t *Wrapper) SetJailedFilter(filt *filter.Filter) { // // This path is typically used to deliver synthesized packets to the // host networking stack. -func (t *Wrapper) InjectInboundPacketBuffer(pkt *stack.PacketBuffer, buffs [][]byte, sizes []int) error { +func (t *Wrapper) InjectInboundPacketBuffer(pkt *netstack_PacketBuffer, buffs [][]byte, sizes []int) error { + if !buildfeatures.HasNetstack { + panic("unreachable") + } buf := buffs[0][PacketStartOffset:] bufN := copy(buf, pkt.NetworkHeader().Slice()) @@ -1436,7 +1459,10 @@ func (t *Wrapper) InjectOutbound(pkt []byte) error { // InjectOutboundPacketBuffer logically behaves as InjectOutbound. It takes ownership of one // reference count on the packet, and the packet may be mutated. The packet refcount will be // decremented after the injected buffer has been read. -func (t *Wrapper) InjectOutboundPacketBuffer(pkt *stack.PacketBuffer) error { +func (t *Wrapper) InjectOutboundPacketBuffer(pkt *netstack_PacketBuffer) error { + if !buildfeatures.HasNetstack { + panic("unreachable") + } size := pkt.Size() if size > MaxPacketSize { pkt.DecRef() @@ -1472,10 +1498,12 @@ func (t *Wrapper) Unwrap() tun.Device { return t.tdev } -// SetStatistics specifies a per-connection statistics aggregator. +// SetConnectionCounter specifies a per-connection statistics aggregator. 
// Nil may be specified to disable statistics gathering. -func (t *Wrapper) SetStatistics(stats *connstats.Statistics) { - t.stats.Store(stats) +func (t *Wrapper) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { + if buildfeatures.HasNetLog { + t.connCounter.Store(fn) + } } var ( @@ -1491,5 +1519,18 @@ var ( ) func (t *Wrapper) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } t.captureHook.Store(cb) } + +func updateConnCounter(update netlogfunc.ConnectionCounter, b []byte, receive bool) { + var p packet.Parsed + p.Decode(b) + if receive { + update(p.IPProto, p.Dst, p.Src, 1, len(b), true) + } else { + update(p.IPProto, p.Src, p.Dst, 1, len(b), false) + } +} diff --git a/net/tstun/wrap_linux.go b/net/tstun/wrap_linux.go index 136ddfe1efb2d..7498f107b5fda 100644 --- a/net/tstun/wrap_linux.go +++ b/net/tstun/wrap_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_gro + package tstun import ( diff --git a/net/tstun/wrap_noop.go b/net/tstun/wrap_noop.go index c743072ca6ba2..8ad04bafe94c1 100644 --- a/net/tstun/wrap_noop.go +++ b/net/tstun/wrap_noop.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || ts_omit_gro package tstun diff --git a/net/tstun/wrap_test.go b/net/tstun/wrap_test.go index 223ee34f4336a..75cf5afb21f8f 100644 --- a/net/tstun/wrap_test.go +++ b/net/tstun/wrap_test.go @@ -5,7 +5,6 @@ package tstun import ( "bytes" - "context" "encoding/binary" "encoding/hex" "expvar" @@ -27,7 +26,6 @@ import ( "gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/tcpip/stack" "tailscale.com/disco" - "tailscale.com/net/connstats" "tailscale.com/net/netaddr" "tailscale.com/net/packet" "tailscale.com/tstest" @@ -370,9 +368,8 @@ func TestFilter(t *testing.T) { }() var buf [MaxPacketSize]byte - stats := connstats.NewStatistics(0, 0, nil) - defer stats.Shutdown(context.Background()) - tun.SetStatistics(stats) + var stats netlogtype.CountsByConnection + tun.SetConnectionCounter(stats.Add) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var n int @@ -380,9 +377,10 @@ func TestFilter(t *testing.T) { var filtered bool sizes := make([]int, 1) - tunStats, _ := stats.TestExtract() + tunStats := stats.Clone() + stats.Reset() if len(tunStats) > 0 { - t.Errorf("connstats.Statistics.Extract = %v, want {}", stats) + t.Errorf("netlogtype.CountsByConnection = %v, want {}", tunStats) } if tt.dir == in { @@ -415,7 +413,8 @@ func TestFilter(t *testing.T) { } } - got, _ := stats.TestExtract() + got := stats.Clone() + stats.Reset() want := map[netlogtype.Connection]netlogtype.Counts{} var wasUDP bool if !tt.drop { diff --git a/net/udprelay/server.go b/net/udprelay/server.go index 123813c165dfc..83831dd698164 100644 --- a/net/udprelay/server.go +++ b/net/udprelay/server.go @@ -31,6 +31,7 @@ import ( "tailscale.com/net/sockopts" "tailscale.com/net/stun" "tailscale.com/net/udprelay/endpoint" + "tailscale.com/net/udprelay/status" "tailscale.com/tstime" "tailscale.com/types/key" "tailscale.com/types/logger" @@ -95,6 +96,8 @@ type serverEndpoint struct { boundAddrPorts [2]netip.AddrPort // or zero value if a handshake has never completed for that relay leg lastSeen [2]time.Time // TODO(jwhited): consider using mono.Time challenge [2][disco.BindUDPRelayChallengeLen]byte + packetsRx [2]uint64 // num packets received from/sent by each client after they are bound + bytesRx [2]uint64 // num 
bytes received from/sent by each client after they are bound lamportID uint64 vni uint32 @@ -223,9 +226,13 @@ func (e *serverEndpoint) handlePacket(from netip.AddrPort, gh packet.GeneveHeade switch { case from == e.boundAddrPorts[0]: e.lastSeen[0] = time.Now() + e.packetsRx[0]++ + e.bytesRx[0] += uint64(len(b)) return b, e.boundAddrPorts[1] case from == e.boundAddrPorts[1]: e.lastSeen[1] = time.Now() + e.packetsRx[1]++ + e.bytesRx[1] += uint64(len(b)) return b, e.boundAddrPorts[0] default: // unrecognized source @@ -529,6 +536,7 @@ func (s *Server) listenOn(port int) error { s.uc6 = bc s.uc6Port = uint16(portUint) } + s.logf("listening on %s:%d", network, portUint) } return nil } @@ -782,3 +790,41 @@ func (s *Server) AllocateEndpoint(discoA, discoB key.DiscoPublic) (endpoint.Serv SteadyStateLifetime: tstime.GoDuration{Duration: s.steadyStateLifetime}, }, nil } + +// extractClientInfo constructs a [status.ClientInfo] for one of the two peer +// relay clients involved in this session. +func extractClientInfo(idx int, ep *serverEndpoint) status.ClientInfo { + if idx != 0 && idx != 1 { + panic(fmt.Sprintf("idx passed to extractClientInfo() must be 0 or 1; got %d", idx)) + } + + return status.ClientInfo{ + Endpoint: ep.boundAddrPorts[idx], + ShortDisco: ep.discoPubKeys.Get()[idx].ShortString(), + PacketsTx: ep.packetsRx[idx], + BytesTx: ep.bytesRx[idx], + } +} + +// GetSessions returns a slice of peer relay session statuses, with each +// entry containing detailed info about the server and clients involved in +// each session. This information is intended for debugging/status UX, and +// should not be relied on for any purpose outside of that. +func (s *Server) GetSessions() []status.ServerSession { + s.mu.Lock() + defer s.mu.Unlock() + if s.closed { + return nil + } + var sessions = make([]status.ServerSession, 0, len(s.byDisco)) + for _, se := range s.byDisco { + c1 := extractClientInfo(0, se) + c2 := extractClientInfo(1, se) + sessions = append(sessions, status.ServerSession{ + VNI: se.vni, + Client1: c1, + Client2: c2, + }) + } + return sessions +} diff --git a/net/udprelay/status/status.go b/net/udprelay/status/status.go new file mode 100644 index 0000000000000..3866efada2542 --- /dev/null +++ b/net/udprelay/status/status.go @@ -0,0 +1,75 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package status contains types relating to the status of peer relay sessions +// between peer relay client nodes via a peer relay server. +package status + +import ( + "net/netip" +) + +// ServerStatus contains the listening UDP port and active sessions (if any) for +// this node's peer relay server at a point in time. +type ServerStatus struct { + // UDPPort is the UDP port number that the peer relay server forwards over, + // as configured by the user with 'tailscale set --relay-server-port='. + // If the port has not been configured, UDPPort will be nil. + UDPPort *int + // Sessions is a slice of detailed status information about each peer + // relay session that this node's peer relay server is involved with. It + // may be empty. + Sessions []ServerSession +} + +// ClientInfo contains status-related information about a single peer relay +// client involved in a single peer relay session. +type ClientInfo struct { + // Endpoint is the [netip.AddrPort] of this peer relay client's underlay + // endpoint participating in the session, or a zero value if the client + // has not completed a handshake. 
+ Endpoint netip.AddrPort + // ShortDisco is a string representation of this peer relay client's disco + // public key. + // + // TODO: disco keys are pretty meaningless to end users, and they are also + // ephemeral. We really need node keys (or translation to first ts addr), + // but those are not fully plumbed into the [udprelay.Server]. Disco keys + // can also be ambiguous to a node key, but we could add node key into a + // [disco.AllocateUDPRelayEndpointRequest] in similar fashion to + // [disco.Ping]. There's also the problem of netmap trimming, where we + // can't verify a node key maps to a disco key. + ShortDisco string + // PacketsTx is the number of packets this peer relay client has sent to + // the other client via the relay server after completing a handshake. This + // is identical to the number of packets that the peer relay server has + // received from this client. + PacketsTx uint64 + // BytesTx is the total overlay bytes this peer relay client has sent to + // the other client via the relay server after completing a handshake. This + // is identical to the total overlay bytes that the peer relay server has + // received from this client. + BytesTx uint64 +} + +// ServerSession contains status information for a single session between two +// peer relay clients, which are relayed via one peer relay server. This is the +// status as seen by the peer relay server; each client node may have a +// different view of the session's current status based on connectivity and +// where the client is in the peer relay endpoint setup (allocation, binding, +// pinging, active). +type ServerSession struct { + // VNI is the Virtual Network Identifier for this peer relay session, which + // comes from the Geneve header and is unique to this session. + VNI uint32 + // Client1 contains status information about one of the two peer relay + // clients involved in this session. Note that 'Client1' does NOT mean this + // was/wasn't the allocating client, or the first client to bind, etc; this + // is just one client of two. + Client1 ClientInfo + // Client2 contains status information about one of the two peer relay + // clients involved in this session. Note that 'Client2' does NOT mean this + // was/wasn't the allocating client, or the second client to bind, etc; this + // is just one client of two. + Client2 ClientInfo +} diff --git a/net/wsconn/wsconn.go b/net/wsconn/wsconn.go index 22b511ea81273..3c83ffd8c320f 100644 --- a/net/wsconn/wsconn.go +++ b/net/wsconn/wsconn.go @@ -2,9 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause // Package wsconn contains an adapter type that turns -// a websocket connection into a net.Conn. It a temporary fork of the -// netconn.go file from the github.com/coder/websocket package while we wait for -// https://github.com/nhooyr/websocket/pull/350 to be merged. +// a websocket connection into a net.Conn. package wsconn import ( diff --git a/prober/derp.go b/prober/derp.go index c7a82317dcabc..52e56fd4eff1e 100644 --- a/prober/derp.go +++ b/prober/derp.go @@ -8,6 +8,7 @@ import ( "cmp" "context" crand "crypto/rand" + "crypto/tls" "encoding/binary" "encoding/json" "errors" @@ -68,7 +69,7 @@ type derpProber struct { ProbeMap ProbeClass // Probe classes for probing individual derpers. 
- tlsProbeFn func(string) ProbeClass + tlsProbeFn func(string, *tls.Config) ProbeClass udpProbeFn func(string, int) ProbeClass meshProbeFn func(string, string) ProbeClass bwProbeFn func(string, string, int64) ProbeClass @@ -206,7 +207,7 @@ func (d *derpProber) probeMapFn(ctx context.Context) error { if d.probes[n] == nil { log.Printf("adding DERP TLS probe for %s (%s) every %v", server.Name, region.RegionName, d.tlsInterval) derpPort := cmp.Or(server.DERPPort, 443) - d.probes[n] = d.p.Run(n, d.tlsInterval, labels, d.tlsProbeFn(fmt.Sprintf("%s:%d", server.HostName, derpPort))) + d.probes[n] = d.p.Run(n, d.tlsInterval, labels, d.tlsProbeFn(fmt.Sprintf("%s:%d", server.HostName, derpPort), nil)) } } diff --git a/prober/derp_test.go b/prober/derp_test.go index 93b8d760b3f18..08a65d6978f13 100644 --- a/prober/derp_test.go +++ b/prober/derp_test.go @@ -16,6 +16,7 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netmon" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -74,7 +75,7 @@ func TestDerpProber(t *testing.T) { p: p, derpMapURL: srv.URL, tlsInterval: time.Second, - tlsProbeFn: func(_ string) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, + tlsProbeFn: func(_ string, _ *tls.Config) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, udpInterval: time.Second, udpProbeFn: func(_ string, _ int) ProbeClass { return FuncProbe(func(context.Context) error { return nil }) }, meshInterval: time.Second, @@ -145,12 +146,12 @@ func TestDerpProber(t *testing.T) { func TestRunDerpProbeNodePair(t *testing.T) { // os.Setenv("DERP_DEBUG_LOGS", "true") serverPrivateKey := key.NewNode() - s := derp.NewServer(serverPrivateKey, t.Logf) + s := derpserver.New(serverPrivateKey, t.Logf) defer s.Close() httpsrv := &http.Server{ TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: derphttp.Handler(s), + Handler: derpserver.Handler(s), } ln, err := net.Listen("tcp4", "localhost:0") if err != nil { diff --git a/prober/dns_example_test.go b/prober/dns_example_test.go index a8326fd721232..089816919489a 100644 --- a/prober/dns_example_test.go +++ b/prober/dns_example_test.go @@ -5,6 +5,7 @@ package prober_test import ( "context" + "crypto/tls" "flag" "fmt" "log" @@ -40,7 +41,7 @@ func ExampleForEachAddr() { // This function is called every time we discover a new IP address to check. makeTLSProbe := func(addr netip.Addr) []*prober.Probe { - pf := prober.TLSWithIP(*hostname, netip.AddrPortFrom(addr, 443)) + pf := prober.TLSWithIP(netip.AddrPortFrom(addr, 443), &tls.Config{ServerName: *hostname}) if *verbose { logger := logger.WithPrefix(log.Printf, fmt.Sprintf("[tls %s]: ", addr)) pf = probeLogWrapper(logger, pf) diff --git a/prober/prober.go b/prober/prober.go index af0e199343b2d..9073a95029163 100644 --- a/prober/prober.go +++ b/prober/prober.go @@ -317,7 +317,7 @@ func (p *Probe) loop() { p.run() // Wait and then retry if probe fails. We use the inverse of the // configured negative interval as our sleep period. - // TODO(percy):implement exponential backoff, possibly using logtail/backoff. + // TODO(percy):implement exponential backoff, possibly using util/backoff. 
select { case <-time.After(-1 * p.interval): p.run() diff --git a/prober/tls.go b/prober/tls.go index 4fb4aa9c6becf..3ce5354357d71 100644 --- a/prober/tls.go +++ b/prober/tls.go @@ -7,14 +7,13 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "io" - "net" "net/http" "net/netip" + "slices" "time" - - "tailscale.com/util/multierr" ) const expiresSoon = 7 * 24 * time.Hour // 7 days from now @@ -28,33 +27,31 @@ const letsEncryptStartedStaplingCRL int64 = 1746576000 // 2025-05-07 00:00:00 UT // The ProbeFunc connects to a hostPort (host:port string), does a TLS // handshake, verifies that the hostname matches the presented certificate, // checks certificate validity time and OCSP revocation status. -func TLS(hostPort string) ProbeClass { +// +// The TLS config is optional and may be nil. +func TLS(hostPort string, config *tls.Config) ProbeClass { return ProbeClass{ Probe: func(ctx context.Context) error { - certDomain, _, err := net.SplitHostPort(hostPort) - if err != nil { - return err - } - return probeTLS(ctx, certDomain, hostPort) + return probeTLS(ctx, config, hostPort) }, Class: "tls", } } -// TLSWithIP is like TLS, but dials the provided dialAddr instead -// of using DNS resolution. The certDomain is the expected name in -// the cert (and the SNI name to send). -func TLSWithIP(certDomain string, dialAddr netip.AddrPort) ProbeClass { +// TLSWithIP is like TLS, but dials the provided dialAddr instead of using DNS +// resolution. Use config.ServerName to send SNI and validate the name in the +// cert. +func TLSWithIP(dialAddr netip.AddrPort, config *tls.Config) ProbeClass { return ProbeClass{ Probe: func(ctx context.Context) error { - return probeTLS(ctx, certDomain, dialAddr.String()) + return probeTLS(ctx, config, dialAddr.String()) }, Class: "tls", } } -func probeTLS(ctx context.Context, certDomain string, dialHostPort string) error { - dialer := &tls.Dialer{Config: &tls.Config{ServerName: certDomain}} +func probeTLS(ctx context.Context, config *tls.Config, dialHostPort string) error { + dialer := &tls.Dialer{Config: config} conn, err := dialer.DialContext(ctx, "tcp", dialHostPort) if err != nil { return fmt.Errorf("connecting to %q: %w", dialHostPort, err) @@ -71,7 +68,7 @@ func probeTLS(ctx context.Context, certDomain string, dialHostPort string) error func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr error) { var errs []error defer func() { - returnerr = multierr.New(errs...) + returnerr = errors.Join(errs...) }() latestAllowedExpiration := time.Now().Add(expiresSoon) @@ -108,6 +105,10 @@ func validateConnState(ctx context.Context, cs *tls.ConnectionState) (returnerr } if len(leafCert.CRLDistributionPoints) == 0 { + if !slices.Contains(leafCert.Issuer.Organization, "Let's Encrypt") { + // LE certs contain a CRL, but certs from other CAs might not. + return + } if leafCert.NotBefore.Before(time.Unix(letsEncryptStartedStaplingCRL, 0)) { // Certificate might not have a CRL. 
return diff --git a/prober/tls_test.go b/prober/tls_test.go index f6ca4aeb19be6..86fba91b98836 100644 --- a/prober/tls_test.go +++ b/prober/tls_test.go @@ -83,7 +83,7 @@ func TestTLSConnection(t *testing.T) { srv.StartTLS() defer srv.Close() - err = probeTLS(context.Background(), "fail.example.com", srv.Listener.Addr().String()) + err = probeTLS(context.Background(), &tls.Config{ServerName: "fail.example.com"}, srv.Listener.Addr().String()) // The specific error message here is platform-specific ("certificate is not trusted" // on macOS and "certificate signed by unknown authority" on Linux), so only check // that it contains the word 'certificate'. @@ -269,40 +269,54 @@ func TestCRL(t *testing.T) { name string cert *x509.Certificate crlBytes []byte + issuer pkix.Name wantErr string }{ { "ValidCert", leafCertParsed, emptyRlBytes, + caCert.Issuer, "", }, { "RevokedCert", leafCertParsed, rlBytes, + caCert.Issuer, "has been revoked on", }, { "EmptyCRL", leafCertParsed, emptyRlBytes, + caCert.Issuer, "", }, { - "NoCRL", + "NoCRLLetsEncrypt", leafCertParsed, nil, + pkix.Name{CommonName: "tlsprobe.test", Organization: []string{"Let's Encrypt"}}, "no CRL server presented in leaf cert for", }, + { + "NoCRLOtherCA", + leafCertParsed, + nil, + caCert.Issuer, + "", + }, { "NotBeforeCRLStaplingDate", noCRLStapledParsed, nil, + caCert.Issuer, "", }, } { t.Run(tt.name, func(t *testing.T) { + tt.cert.Issuer = tt.issuer cs := &tls.ConnectionState{PeerCertificates: []*x509.Certificate{tt.cert, caCert}} if tt.crlBytes != nil { crlServer.crlBytes = tt.crlBytes diff --git a/pull-toolchain.sh b/pull-toolchain.sh index f5a19e7d75de1..eb8febf6bb32d 100755 --- a/pull-toolchain.sh +++ b/pull-toolchain.sh @@ -11,6 +11,10 @@ if [ "$upstream" != "$current" ]; then echo "$upstream" >go.toolchain.rev fi -if [ -n "$(git diff-index --name-only HEAD -- go.toolchain.rev)" ]; then +./tool/go version 2>/dev/null | awk '{print $3}' | sed 's/^go//' > go.toolchain.version + +./update-flake.sh + +if [ -n "$(git diff-index --name-only HEAD -- go.toolchain.rev go.toolchain.rev.sri go.toolchain.version)" ]; then echo "pull-toolchain.sh: changes imported. Use git commit to make them permanent." >&2 fi diff --git a/release/dist/dist.go b/release/dist/dist.go index 802d9041bab23..6fb0102993cbd 100644 --- a/release/dist/dist.go +++ b/release/dist/dist.go @@ -20,7 +20,6 @@ import ( "sync" "time" - "tailscale.com/util/multierr" "tailscale.com/version/mkversion" ) @@ -176,7 +175,7 @@ func (b *Build) Build(targets []Target) (files []string, err error) { } sort.Strings(files) - return files, multierr.New(errs...) + return files, errors.Join(errs...) } // Once runs fn if Once hasn't been called with name before. diff --git a/safesocket/safesocket.go b/safesocket/safesocket.go index 721b694dcf86c..287cdca599f77 100644 --- a/safesocket/safesocket.go +++ b/safesocket/safesocket.go @@ -11,6 +11,9 @@ import ( "net" "runtime" "time" + + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" ) type closeable interface { @@ -31,7 +34,8 @@ func ConnCloseWrite(c net.Conn) error { } var processStartTime = time.Now() -var tailscaledProcExists = func() bool { return false } // set by safesocket_ps.go + +var tailscaledProcExists feature.Hook[func() bool] // tailscaledStillStarting reports whether tailscaled is probably // still starting up. 
That is, it reports whether the caller should @@ -50,7 +54,8 @@ func tailscaledStillStarting() bool { if d > 5*time.Second { return false } - return tailscaledProcExists() + f, ok := tailscaledProcExists.GetOk() + return ok && f() } // ConnectContext connects to tailscaled using a unix socket or named pipe. @@ -104,7 +109,12 @@ func LocalTCPPortAndToken() (port int, token string, err error) { // PlatformUsesPeerCreds reports whether the current platform uses peer credentials // to authenticate connections. -func PlatformUsesPeerCreds() bool { return GOOSUsesPeerCreds(runtime.GOOS) } +func PlatformUsesPeerCreds() bool { + if !buildfeatures.HasUnixSocketIdentity { + return false + } + return GOOSUsesPeerCreds(runtime.GOOS) +} // GOOSUsesPeerCreds is like PlatformUsesPeerCreds but takes a // runtime.GOOS value instead of using the current one. diff --git a/safesocket/safesocket_ps.go b/safesocket/safesocket_ps.go index 48a8dd483478b..d3f409df58d15 100644 --- a/safesocket/safesocket_ps.go +++ b/safesocket/safesocket_ps.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build (linux && !android) || windows || (darwin && !ios) || freebsd +//go:build ((linux && !android) || windows || (darwin && !ios) || freebsd) && !ts_omit_cliconndiag package safesocket @@ -12,7 +12,7 @@ import ( ) func init() { - tailscaledProcExists = func() bool { + tailscaledProcExists.Set(func() bool { procs, err := ps.Processes() if err != nil { return false @@ -30,5 +30,5 @@ func init() { } } return false - } + }) } diff --git a/scripts/installer.sh b/scripts/installer.sh index 4d968cd2b7285..b40177005821b 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -110,6 +110,17 @@ main() { APT_KEY_TYPE="keyring" fi ;; + industrial-os) + OS="debian" + PACKAGETYPE="apt" + if [ "$(printf %.1s "$VERSION_ID")" -lt 5 ]; then + VERSION="buster" + APT_KEY_TYPE="legacy" + else + VERSION="bullseye" + APT_KEY_TYPE="keyring" + fi + ;; parrot|mendel) OS="debian" PACKAGETYPE="apt" diff --git a/sessionrecording/connect.go b/sessionrecording/connect.go index dc697d071dad2..8abf9dd7e9142 100644 --- a/sessionrecording/connect.go +++ b/sessionrecording/connect.go @@ -7,7 +7,6 @@ package sessionrecording import ( "context" - "crypto/tls" "encoding/json" "errors" "fmt" @@ -19,11 +18,9 @@ import ( "sync/atomic" "time" - "golang.org/x/net/http2" "tailscale.com/net/netx" "tailscale.com/tailcfg" "tailscale.com/util/httpm" - "tailscale.com/util/multierr" ) const ( @@ -93,7 +90,7 @@ func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial netx.Dia } return pw, attempts, errChan, nil } - return nil, attempts, nil, multierr.New(errs...) + return nil, attempts, nil, errors.Join(errs...) } // supportsV2 checks whether a recorder instance supports the /v2/record @@ -113,6 +110,97 @@ func supportsV2(ctx context.Context, hc *http.Client, ap netip.AddrPort) bool { return resp.StatusCode == http.StatusOK && resp.ProtoMajor > 1 } +// supportsEvent checks whether a recorder instance supports the /v2/event +// endpoint. 
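+//
+// It issues a HEAD request to /v2/event: a 200 response means the endpoint is
+// supported, a 404 means it is not, and any other status (or a transport
+// error) is reported as an error.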
+func supportsEvent(ctx context.Context, hc *http.Client, ap netip.AddrPort) (bool, error) {
+	ctx, cancel := context.WithTimeout(ctx, http2ProbeTimeout)
+	defer cancel()
+	req, err := http.NewRequestWithContext(ctx, httpm.HEAD, fmt.Sprintf("http://%s/v2/event", ap), nil)
+	if err != nil {
+		return false, err
+	}
+	resp, err := hc.Do(req)
+	if err != nil {
+		return false, err
+	}
+
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusOK {
+		return true, nil
+	}
+
+	if resp.StatusCode != http.StatusNotFound {
+		body, err := io.ReadAll(resp.Body)
+		if err != nil {
+			// Handle the case where reading the body itself fails
+			return false, fmt.Errorf("server returned non-OK status: %s, and failed to read body: %w", resp.Status, err)
+		}
+
+		return false, fmt.Errorf("server returned non-OK status: %d: %s", resp.StatusCode, string(body))
+	}
+
+	return false, nil
+}
+
+const addressNotSupportEventv2 = `recorder at address %q does not support "/v2/event" endpoint`
+
+type EventAPINotSupportedErr struct {
+	ap netip.AddrPort
+}
+
+func (e EventAPINotSupportedErr) Error() string {
+	return fmt.Sprintf(addressNotSupportEventv2, e.ap)
+}
+
+// SendEvent sends an event to the tsrecorder's /v2/event endpoint.
+func SendEvent(ap netip.AddrPort, event io.Reader, dial netx.DialFunc) (retErr error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		if retErr != nil {
+			cancel()
+		}
+	}()
+
+	client := clientHTTP1(ctx, dial)
+
+	supported, err := supportsEvent(ctx, client, ap)
+	if err != nil {
+		return fmt.Errorf("error checking support for `/v2/event` endpoint: %w", err)
+	}
+
+	if !supported {
+		return EventAPINotSupportedErr{
+			ap: ap,
+		}
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s/v2/event", ap.String()), event)
+	if err != nil {
+		return fmt.Errorf("error creating request: %w", err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return fmt.Errorf("error sending request: %v", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		body, err := io.ReadAll(resp.Body)
+		if err != nil {
+			// Handle the case where reading the body itself fails
+			return fmt.Errorf("server returned non-OK status: %s, and failed to read body: %w", resp.Status, err)
+		}
+
+		return fmt.Errorf("server returned non-OK status: %d: %s", resp.StatusCode, string(body))
+	}
+
+	return nil
+}
+
 // connectV1 connects to the legacy /record endpoint on the recorder. It is
 // used for backwards-compatibility with older tsrecorder instances.
 //
@@ -312,14 +400,15 @@ func clientHTTP1(dialCtx context.Context, dial netx.DialFunc) *http.Client {
 // requests (HTTP/2 over plaintext). Unfortunately the same client does not
 // work for HTTP/1 so we need to split these up.
 func clientHTTP2(dialCtx context.Context, dial netx.DialFunc) *http.Client {
+	var p http.Protocols
+	p.SetUnencryptedHTTP2(true)
 	return &http.Client{
-		Transport: &http2.Transport{
-			// Allow "http://" scheme in URLs.
-			AllowHTTP: true,
+		Transport: &http.Transport{
+			Protocols: &p,
 			// Pretend like we're using TLS, but actually use the provided
 			// DialFunc underneath. This is necessary to convince the transport
 			// to actually dial.
- DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + DialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) { perAttemptCtx, cancel := context.WithTimeout(ctx, perDialAttemptTimeout) defer cancel() go func() { diff --git a/sessionrecording/connect_test.go b/sessionrecording/connect_test.go index c0fcf6d40c617..cacf061d79b79 100644 --- a/sessionrecording/connect_test.go +++ b/sessionrecording/connect_test.go @@ -9,11 +9,13 @@ import ( "crypto/rand" "crypto/sha256" "encoding/json" + "fmt" "io" "net" "net/http" "net/http/httptest" "net/netip" + "strings" "testing" "time" @@ -148,9 +150,9 @@ func TestConnectToRecorder(t *testing.T) { // Wire up h2c-compatible HTTP/2 server. This is optional // because the v1 recorder didn't support HTTP/2 and we try to // mimic that. - h2s := &http2.Server{} - srv.Config.Handler = h2c.NewHandler(mux, h2s) - if err := http2.ConfigureServer(srv.Config, h2s); err != nil { + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { t.Errorf("configuring HTTP/2 support in server: %v", err) } } @@ -187,3 +189,97 @@ func TestConnectToRecorder(t *testing.T) { }) } } + +func TestSendEvent(t *testing.T) { + t.Run("supported", func(t *testing.T) { + eventBody := `{"foo":"bar"}` + eventRecieved := make(chan []byte, 1) + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("POST /v2/event", func(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + if err != nil { + t.Error(err) + } + eventRecieved <- body + w.WriteHeader(http.StatusOK) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, bytes.NewBufferString(eventBody), d.DialContext) + if err != nil { + t.Fatalf("SendEvent: %v", err) + } + + if recv := string(<-eventRecieved); recv != eventBody { + t.Errorf("mismatch in event body, sent %q, received %q", eventBody, recv) + } + }) + + t.Run("not_supported", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + }) + + srv := httptest.NewUnstartedServer(mux) + s := &http2.Server{} + srv.Config.Handler = h2c.NewHandler(mux, s) + if err := http2.ConfigureServer(srv.Config, s); err != nil { + t.Fatalf("configuring HTTP/2 support in server: %v", err) + } + srv.Start() + t.Cleanup(srv.Close) + + d := new(net.Dialer) + addr := netip.MustParseAddrPort(srv.Listener.Addr().String()) + err := SendEvent(addr, nil, d.DialContext) + if err == nil { + t.Fatal("expected an error, got nil") + } + if !strings.Contains(err.Error(), fmt.Sprintf(addressNotSupportEventv2, srv.Listener.Addr().String())) { + t.Fatalf("unexpected error: %v", err) + } + }) + + t.Run("server_error", func(t *testing.T) { + mux := http.NewServeMux() + mux.HandleFunc("HEAD /v2/event", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + mux.HandleFunc("POST /v2/event", func(w http.ResponseWriter, r *http.Request) { + 
+			w.WriteHeader(http.StatusInternalServerError)
+		})
+
+		srv := httptest.NewUnstartedServer(mux)
+		s := &http2.Server{}
+		srv.Config.Handler = h2c.NewHandler(mux, s)
+		if err := http2.ConfigureServer(srv.Config, s); err != nil {
+			t.Fatalf("configuring HTTP/2 support in server: %v", err)
+		}
+		srv.Start()
+		t.Cleanup(srv.Close)
+
+		d := new(net.Dialer)
+		addr := netip.MustParseAddrPort(srv.Listener.Addr().String())
+		err := SendEvent(addr, nil, d.DialContext)
+		if err == nil {
+			t.Fatal("expected an error, got nil")
+		}
+		if !strings.Contains(err.Error(), "server returned non-OK status") {
+			t.Fatalf("unexpected error: %v", err)
+		}
+	})
+}
diff --git a/sessionrecording/event.go b/sessionrecording/event.go
new file mode 100644
index 0000000000000..8f8172cc4b303
--- /dev/null
+++ b/sessionrecording/event.go
@@ -0,0 +1,118 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+package sessionrecording
+
+import (
+	"net/url"
+
+	"tailscale.com/tailcfg"
+)
+
+const (
+	KubernetesAPIEventType = "kubernetes-api-request"
+)
+
+// Event represents the top-level structure of a tsrecorder event.
+type Event struct {
+	// Type specifies the kind of event being recorded (e.g., "kubernetes-api-request").
+	Type string `json:"type"`
+
+	// ID is a reference to the path at which this event is stored in tsrecorder.
+	ID string `json:"id"`
+
+	// Timestamp is the time when the event was recorded, represented as a Unix timestamp.
+	Timestamp int64 `json:"timestamp"`
+
+	// UserAgent is the User-Agent specified in the request, which helps identify
+	// the client software that initiated the request.
+	UserAgent string `json:"userAgent"`
+
+	// Request holds details of the HTTP request.
+	Request Request `json:"request"`
+
+	// Kubernetes contains Kubernetes-specific information about the request (if
+	// the type is `kubernetes-api-request`).
+	Kubernetes KubernetesRequestInfo `json:"kubernetes"`
+
+	// Source provides details about the client that initiated the request.
+	Source Source `json:"source"`
+
+	// Destination provides details about the node receiving the request.
+	Destination Destination `json:"destination"`
+}
+
+// copied from https://github.com/kubernetes/kubernetes/blob/11ade2f7dd264c2f52a4a1342458abbbaa3cb2b1/staging/src/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go#L44
+// KubernetesRequestInfo contains Kubernetes-specific information about the request (if the type is `kubernetes-api-request`).
+type KubernetesRequestInfo struct {
+	// IsResourceRequest indicates whether or not the request is for an API resource or subresource
+	IsResourceRequest bool
+	// Path is the URL path of the request
+	Path string
+	// Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch.
+	// for non-resource requests, this is the lowercase http verb
+	Verb string
+
+	APIPrefix  string
+	APIGroup   string
+	APIVersion string
+
+	Namespace string
+	// Resource is the name of the resource being requested. This is not the kind. For example: pods
+	Resource string
+	// Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind.
+	// For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod"
+	// (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
+	Subresource string
+	// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
+	Name string
+	// Parts are the path parts for the request, always starting with /{resource}/{name}
+	Parts []string
+
+	// FieldSelector contains the unparsed field selector from a request. It is only present if the apiserver
+	// honors field selectors for the verb this request is associated with.
+	FieldSelector string
+	// LabelSelector contains the unparsed label selector from a request. It is only present if the apiserver
+	// honors label selectors for the verb this request is associated with.
+	LabelSelector string
+}
+
+type Source struct {
+	// Node is the FQDN of the node originating the connection.
+	// It is also the MagicDNS name for the node.
+	// It does not have a trailing dot.
+	// e.g. "host.tail-scale.ts.net"
+	Node string `json:"node"`
+
+	// NodeID is the node ID of the node originating the connection.
+	NodeID tailcfg.StableNodeID `json:"nodeID"`
+
+	// Tailscale-specific fields:
+	// NodeTags is the list of tags on the node originating the connection (if any).
+	NodeTags []string `json:"nodeTags,omitempty"`
+
+	// NodeUserID is the user ID of the node originating the connection (if not tagged).
+	NodeUserID tailcfg.UserID `json:"nodeUserID,omitempty"` // if not tagged
+
+	// NodeUser is the LoginName of the node originating the connection (if not tagged).
+	NodeUser string `json:"nodeUser,omitempty"`
+}
+
+type Destination struct {
+	// Node is the FQDN of the node receiving the connection.
+	// It is also the MagicDNS name for the node.
+	// It does not have a trailing dot.
+	// e.g. "host.tail-scale.ts.net"
+	Node string `json:"node"`
+
+	// NodeID is the node ID of the node receiving the connection.
+	NodeID tailcfg.StableNodeID `json:"nodeID"`
+}
+
+// Request holds information about a request.
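+// When marshaled to JSON, Body is base64-encoded if non-nil (encoding/json's
+// default for []byte). For illustration only (the field values below are
+// hypothetical), a marshaled Request might look like:
+//
+//	{"method":"GET","path":"/api/v1/pods","body":null,"queryParameters":{"limit":["500"]}}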
+type Request struct { + Method string `json:"method"` + Path string `json:"path"` + Body []byte `json:"body"` + QueryParameters url.Values `json:"queryParameters"` +} diff --git a/sessionrecording/header.go b/sessionrecording/header.go index 545bf06bd5984..2208522168dec 100644 --- a/sessionrecording/header.go +++ b/sessionrecording/header.go @@ -62,7 +62,6 @@ type CastHeader struct { ConnectionID string `json:"connectionID"` // Fields that are only set for Kubernetes API server proxy session recordings: - Kubernetes *Kubernetes `json:"kubernetes,omitempty"` } diff --git a/shell.nix b/shell.nix index 883d71befe9d6..ec345998afe30 100644 --- a/shell.nix +++ b/shell.nix @@ -16,4 +16,4 @@ ) { src = ./.; }).shellNix -# nix-direnv cache busting line: sha256-8aE6dWMkTLdWRD9WnLVSzpOQQh61voEnjZAJHtbGCSs= +# nix-direnv cache busting line: sha256-rV3C2Vi48FCifGt58OdEO4+Av0HRIs8sUJVvp/gEBLw= diff --git a/smallzstd/testdata b/smallzstd/testdata deleted file mode 100644 index 76640fdc57df0..0000000000000 --- a/smallzstd/testdata +++ /dev/null @@ -1,14 +0,0 @@ -{"logtail":{"client_time":"2020-07-01T14:49:40.196597018-07:00","server_time":"2020-07-01T21:49:40.198371511Z"},"text":"9.8M/25.6M magicsock: starting endpoint update (periodic)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:40.345925455-07:00","server_time":"2020-07-01T21:49:40.347904717Z"},"text":"9.9M/25.6M netcheck: udp=true v6=false mapvarydest=false hair=false v4a=202.188.7.1:41641 derp=2 derpdist=1v4:7ms,2v4:3ms,4v4:18ms\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347155742-07:00","server_time":"2020-07-01T21:49:43.34828658Z"},"text":"9.9M/25.6M control: map response long-poll timed out!\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347539333-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.9M/25.6M control: PollNetMap: context canceled\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347767812-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M control: sendStatus: mapRoutine1: state:authenticated\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347817165-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M blockEngineUpdates(false)\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.347989028-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"10.0M/25.6M wgcfg: [SViTM] skipping subnet route\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.349997554-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M Received error: PollNetMap: context canceled\n"} -{"logtail":{"client_time":"2020-07-01T14:49:43.350072606-07:00","server_time":"2020-07-01T21:49:43.358809354Z"},"text":"9.3M/25.6M control: mapRoutine: backoff: 30136 msec\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.998364646-07:00","server_time":"2020-07-01T21:49:47.999333754Z"},"text":"9.5M/25.6M [W1NbE] - [UcppE] Send handshake init [127.3.3.40:1, 6.1.1.6:37388*, 10.3.2.6:41641]\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.99881914-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: adding connection to derp-1 for [W1NbE]\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.998904932-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M magicsock: 2 active derp conns: derp-1=cr0s,wr0s derp-2=cr16h0m0s,wr14h38m0s\n"} -{"logtail":{"client_time":"2020-07-01T14:49:47.999045606-07:00","server_time":"2020-07-01T21:49:48.009859543Z"},"text":"9.6M/25.6M derphttp.Client.Recv: connecting to derp-1 (nyc)\n"} 
-{"logtail":{"client_time":"2020-07-01T14:49:48.091104119-07:00","server_time":"2020-07-01T21:49:48.09280535Z"},"text":"9.6M/25.6M magicsock: rx [W1NbE] from 6.1.1.6:37388 (1/3), set as new priority\n"} diff --git a/smallzstd/zstd.go b/smallzstd/zstd.go deleted file mode 100644 index 1d80854224359..0000000000000 --- a/smallzstd/zstd.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// Package smallzstd produces zstd encoders and decoders optimized for -// low memory usage, at the expense of compression efficiency. -// -// This package is optimized primarily for the memory cost of -// compressing and decompressing data. We reduce this cost in two -// major ways: disable parallelism within the library (i.e. don't use -// multiple CPU cores to decompress), and drop the compression window -// down from the defaults of 4-16MiB, to 8kiB. -// -// Decompressors cost 2x the window size in RAM to run, so by using an -// 8kiB window, we can run ~1000 more decompressors per unit of memory -// than with the defaults. -// -// Depending on context, the benefit is either being able to run more -// decoders (e.g. in our logs processing system), or having a lower -// memory footprint when using compression in network protocols -// (e.g. in tailscaled, which should have a minimal RAM cost). -package smallzstd - -import ( - "io" - - "github.com/klauspost/compress/zstd" -) - -// WindowSize is the window size used for zstd compression. Decoder -// memory usage scales linearly with WindowSize. -const WindowSize = 8 << 10 // 8kiB - -// NewDecoder returns a zstd.Decoder configured for low memory usage, -// at the expense of decompression performance. -func NewDecoder(r io.Reader, options ...zstd.DOption) (*zstd.Decoder, error) { - defaults := []zstd.DOption{ - // Default is GOMAXPROCS, which costs many KiB in stacks. - zstd.WithDecoderConcurrency(1), - // Default is to allocate more upfront for performance. We - // prefer lower memory use and a bit of GC load. - zstd.WithDecoderLowmem(true), - // You might expect to see zstd.WithDecoderMaxMemory - // here. However, it's not terribly safe to use if you're - // doing stateless decoding, because it sets the maximum - // amount of memory the decompressed data can occupy, rather - // than the window size of the zstd stream. This means a very - // compressible piece of data might violate the max memory - // limit here, even if the window size (and thus total memory - // required to decompress the data) is small. - // - // As a result, we don't set a decoder limit here, and rely on - // the encoder below producing "cheap" streams. Callers are - // welcome to set their own max memory setting, if - // contextually there is a clearly correct value (e.g. it's - // known from the upper layer protocol that the decoded data - // can never be more than 1MiB). - } - - return zstd.NewReader(r, append(defaults, options...)...) -} - -// NewEncoder returns a zstd.Encoder configured for low memory usage, -// both during compression and at decompression time, at the expense -// of performance and compression efficiency. -func NewEncoder(w io.Writer, options ...zstd.EOption) (*zstd.Encoder, error) { - defaults := []zstd.EOption{ - // Default is GOMAXPROCS, which costs many KiB in stacks. - zstd.WithEncoderConcurrency(1), - // Default is several MiB, which bloats both encoders and - // their corresponding decoders. 
- zstd.WithWindowSize(WindowSize), - // Encode zero-length inputs in a way that the `zstd` utility - // can read, because interoperability is handy. - zstd.WithZeroFrames(true), - } - - return zstd.NewWriter(w, append(defaults, options...)...) -} diff --git a/smallzstd/zstd_test.go b/smallzstd/zstd_test.go deleted file mode 100644 index d1225bfac6058..0000000000000 --- a/smallzstd/zstd_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package smallzstd - -import ( - "os" - "testing" - - "github.com/klauspost/compress/zstd" -) - -func BenchmarkSmallEncoder(b *testing.B) { - benchEncoder(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) }) -} - -func BenchmarkSmallEncoderWithBuild(b *testing.B) { - benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return NewEncoder(nil) }) -} - -func BenchmarkStockEncoder(b *testing.B) { - benchEncoder(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) }) -} - -func BenchmarkStockEncoderWithBuild(b *testing.B) { - benchEncoderWithConstruction(b, func() (*zstd.Encoder, error) { return zstd.NewWriter(nil) }) -} - -func BenchmarkSmallDecoder(b *testing.B) { - benchDecoder(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) }) -} - -func BenchmarkSmallDecoderWithBuild(b *testing.B) { - benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return NewDecoder(nil) }) -} - -func BenchmarkStockDecoder(b *testing.B) { - benchDecoder(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) }) -} - -func BenchmarkStockDecoderWithBuild(b *testing.B) { - benchDecoderWithConstruction(b, func() (*zstd.Decoder, error) { return zstd.NewReader(nil) }) -} - -func benchEncoder(b *testing.B, mk func() (*zstd.Encoder, error)) { - b.ReportAllocs() - - in := testdata(b) - out := make([]byte, 0, 10<<10) // 10kiB - - e, err := mk() - if err != nil { - b.Fatalf("making encoder: %v", err) - } - - b.ResetTimer() - for range b.N { - e.EncodeAll(in, out) - } -} - -func benchEncoderWithConstruction(b *testing.B, mk func() (*zstd.Encoder, error)) { - b.ReportAllocs() - - in := testdata(b) - out := make([]byte, 0, 10<<10) // 10kiB - - b.ResetTimer() - for range b.N { - e, err := mk() - if err != nil { - b.Fatalf("making encoder: %v", err) - } - - e.EncodeAll(in, out) - } -} - -func benchDecoder(b *testing.B, mk func() (*zstd.Decoder, error)) { - b.ReportAllocs() - - in := compressedTestdata(b) - out := make([]byte, 0, 10<<10) - - d, err := mk() - if err != nil { - b.Fatalf("creating decoder: %v", err) - } - - b.ResetTimer() - for range b.N { - d.DecodeAll(in, out) - } -} - -func benchDecoderWithConstruction(b *testing.B, mk func() (*zstd.Decoder, error)) { - b.ReportAllocs() - - in := compressedTestdata(b) - out := make([]byte, 0, 10<<10) - - b.ResetTimer() - for range b.N { - d, err := mk() - if err != nil { - b.Fatalf("creating decoder: %v", err) - } - - d.DecodeAll(in, out) - } -} - -func testdata(b *testing.B) []byte { - b.Helper() - in, err := os.ReadFile("testdata") - if err != nil { - b.Fatalf("reading testdata: %v", err) - } - return in -} - -func compressedTestdata(b *testing.B) []byte { - b.Helper() - uncomp := testdata(b) - e, err := NewEncoder(nil) - if err != nil { - b.Fatalf("creating encoder: %v", err) - } - return e.EncodeAll(uncomp, nil) -} diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index b249a10639c30..7d12ab45f8552 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -32,7 +32,6 @@ import ( gossh 
"golang.org/x/crypto/ssh" "tailscale.com/envknob" "tailscale.com/ipn/ipnlocal" - "tailscale.com/logtail/backoff" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" "tailscale.com/sessionrecording" @@ -41,6 +40,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/netmap" + "tailscale.com/util/backoff" "tailscale.com/util/clientmetric" "tailscale.com/util/httpm" "tailscale.com/util/mak" diff --git a/ssh/tailssh/tailssh_test.go b/ssh/tailssh/tailssh_test.go index 96fb87f4903c0..3b6d3c52c391c 100644 --- a/ssh/tailssh/tailssh_test.go +++ b/ssh/tailssh/tailssh_test.go @@ -36,6 +36,7 @@ import ( gossh "golang.org/x/crypto/ssh" "golang.org/x/net/http2" "golang.org/x/net/http2/h2c" + "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/store/mem" "tailscale.com/net/memnet" @@ -489,6 +490,8 @@ func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule { } func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/7707") + if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS) } @@ -1062,7 +1065,7 @@ func TestSSHAuthFlow(t *testing.T) { func TestSSH(t *testing.T) { logf := tstest.WhileTestRunningLogger(t) sys := tsd.NewSystem() - eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker(), sys.UserMetricsRegistry(), sys.Bus.Get()) + eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker.Get(), sys.UserMetricsRegistry(), sys.Bus.Get()) if err != nil { t.Fatal(err) } diff --git a/tailcfg/c2ntypes.go b/tailcfg/c2ntypes.go index 66f95785c4a83..d78baef1c29a4 100644 --- a/tailcfg/c2ntypes.go +++ b/tailcfg/c2ntypes.go @@ -5,7 +5,10 @@ package tailcfg -import "net/netip" +import ( + "encoding/json" + "net/netip" +) // C2NSSHUsernamesRequest is the request for the /ssh/usernames. // A GET request without a request body is equivalent to the zero value of this type. @@ -117,3 +120,29 @@ type C2NVIPServicesResponse struct { // changes. This value matches what is reported in latest [Hostinfo.ServicesHash]. ServicesHash string } + +// C2NDebugNetmapRequest is the request (from control to node) for the +// /debug/netmap handler. +type C2NDebugNetmapRequest struct { + // Candidate is an optional full MapResponse to be used for generating a candidate + // network map. If unset, only the current network map is returned. + Candidate *MapResponse `json:"candidate,omitzero"` + + // OmitFields is an optional list of netmap fields to omit from the response. + // If unset, no fields are omitted. + OmitFields []string `json:"omitFields,omitzero"` +} + +// C2NDebugNetmapResponse is the response (from node to control) from the +// /debug/netmap handler. It contains the current network map and, if a +// candidate full MapResponse was provided in the request, a candidate network +// map generated from it. +// To avoid import cycles, and reflect the non-stable nature of +// netmap.NetworkMap values, they are returned as json.RawMessage. +type C2NDebugNetmapResponse struct { + // Current is the current network map (netmap.NetworkMap). + Current json.RawMessage `json:"current"` + + // Candidate is a network map produced based on the candidate MapResponse. 
+ Candidate json.RawMessage `json:"candidate,omitzero"` +} diff --git a/tailcfg/proto_port_range.go b/tailcfg/proto_port_range.go index f65c58804d44d..03505dbd131e7 100644 --- a/tailcfg/proto_port_range.go +++ b/tailcfg/proto_port_range.go @@ -5,7 +5,6 @@ package tailcfg import ( "errors" - "fmt" "strconv" "strings" @@ -70,14 +69,7 @@ func (ppr ProtoPortRange) String() string { buf.Write(text) buf.Write([]byte(":")) } - pr := ppr.Ports - if pr.First == pr.Last { - fmt.Fprintf(&buf, "%d", pr.First) - } else if pr == PortRangeAny { - buf.WriteByte('*') - } else { - fmt.Fprintf(&buf, "%d-%d", pr.First, pr.Last) - } + buf.WriteString(ppr.Ports.String()) return buf.String() } @@ -104,7 +96,7 @@ func parseProtoPortRange(ipProtoPort string) (*ProtoPortRange, error) { if !strings.Contains(ipProtoPort, ":") { ipProtoPort = "*:" + ipProtoPort } - protoStr, portRange, err := parseHostPortRange(ipProtoPort) + protoStr, portRange, err := ParseHostPortRange(ipProtoPort) if err != nil { return nil, err } @@ -126,9 +118,9 @@ func parseProtoPortRange(ipProtoPort string) (*ProtoPortRange, error) { return ppr, nil } -// parseHostPortRange parses hostport as HOST:PORTS where HOST is +// ParseHostPortRange parses hostport as HOST:PORTS where HOST is // returned unchanged and PORTS is is either "*" or PORTLOW-PORTHIGH ranges. -func parseHostPortRange(hostport string) (host string, ports PortRange, err error) { +func ParseHostPortRange(hostport string) (host string, ports PortRange, err error) { hostport = strings.ToLower(hostport) colon := strings.LastIndexByte(hostport, ':') if colon < 0 { diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 94d0b19d5c700..a95d0559c2bec 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -5,7 +5,7 @@ // the node and the coordination server. package tailcfg -//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService --clonefunc +//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy --clonefunc import ( "bytes" @@ -17,9 +17,11 @@ import ( "net/netip" "reflect" "slices" + "strconv" "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/dnstype" "tailscale.com/types/key" "tailscale.com/types/opt" @@ -170,7 +172,12 @@ type CapabilityVersion int // - 123: 2025-07-28: fix deadlock regression from cryptokey routing change (issue #16651) // - 124: 2025-08-08: removed NodeAttrDisableMagicSockCryptoRouting support, crypto routing is now mandatory // - 125: 2025-08-11: dnstype.Resolver adds UseWithExitNode field. -const CurrentCapabilityVersion CapabilityVersion = 125 +// - 126: 2025-09-17: Client uses seamless key renewal unless disabled by control (tailscale/corp#31479) +// - 127: 2025-09-19: can handle C2N /debug/netmap. +// - 128: 2025-10-02: can handle C2N /debug/health. 
+// - 129: 2025-10-04: Fixed sleep/wake deadlock in magicsock when using peer relay (PR #17449) +// - 130: 2025-10-06: client can send key.HardwareAttestationPublic and key.HardwareAttestationKeySignature in MapRequest +const CurrentCapabilityVersion CapabilityVersion = 130 // ID is an integer ID for a user, node, or login allocated by the // control plane. @@ -921,6 +928,10 @@ type TPMInfo struct { // https://trustedcomputinggroup.org/resource/tpm-library-specification/. // Before revision 184, TCG used the "01.83" format for revision 183. SpecRevision int `json:",omitempty"` + + // FamilyIndicator is the TPM spec family, like "2.0". + // Read from TPM_PT_FAMILY_INDICATOR. + FamilyIndicator string `json:",omitempty"` } // Present reports whether a TPM device is present on this machine. @@ -1085,6 +1096,9 @@ func (ni *NetInfo) String() string { } func (ni *NetInfo) portMapSummary() string { + if !buildfeatures.HasPortMapper { + return "x" + } if !ni.HavePortMap && ni.UPnP == "" && ni.PMP == "" && ni.PCP == "" { return "?" } @@ -1360,6 +1374,17 @@ type MapRequest struct { NodeKey key.NodePublic DiscoKey key.DiscoPublic + // HardwareAttestationKey is the public key of the node's hardware-backed + // identity attestation key, if any. + HardwareAttestationKey key.HardwareAttestationPublic `json:",omitzero"` + // HardwareAttestationKeySignature is the signature of + // "$UNIX_TIMESTAMP|$NODE_KEY" using its hardware attestation key, if any. + HardwareAttestationKeySignature []byte `json:",omitempty"` + // HardwareAttestationKeySignatureTimestamp is the time at which the + // HardwareAttestationKeySignature was created, if any. This UNIX timestamp + // value is prepended to the node key when signing. + HardwareAttestationKeySignatureTimestamp time.Time `json:",omitzero"` + // Stream is whether the client wants to receive multiple MapResponses over // the same HTTP connection. // @@ -1463,6 +1488,15 @@ func (pr PortRange) Contains(port uint16) bool { var PortRangeAny = PortRange{0, 65535} +func (pr PortRange) String() string { + if pr.First == pr.Last { + return strconv.FormatUint(uint64(pr.First), 10) + } else if pr == PortRangeAny { + return "*" + } + return fmt.Sprintf("%d-%d", pr.First, pr.Last) +} + // NetPortRange represents a range of ports that's allowed for one or more IPs. type NetPortRange struct { _ structs.Incomparable @@ -2255,7 +2289,14 @@ type ControlDialPlan struct { // connecting to the control server. type ControlIPCandidate struct { // IP is the address to attempt connecting to. - IP netip.Addr + IP netip.Addr `json:",omitzero"` + + // ACEHost, if non-empty, means that the client should connect to the + // control plane using an HTTPS CONNECT request to the provided hostname. If + // the IP field is also set, then the IP is the IP address of the ACEHost + // (and not the control plane) and DNS should not be used. The target (the + // argument to CONNECT) is always the control plane's hostname, not an IP. + ACEHost string `json:",omitempty"` // DialStartSec is the number of seconds after the beginning of the // connection process to wait before trying this candidate. @@ -2523,8 +2564,19 @@ const ( // This cannot be set simultaneously with NodeAttrLinuxMustUseIPTables. NodeAttrLinuxMustUseNfTables NodeCapability = "linux-netfilter?v=nftables" - // NodeAttrSeamlessKeyRenewal makes clients enable beta functionality - // of renewing node keys without breaking connections. 
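Earlier in this file's diff, PortRange gains a String method that ProtoPortRange.String now delegates to; a small usage example covering the three formatting cases:

package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

func main() {
	// Single port, the wildcard range, and a proper range.
	fmt.Println(tailcfg.PortRange{First: 80, Last: 80}.String())   // "80"
	fmt.Println(tailcfg.PortRangeAny.String())                     // "*"
	fmt.Println(tailcfg.PortRange{First: 100, Last: 200}.String()) // "100-200"
}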
+ // NodeAttrDisableSeamlessKeyRenewal disables seamless key renewal, which is + // enabled by default in clients as of 2025-09-17 (1.90 and later). + // + // We will use this attribute to manage the rollout, and disable seamless in + // clients with known bugs. + // http://go/seamless-key-renewal + NodeAttrDisableSeamlessKeyRenewal NodeCapability = "disable-seamless-key-renewal" + + // NodeAttrSeamlessKeyRenewal was used to opt-in to seamless key renewal + // during its private alpha. + // + // Deprecated: NodeAttrSeamlessKeyRenewal is deprecated as of CapabilityVersion 126, + // because seamless key renewal is now enabled by default. NodeAttrSeamlessKeyRenewal NodeCapability = "seamless-key-renewal" // NodeAttrProbeUDPLifetime makes the client probe UDP path lifetime at the @@ -2664,6 +2716,12 @@ const ( // numbers, apostrophe, spaces, and hyphens. This may not be true for the default. // Values can look like "foo.com" or "Foo's Test Tailnet - Staging". NodeAttrTailnetDisplayName NodeCapability = "tailnet-display-name" + + // NodeAttrClientSideReachability configures the node to determine + // reachability itself when choosing connectors. When absent, the + // default behavior is to trust the control plane when it claims that a + // node is no longer online, but that is not a reliable signal. + NodeAttrClientSideReachability = "client-side-reachability" ) // SetDNSRequest is a request to add a DNS record. @@ -2707,6 +2765,9 @@ type SetDNSResponse struct{} // node health changes to: // // POST https:///machine/update-health. +// +// As of 2025-10-02, we stopped sending this to the control plane proactively. +// It was never useful enough with its current design and needs more thought. type HealthChangeRequest struct { Subsys string // a health.Subsystem value in string form Error string // or empty if cleared @@ -2851,7 +2912,7 @@ type SSHAction struct { // SessionDuration, if non-zero, is how long the session can stay open // before being forcefully terminated. - SessionDuration time.Duration `json:"sessionDuration,omitempty"` + SessionDuration time.Duration `json:"sessionDuration,omitempty,format:nano"` // AllowAgentForwarding, if true, allows accepted connections to forward // the ssh agent if requested. diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index 95f8905b84e69..9aa7673886bc6 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -651,9 +651,35 @@ var _VIPServiceCloneNeedsRegeneration = VIPService(struct { Active bool }{}) +// Clone makes a deep copy of SSHPolicy. +// The result aliases no memory with the original. +func (src *SSHPolicy) Clone() *SSHPolicy { + if src == nil { + return nil + } + dst := new(SSHPolicy) + *dst = *src + if src.Rules != nil { + dst.Rules = make([]*SSHRule, len(src.Rules)) + for i := range dst.Rules { + if src.Rules[i] == nil { + dst.Rules[i] = nil + } else { + dst.Rules[i] = src.Rules[i].Clone() + } + } + } + return dst +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _SSHPolicyCloneNeedsRegeneration = SSHPolicy(struct { + Rules []*SSHRule +}{}) + // Clone duplicates src into dst and reports whether it succeeded. // To succeed, must be of types <*T, *T> or <*T, **T>, -// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService. 
+// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy. func Clone(dst, src any) bool { switch src := src.(type) { case *User: @@ -836,6 +862,15 @@ func Clone(dst, src any) bool { *dst = src.Clone() return true } + case *SSHPolicy: + switch dst := dst.(type) { + case *SSHPolicy: + *dst = *src.Clone() + return true + case **SSHPolicy: + *dst = src.Clone() + return true + } } return false } diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index e44d0bbef326b..88dd90096ab55 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -21,7 +21,7 @@ import ( "tailscale.com/types/views" ) -//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService +//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,RegisterResponseAuth,RegisterRequest,DERPHomeParams,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan,Location,UserProfile,VIPService,SSHPolicy // View returns a read-only view of User. func (p *User) View() UserView { @@ -2604,3 +2604,94 @@ var _VIPServiceViewNeedsRegeneration = VIPService(struct { Ports []ProtoPortRange Active bool }{}) + +// View returns a read-only view of SSHPolicy. +func (p *SSHPolicy) View() SSHPolicyView { + return SSHPolicyView{ж: p} +} + +// SSHPolicyView provides a read-only view over SSHPolicy. +// +// Its methods should only be called if `Valid()` returns true. +type SSHPolicyView struct { + // ж is the underlying mutable value, named with a hard-to-type + // character that looks pointy like a pointer. + // It is named distinctively to make you think of how dangerous it is to escape + // to callers. You must not let callers be able to mutate it. + ж *SSHPolicy +} + +// Valid reports whether v's underlying value is non-nil. +func (v SSHPolicyView) Valid() bool { return v.ж != nil } + +// AsStruct returns a clone of the underlying value which aliases no memory with +// the original. +func (v SSHPolicyView) AsStruct() *SSHPolicy { + if v.ж == nil { + return nil + } + return v.ж.Clone() +} + +// MarshalJSON implements [jsonv1.Marshaler]. +func (v SSHPolicyView) MarshalJSON() ([]byte, error) { + return jsonv1.Marshal(v.ж) +} + +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v SSHPolicyView) MarshalJSONTo(enc *jsontext.Encoder) error { + return jsonv2.MarshalEncode(enc, v.ж) +} + +// UnmarshalJSON implements [jsonv1.Unmarshaler]. +func (v *SSHPolicyView) UnmarshalJSON(b []byte) error { + if v.ж != nil { + return errors.New("already initialized") + } + if len(b) == 0 { + return nil + } + var x SSHPolicy + if err := jsonv1.Unmarshal(b, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (v *SSHPolicyView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { + if v.ж != nil { + return errors.New("already initialized") + } + var x SSHPolicy + if err := jsonv2.UnmarshalDecode(dec, &x); err != nil { + return err + } + v.ж = &x + return nil +} + +// Rules are the rules to process for an incoming SSH connection. 
The first +// matching rule takes its action and stops processing further rules. +// +// When an incoming connection first starts, all rules are evaluated in +// "none" auth mode, where the client hasn't even been asked to send a +// public key. All SSHRule.Principals requiring a public key won't match. If +// a rule matches on the first pass and its Action is reject, the +// authentication fails with that action's rejection message, if any. +// +// If the first pass rule evaluation matches nothing without matching an +// Action with Reject set, the rules are considered to see whether public +// keys might still result in a match. If not, "none" auth is terminated +// before proceeding to public key mode. If so, the client is asked to try +// public key authentication and the rules are evaluated again for each of +// the client's present keys. +func (v SSHPolicyView) Rules() views.SliceView[*SSHRule, SSHRuleView] { + return views.SliceOfViews[*SSHRule, SSHRuleView](v.ж.Rules) +} + +// A compilation failure here means this code must be regenerated, with the command at the top of this file. +var _SSHPolicyViewNeedsRegeneration = SSHPolicy(struct { + Rules []*SSHRule +}{}) diff --git a/tempfork/httprec/httprec.go b/tempfork/httprec/httprec.go index 13786aaf60e05..07ca673fea885 100644 --- a/tempfork/httprec/httprec.go +++ b/tempfork/httprec/httprec.go @@ -14,9 +14,6 @@ import ( "net/http" "net/textproto" "strconv" - "strings" - - "golang.org/x/net/http/httpguts" ) // ResponseRecorder is an implementation of [http.ResponseWriter] that @@ -59,10 +56,6 @@ func NewRecorder() *ResponseRecorder { } } -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder]. -const DefaultRemoteAddr = "1.2.3.4" - // Header implements [http.ResponseWriter]. It returns the response // headers to mutate within a handler. To test the headers that were // written after a handler completes, use the [ResponseRecorder.Result] method and see @@ -206,37 +199,6 @@ func (rw *ResponseRecorder) Result() *http.Response { res.Body = http.NoBody } res.ContentLength = parseContentLength(res.Header.Get("Content-Length")) - - if trailers, ok := rw.snapHeader["Trailer"]; ok { - res.Trailer = make(http.Header, len(trailers)) - for _, k := range trailers { - for _, k := range strings.Split(k, ",") { - k = http.CanonicalHeaderKey(textproto.TrimString(k)) - if !httpguts.ValidTrailerHeader(k) { - // Ignore since forbidden by RFC 7230, section 4.1.2. 
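The SSHPolicy rules comment above describes a two-pass evaluation: one pass with no client public key, then, only if some rule could still match, one pass per offered key. A rough control-flow sketch of that description; the types and the evaluateRule/couldMatchWithPubkey helpers are hypothetical stand-ins, not code from this repo:

package sketch

// Simplified stand-ins for the real connection, rule, and action types.
type conn struct{ offeredKeys [][]byte }
type rule struct{}
type action struct{ Reject bool }

// Hypothetical helpers; the real evaluation lives in ssh/tailssh.
func evaluateRule(r rule, c *conn, pubKey []byte) (action, bool) { return action{}, false }
func couldMatchWithPubkey(rules []rule) bool                     { return false }

// evaluate mirrors the documented flow: principals that require a public
// key cannot match on the first ("none" auth) pass; a rule matching on
// that pass ends evaluation (a Reject action fails authentication). If
// nothing matched and no rule could match a public key either, "none"
// auth is terminated; otherwise each offered key gets its own pass.
func evaluate(rules []rule, c *conn) (action, bool) {
	for _, r := range rules {
		if a, ok := evaluateRule(r, c, nil); ok {
			return a, true
		}
	}
	if !couldMatchWithPubkey(rules) {
		return action{}, false
	}
	for _, key := range c.offeredKeys {
		for _, r := range rules {
			if a, ok := evaluateRule(r, c, key); ok {
				return a, true
			}
		}
	}
	return action{}, false
}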
- continue - } - vv, ok := rw.HeaderMap[k] - if !ok { - continue - } - vv2 := make([]string, len(vv)) - copy(vv2, vv) - res.Trailer[k] = vv2 - } - } - } - for k, vv := range rw.HeaderMap { - if !strings.HasPrefix(k, http.TrailerPrefix) { - continue - } - if res.Trailer == nil { - res.Trailer = make(http.Header) - } - for _, v := range vv { - res.Trailer.Add(strings.TrimPrefix(k, http.TrailerPrefix), v) - } - } return res } diff --git a/tka/aum.go b/tka/aum.go index 07a34b4f62458..08d70897ee70f 100644 --- a/tka/aum.go +++ b/tka/aum.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/builder.go b/tka/builder.go index ec38bb6fa15f7..642f39d77422d 100644 --- a/tka/builder.go +++ b/tka/builder.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/deeplink.go b/tka/deeplink.go index 5cf24fc5c2c82..5570a19d7371b 100644 --- a/tka/deeplink.go +++ b/tka/deeplink.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/disabled_stub.go b/tka/disabled_stub.go new file mode 100644 index 0000000000000..15bf12c333fc8 --- /dev/null +++ b/tka/disabled_stub.go @@ -0,0 +1,149 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package tka + +import ( + "crypto/ed25519" + "errors" + + "tailscale.com/types/key" + "tailscale.com/types/logger" + "tailscale.com/types/tkatype" +) + +type Authority struct { + head AUM + oldestAncestor AUM + state State +} + +func (*Authority) Head() AUMHash { return AUMHash{} } + +func (AUMHash) MarshalText() ([]byte, error) { return nil, errNoTailnetLock } + +type State struct{} + +// AUMKind describes valid AUM types. +type AUMKind uint8 + +type AUMHash [32]byte + +type AUM struct { + MessageKind AUMKind `cbor:"1,keyasint"` + PrevAUMHash []byte `cbor:"2,keyasint"` + + // Key encodes a public key to be added to the key authority. + // This field is used for AddKey AUMs. + Key *Key `cbor:"3,keyasint,omitempty"` + + // KeyID references a public key which is part of the key authority. + // This field is used for RemoveKey and UpdateKey AUMs. + KeyID tkatype.KeyID `cbor:"4,keyasint,omitempty"` + + // State describes the full state of the key authority. + // This field is used for Checkpoint AUMs. + State *State `cbor:"5,keyasint,omitempty"` + + // Votes and Meta describe properties of a key in the key authority. + // These fields are used for UpdateKey AUMs. + Votes *uint `cbor:"6,keyasint,omitempty"` + Meta map[string]string `cbor:"7,keyasint,omitempty"` + + // Signatures lists the signatures over this AUM. + // CBOR key 23 is the last key which can be encoded as a single byte. + Signatures []tkatype.Signature `cbor:"23,keyasint,omitempty"` +} + +type Chonk interface { + // AUM returns the AUM with the specified digest. + // + // If the AUM does not exist, then os.ErrNotExist is returned. + AUM(hash AUMHash) (AUM, error) + + // ChildAUMs returns all AUMs with a specified previous + // AUM hash. + ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) + + // CommitVerifiedAUMs durably stores the provided AUMs. + // Callers MUST ONLY provide AUMs which are verified (specifically, + // a call to aumVerify() must return a nil error). 
+ // as the implementation assumes that only verified AUMs are stored. + CommitVerifiedAUMs(updates []AUM) error + + // Heads returns AUMs for which there are no children. In other + // words, the latest AUM in all possible chains (the 'leaves'). + Heads() ([]AUM, error) + + // SetLastActiveAncestor is called to record the oldest-known AUM + // that contributed to the current state. This value is used as + // a hint on next startup to determine which chain to pick when computing + // the current state, if there are multiple distinct chains. + SetLastActiveAncestor(hash AUMHash) error + + // LastActiveAncestor returns the oldest-known AUM that was (in a + // previous run) an ancestor of the current state. This is used + // as a hint to pick the correct chain in the event that the Chonk stores + // multiple distinct chains. + LastActiveAncestor() (*AUMHash, error) +} + +// SigKind describes valid NodeKeySignature types. +type SigKind uint8 + +type NodeKeySignature struct { + // SigKind identifies the variety of signature. + SigKind SigKind `cbor:"1,keyasint"` + // Pubkey identifies the key.NodePublic which is being authorized. + // SigCredential signatures do not use this field. + Pubkey []byte `cbor:"2,keyasint,omitempty"` + + // KeyID identifies which key in the tailnet key authority should + // be used to verify this signature. Only set for SigDirect and + // SigCredential signature kinds. + KeyID []byte `cbor:"3,keyasint,omitempty"` + + // Signature is the packed (R, S) ed25519 signature over all other + // fields of the structure. + Signature []byte `cbor:"4,keyasint,omitempty"` + + // Nested describes a NodeKeySignature which authorizes the node-key + // used as Pubkey. Only used for SigRotation signatures. + Nested *NodeKeySignature `cbor:"5,keyasint,omitempty"` + + // WrappingPubkey specifies the ed25519 public key which must be used + // to sign a Signature which embeds this one. + // + // For SigRotation signatures multiple levels deep, intermediate + // signatures may omit this value, in which case the parent WrappingPubkey + // is used. + // + // SigCredential signatures use this field to specify the public key + // they are certifying, following the usual semanticsfor WrappingPubkey. + WrappingPubkey []byte `cbor:"6,keyasint,omitempty"` +} + +type DeeplinkValidationResult struct { +} + +func (h *AUMHash) UnmarshalText(text []byte) error { + return errNoTailnetLock +} + +var errNoTailnetLock = errors.New("tailnet lock is not enabled") + +func DecodeWrappedAuthkey(wrappedAuthKey string, logf logger.Logf) (authKey string, isWrapped bool, sig *NodeKeySignature, priv ed25519.PrivateKey) { + return wrappedAuthKey, false, nil, nil +} + +func ResignNKS(priv key.NLPrivate, nodeKey key.NodePublic, oldNKS tkatype.MarshaledSignature) (tkatype.MarshaledSignature, error) { + return nil, nil +} + +func SignByCredential(privKey []byte, wrapped *NodeKeySignature, nodeKey key.NodePublic) (tkatype.MarshaledSignature, error) { + return nil, nil +} + +func (s NodeKeySignature) String() string { return "" } diff --git a/tka/key.go b/tka/key.go index 07736795d8e58..dca1b4416560b 100644 --- a/tka/key.go +++ b/tka/key.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" - "github.com/hdevalence/ed25519consensus" "tailscale.com/types/tkatype" ) @@ -136,24 +135,3 @@ func (k Key) StaticValidate() error { } return nil } - -// Verify returns a nil error if the signature is valid over the -// provided AUM BLAKE2s digest, using the given key. 
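Stepping back to the Chonk interface above: a small sketch of driving it through the in-memory Mem implementation. Per the interface contract, the AUMs committed here are assumed to have already passed verification.

package tka_test

import (
	"fmt"

	"tailscale.com/tka"
)

func ExampleChonk() {
	var storage tka.Chonk = &tka.Mem{}

	genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}}
	gHash := genesis.Hash()
	child := tka.AUM{PrevAUMHash: gHash[:]}

	// Only verified AUMs may be handed to CommitVerifiedAUMs.
	if err := storage.CommitVerifiedAUMs([]tka.AUM{genesis, child}); err != nil {
		panic(err)
	}

	heads, err := storage.Heads() // chain tips: AUMs with no children
	if err != nil {
		panic(err)
	}
	children, err := storage.ChildAUMs(gHash)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(heads), len(children))
	// Output: 1 1
}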
-func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { - // NOTE(tom): Even if we can compute the public from the KeyID, - // its possible for the KeyID to be attacker-controlled - // so we should use the public contained in the state machine. - switch key.Kind { - case Key25519: - if len(key.Public) != ed25519.PublicKeySize { - return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public)) - } - if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) { - return nil - } - return errors.New("invalid signature") - - default: - return fmt.Errorf("unhandled key type: %v", key.Kind) - } -} diff --git a/tka/sig.go b/tka/sig.go index c82f9715c33fb..7b1838d409130 100644 --- a/tka/sig.go +++ b/tka/sig.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/sig_test.go b/tka/sig_test.go index d64575e7c7b45..99c25f8e57ae6 100644 --- a/tka/sig_test.go +++ b/tka/sig_test.go @@ -507,7 +507,7 @@ func TestDecodeWrappedAuthkey(t *testing.T) { } func TestResignNKS(t *testing.T) { - // Tailnet lock keypair of a signing node. + // Tailnet Lock keypair of a signing node. authPub, authPriv := testingKey25519(t, 1) authKey := Key{Kind: Key25519, Public: authPub, Votes: 2} diff --git a/tka/state.go b/tka/state.go index 0a459bd9a1b24..0a30c56a02fa8 100644 --- a/tka/state.go +++ b/tka/state.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/state_test.go b/tka/state_test.go index 060bd9350dd06..32b6563145ee7 100644 --- a/tka/state_test.go +++ b/tka/state_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/sync.go b/tka/sync.go index 6131f54d0dfca..6c2b7cbb8c81a 100644 --- a/tka/sync.go +++ b/tka/sync.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( diff --git a/tka/tailchonk.go b/tka/tailchonk.go index 32d2215dec9a1..cb683c273d033 100644 --- a/tka/tailchonk.go +++ b/tka/tailchonk.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package tka import ( @@ -9,6 +11,7 @@ import ( "fmt" "os" "path/filepath" + "slices" "sync" "time" @@ -125,19 +128,6 @@ func (c *Mem) AUM(hash AUMHash) (AUM, error) { return aum, nil } -// Orphans returns all AUMs which do not have a parent. -func (c *Mem) Orphans() ([]AUM, error) { - c.l.RLock() - defer c.l.RUnlock() - out := make([]AUM, 0, 6) - for _, a := range c.aums { - if _, ok := a.Parent(); !ok { - out = append(out, a) - } - } - return out, nil -} - // ChildAUMs returns all AUMs with a specified previous // AUM hash. func (c *Mem) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { @@ -217,10 +207,14 @@ func ChonkDir(dir string) (*FS, error) { // CBOR was chosen because we are already using it and it serializes // much smaller than JSON for AUMs. The 'keyasint' thing isn't essential // but again it saves a bunch of bytes. +// +// We have removed the following fields from fsHashInfo, but they may be +// present in data stored in existing deployments. 
Do not reuse these values, +// to avoid getting unexpected values from legacy data: +// - cbor:1, Children type fsHashInfo struct { - Children []AUMHash `cbor:"1,keyasint"` - AUM *AUM `cbor:"2,keyasint"` - CreatedUnix int64 `cbor:"3,keyasint,omitempty"` + AUM *AUM `cbor:"2,keyasint"` + CreatedUnix int64 `cbor:"3,keyasint,omitempty"` // PurgedUnix is set when the AUM is deleted. The value is // the unix epoch at the time it was deleted. @@ -296,32 +290,15 @@ func (c *FS) ChildAUMs(prevAUMHash AUMHash) ([]AUM, error) { c.mu.RLock() defer c.mu.RUnlock() - info, err := c.get(prevAUMHash) - if err != nil { - if os.IsNotExist(err) { - // not knowing about this hash is not an error - return nil, nil - } - return nil, err - } - // NOTE(tom): We don't check PurgedUnix here because 'purged' - // only applies to that specific AUM (i.e. info.AUM) and not to - // any information about children stored against that hash. + var out []AUM - out := make([]AUM, len(info.Children)) - for i, h := range info.Children { - c, err := c.get(h) - if err != nil { - // We expect any AUM recorded as a child on its parent to exist. - return nil, fmt.Errorf("reading child %d of %x: %v", i, h, err) - } - if c.AUM == nil || c.PurgedUnix > 0 { - return nil, fmt.Errorf("child %d of %x: AUM not stored", i, h) + err := c.scanHashes(func(info *fsHashInfo) { + if info.AUM != nil && bytes.Equal(info.AUM.PrevAUMHash, prevAUMHash[:]) { + out = append(out, *info.AUM) } - out[i] = *c.AUM - } + }) - return out, nil + return out, err } func (c *FS) get(h AUMHash) (*fsHashInfo, error) { @@ -357,13 +334,45 @@ func (c *FS) Heads() ([]AUM, error) { c.mu.RLock() defer c.mu.RUnlock() + // Scan the complete list of AUMs, and build a list of all parent hashes. + // This tells us which AUMs have children. + var parentHashes []AUMHash + + allAUMs, err := c.AllAUMs() + if err != nil { + return nil, err + } + + for _, h := range allAUMs { + aum, err := c.AUM(h) + if err != nil { + return nil, err + } + parent, hasParent := aum.Parent() + if !hasParent { + continue + } + if !slices.Contains(parentHashes, parent) { + parentHashes = append(parentHashes, parent) + } + } + + // Now scan a second time, and only include AUMs which weren't marked as + // the parent of any other AUM. out := make([]AUM, 0, 6) // 6 is arbitrary. - err := c.scanHashes(func(info *fsHashInfo) { - if len(info.Children) == 0 && info.AUM != nil && info.PurgedUnix == 0 { - out = append(out, *info.AUM) + + for _, h := range allAUMs { + if slices.Contains(parentHashes, h) { + continue } - }) - return out, err + aum, err := c.AUM(h) + if err != nil { + return nil, err + } + out = append(out, aum) + } + + return out, nil } // AllAUMs returns all AUMs stored in the chonk. @@ -373,7 +382,7 @@ func (c *FS) AllAUMs() ([]AUMHash, error) { out := make([]AUMHash, 0, 6) // 6 is arbitrary. err := c.scanHashes(func(info *fsHashInfo) { - if info.AUM != nil && info.PurgedUnix == 0 { + if info.AUM != nil { out = append(out, info.AUM.Hash()) } }) @@ -402,6 +411,9 @@ func (c *FS) scanHashes(eachHashInfo func(*fsHashInfo)) error { if err != nil { return fmt.Errorf("reading %x: %v", h, err) } + if info.PurgedUnix > 0 { + continue + } eachHashInfo(info) } @@ -456,24 +468,6 @@ func (c *FS) CommitVerifiedAUMs(updates []AUM) error { for i, aum := range updates { h := aum.Hash() - // We keep track of children against their parent so that - // ChildAUMs() do not need to scan all AUMs. 
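The FS changes in this hunk drop the per-parent Children index: ChildAUMs and Heads now derive parent/child relationships by scanning every stored AUM. The new Heads does this with slices.Contains over a slice of parent hashes; the same idea in isolation, sketched with a map and simplified stand-in types:

package sketch

// hash is a stand-in for tka.AUMHash.
type hash [32]byte

// aum carries only what head-finding needs.
type aum struct {
	self   hash
	parent *hash // nil for a genesis AUM
}

// headsOf returns the AUMs whose hash is never referenced as another
// AUM's parent, i.e. the tips of every stored chain.
func headsOf(all []aum) []aum {
	isParent := make(map[hash]bool, len(all))
	for _, a := range all {
		if a.parent != nil {
			isParent[*a.parent] = true
		}
	}
	var heads []aum
	for _, a := range all {
		if !isParent[a.self] {
			heads = append(heads, a)
		}
	}
	return heads
}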
- parent, hasParent := aum.Parent() - if hasParent { - err := c.commit(parent, func(info *fsHashInfo) { - // Only add it if its not already there. - for i := range info.Children { - if info.Children[i] == h { - return - } - } - info.Children = append(info.Children, h) - }) - if err != nil { - return fmt.Errorf("committing update[%d] to parent %x: %v", i, parent, err) - } - } - err := c.commit(h, func(info *fsHashInfo) { info.PurgedUnix = 0 // just in-case it was set for some reason info.AUM = &aum diff --git a/tka/tailchonk_test.go b/tka/tailchonk_test.go index 86d5642a3bd10..08686598033b8 100644 --- a/tka/tailchonk_test.go +++ b/tka/tailchonk_test.go @@ -5,7 +5,6 @@ package tka import ( "bytes" - "fmt" "os" "path/filepath" "sync" @@ -15,8 +14,16 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "golang.org/x/crypto/blake2s" + "tailscale.com/util/must" ) +// This package has implementation-specific tests for Mem and FS. +// +// We also have tests for the Chonk interface in `chonktest`, which exercises +// both Mem and FS. Those tests are in a separate package so they can be shared +// with other repos; we don't call the shared test helpers from this package +// to avoid creating a circular dependency. + // randHash derives a fake blake2s hash from the test name // and the given seed. func randHash(t *testing.T, seed int64) [blake2s.Size]byte { @@ -30,130 +37,8 @@ func TestImplementsChonk(t *testing.T) { t.Logf("chonks: %v", impls) } -func TestTailchonk_ChildAUMs(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - parentHash := randHash(t, 1) - data := []AUM{ - { - MessageKind: AUMRemoveKey, - KeyID: []byte{1, 2}, - PrevAUMHash: parentHash[:], - }, - { - MessageKind: AUMRemoveKey, - KeyID: []byte{3, 4}, - PrevAUMHash: parentHash[:], - }, - } - - if err := chonk.CommitVerifiedAUMs(data); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - stored, err := chonk.ChildAUMs(parentHash) - if err != nil { - t.Fatalf("ChildAUMs failed: %v", err) - } - if diff := cmp.Diff(data, stored); diff != "" { - t.Errorf("stored AUM differs (-want, +got):\n%s", diff) - } - }) - } -} - -func TestTailchonk_AUMMissing(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - var notExists AUMHash - notExists[:][0] = 42 - if _, err := chonk.AUM(notExists); err != os.ErrNotExist { - t.Errorf("chonk.AUM(notExists).err = %v, want %v", err, os.ErrNotExist) - } - }) - } -} - -func TestTailchonkMem_Orphans(t *testing.T) { - chonk := Mem{} - - parentHash := randHash(t, 1) - orphan := AUM{MessageKind: AUMNoOp} - aums := []AUM{ - orphan, - // A parent is specified, so we shouldnt see it in GetOrphans() - { - MessageKind: AUMRemoveKey, - KeyID: []byte{3, 4}, - PrevAUMHash: parentHash[:], - }, - } - if err := chonk.CommitVerifiedAUMs(aums); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - stored, err := chonk.Orphans() - if err != nil { - t.Fatalf("Orphans failed: %v", err) - } - if diff := cmp.Diff([]AUM{orphan}, stored); diff != "" { - t.Errorf("stored AUM differs (-want, +got):\n%s", diff) - } -} - -func TestTailchonk_ReadChainFromHead(t *testing.T) { - for _, chonk := range []Chonk{&Mem{}, &FS{base: t.TempDir()}} { - - t.Run(fmt.Sprintf("%T", chonk), func(t *testing.T) { - genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} - gHash := genesis.Hash() - intermediate := 
AUM{PrevAUMHash: gHash[:]} - iHash := intermediate.Hash() - leaf := AUM{PrevAUMHash: iHash[:]} - - commitSet := []AUM{ - genesis, - intermediate, - leaf, - } - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - // t.Logf("genesis hash = %X", genesis.Hash()) - // t.Logf("intermediate hash = %X", intermediate.Hash()) - // t.Logf("leaf hash = %X", leaf.Hash()) - - // Read the chain from the leaf backwards. - gotLeafs, err := chonk.Heads() - if err != nil { - t.Fatalf("Heads failed: %v", err) - } - if diff := cmp.Diff([]AUM{leaf}, gotLeafs); diff != "" { - t.Fatalf("leaf AUM differs (-want, +got):\n%s", diff) - } - - parent, _ := gotLeafs[0].Parent() - gotIntermediate, err := chonk.AUM(parent) - if err != nil { - t.Fatalf("AUM() failed: %v", err) - } - if diff := cmp.Diff(intermediate, gotIntermediate); diff != "" { - t.Errorf("intermediate AUM differs (-want, +got):\n%s", diff) - } - - parent, _ = gotIntermediate.Parent() - gotGenesis, err := chonk.AUM(parent) - if err != nil { - t.Fatalf("AUM() failed: %v", err) - } - if diff := cmp.Diff(genesis, gotGenesis); diff != "" { - t.Errorf("genesis AUM differs (-want, +got):\n%s", diff) - } - }) - } -} - func TestTailchonkFS_Commit(t *testing.T) { - chonk := &FS{base: t.TempDir()} + chonk := must.Get(ChonkDir(t.TempDir())) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} @@ -171,9 +56,6 @@ func TestTailchonkFS_Commit(t *testing.T) { if _, err := os.Stat(filepath.Join(dir, base)); err != nil { t.Errorf("stat of AUM file failed: %v", err) } - if _, err := os.Stat(filepath.Join(chonk.base, "M7", "M7LL2NDB4NKCZIUPVS6RDM2GUOIMW6EEAFVBWMVCPUANQJPHT3SQ")); err != nil { - t.Errorf("stat of AUM parent failed: %v", err) - } info, err := chonk.get(aum.Hash()) if err != nil { @@ -185,7 +67,7 @@ func TestTailchonkFS_Commit(t *testing.T) { } func TestTailchonkFS_CommitTime(t *testing.T) { - chonk := &FS{base: t.TempDir()} + chonk := must.Get(ChonkDir(t.TempDir())) parentHash := randHash(t, 1) aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} @@ -201,60 +83,6 @@ func TestTailchonkFS_CommitTime(t *testing.T) { } } -func TestTailchonkFS_PurgeAUMs(t *testing.T) { - chonk := &FS{base: t.TempDir()} - parentHash := randHash(t, 1) - aum := AUM{MessageKind: AUMNoOp, PrevAUMHash: parentHash[:]} - - if err := chonk.CommitVerifiedAUMs([]AUM{aum}); err != nil { - t.Fatal(err) - } - if err := chonk.PurgeAUMs([]AUMHash{aum.Hash()}); err != nil { - t.Fatal(err) - } - - if _, err := chonk.AUM(aum.Hash()); err != os.ErrNotExist { - t.Errorf("AUM() on purged AUM returned err = %v, want ErrNotExist", err) - } - - info, err := chonk.get(aum.Hash()) - if err != nil { - t.Fatal(err) - } - if info.PurgedUnix == 0 { - t.Errorf("recently-created AUM PurgedUnix = %d, want non-zero", info.PurgedUnix) - } -} - -func TestTailchonkFS_AllAUMs(t *testing.T) { - chonk := &FS{base: t.TempDir()} - genesis := AUM{MessageKind: AUMRemoveKey, KeyID: []byte{1, 2}} - gHash := genesis.Hash() - intermediate := AUM{PrevAUMHash: gHash[:]} - iHash := intermediate.Hash() - leaf := AUM{PrevAUMHash: iHash[:]} - - commitSet := []AUM{ - genesis, - intermediate, - leaf, - } - if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { - t.Fatalf("CommitVerifiedAUMs failed: %v", err) - } - - hashes, err := chonk.AllAUMs() - if err != nil { - t.Fatal(err) - } - hashesLess := func(a, b AUMHash) bool { - return bytes.Compare(a[:], b[:]) < 0 - } - if diff := cmp.Diff([]AUMHash{genesis.Hash(), 
intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { - t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) - } -} - func TestMarkActiveChain(t *testing.T) { type aumTemplate struct { AUM AUM @@ -611,11 +439,12 @@ func (c *compactingChonkFake) CommitTime(hash AUMHash) (time.Time, error) { return c.aumAge[hash], nil } +func hashesLess(x, y AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + func (c *compactingChonkFake) PurgeAUMs(hashes []AUMHash) error { - lessHashes := func(a, b AUMHash) bool { - return bytes.Compare(a[:], b[:]) < 0 - } - if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(lessHashes)); diff != "" { + if diff := cmp.Diff(c.wantDelete, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { c.t.Errorf("deletion set differs (-want, +got):\n%s", diff) } return nil diff --git a/tka/tka.go b/tka/tka.go index ade621bc689e3..234c87fe1b89c 100644 --- a/tka/tka.go +++ b/tka/tka.go @@ -1,7 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package tka (WIP) implements the Tailnet Key Authority. +//go:build !ts_omit_tailnetlock + +// Package tka implements the Tailnet Key Authority (TKA) for Tailnet Lock. package tka import ( diff --git a/tka/verify.go b/tka/verify.go new file mode 100644 index 0000000000000..e4e22e5518e8b --- /dev/null +++ b/tka/verify.go @@ -0,0 +1,36 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !ts_omit_tailnetlock + +package tka + +import ( + "crypto/ed25519" + "errors" + "fmt" + + "github.com/hdevalence/ed25519consensus" + "tailscale.com/types/tkatype" +) + +// signatureVerify returns a nil error if the signature is valid over the +// provided AUM BLAKE2s digest, using the given key. +func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { + // NOTE(tom): Even if we can compute the public from the KeyID, + // its possible for the KeyID to be attacker-controlled + // so we should use the public contained in the state machine. + switch key.Kind { + case Key25519: + if len(key.Public) != ed25519.PublicKeySize { + return fmt.Errorf("ed25519 key has wrong length: %d", len(key.Public)) + } + if ed25519consensus.Verify(ed25519.PublicKey(key.Public), aumDigest[:], s.Signature) { + return nil + } + return errors.New("invalid signature") + + default: + return fmt.Errorf("unhandled key type: %v", key.Kind) + } +} diff --git a/tka/verify_disabled.go b/tka/verify_disabled.go new file mode 100644 index 0000000000000..ba72f93e27d8f --- /dev/null +++ b/tka/verify_disabled.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_tailnetlock + +package tka + +import ( + "errors" + + "tailscale.com/types/tkatype" +) + +// signatureVerify returns a nil error if the signature is valid over the +// provided AUM BLAKE2s digest, using the given key. 
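The real signatureVerify (in verify.go above; the ts_omit_tailnetlock stub below just returns an error) checks an Ed25519 signature over the AUM digest with ed25519consensus, whose consensus-oriented validation rules keep accept/reject decisions identical across nodes. A standalone sketch of that verification pattern, using a freshly generated key in place of a key-authority key:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"github.com/hdevalence/ed25519consensus"
	"golang.org/x/crypto/blake2s"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Stand-in for an AUM signing hash (tka uses a BLAKE2s digest).
	digest := blake2s.Sum256([]byte("serialized AUM bytes"))
	sig := ed25519.Sign(priv, digest[:])

	// ed25519consensus.Verify applies consensus-friendly validation rules,
	// unlike crypto/ed25519's Verify, which can disagree on edge cases.
	fmt.Println(ed25519consensus.Verify(pub, digest[:], sig)) // true
}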
+func signatureVerify(s *tkatype.Signature, aumDigest tkatype.AUMSigHash, key Key) error { + return errors.New("tailnetlock disabled in build") +} diff --git a/tool/gocross/exec_other.go b/tool/gocross/exec_other.go index 7bce0c0993620..4dd74f84d7d2b 100644 --- a/tool/gocross/exec_other.go +++ b/tool/gocross/exec_other.go @@ -6,6 +6,7 @@ package main import ( + "errors" "os" "os/exec" ) @@ -16,5 +17,14 @@ func doExec(cmd string, args []string, env []string) error { c.Stdin = os.Stdin c.Stdout = os.Stdout c.Stderr = os.Stderr - return c.Run() + err := c.Run() + + // Propagate ExitErrors within this func to give us similar semantics to + // the Unix variant. + var ee *exec.ExitError + if errors.As(err, &ee) { + os.Exit(ee.ExitCode()) + } + + return err } diff --git a/tool/gocross/gocross.go b/tool/gocross/gocross.go index c71012d73778b..41fab3d584260 100644 --- a/tool/gocross/gocross.go +++ b/tool/gocross/gocross.go @@ -114,7 +114,11 @@ func main() { } - doExec(filepath.Join(toolchain, "bin/go"), args, os.Environ()) + // Note that doExec only returns if the exec call failed. + if err := doExec(filepath.Join(toolchain, "bin", "go"), args, os.Environ()); err != nil { + fmt.Fprintf(os.Stderr, "executing process: %v\n", err) + os.Exit(1) + } } //go:embed gocross-wrapper.sh diff --git a/tsconst/health.go b/tsconst/health.go new file mode 100644 index 0000000000000..5db9b1fc286ec --- /dev/null +++ b/tsconst/health.go @@ -0,0 +1,26 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +const ( + HealthWarnableUpdateAvailable = "update-available" + HealthWarnableSecurityUpdateAvailable = "security-update-available" + HealthWarnableIsUsingUnstableVersion = "is-using-unstable-version" + HealthWarnableNetworkStatus = "network-status" + HealthWarnableWantRunningFalse = "wantrunning-false" + HealthWarnableLocalLogConfigError = "local-log-config-error" + HealthWarnableLoginState = "login-state" + HealthWarnableNotInMapPoll = "not-in-map-poll" + HealthWarnableNoDERPHome = "no-derp-home" + HealthWarnableNoDERPConnection = "no-derp-connection" + HealthWarnableDERPTimedOut = "derp-timed-out" + HealthWarnableDERPRegionError = "derp-region-error" + HealthWarnableNoUDP4Bind = "no-udp4-bind" + HealthWarnableMapResponseTimeout = "mapresponse-timeout" + HealthWarnableTLSConnectionFailed = "tls-connection-failed" + HealthWarnableMagicsockReceiveFuncError = "magicsock-receive-func-error" + HealthWarnableTestWarnable = "test-warnable" + HealthWarnableApplyDiskConfig = "apply-disk-config" + HealthWarnableWarmingUp = "warming-up" +) diff --git a/tsconst/linuxfw.go b/tsconst/linuxfw.go new file mode 100644 index 0000000000000..ce571e40239ed --- /dev/null +++ b/tsconst/linuxfw.go @@ -0,0 +1,43 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tsconst + +// Linux firewall constants used by Tailscale. + +// The following bits are added to packet marks for Tailscale use. +// +// We tried to pick bits sufficiently out of the way that it's +// unlikely to collide with existing uses. We have 4 bytes of mark +// bits to play with. We leave the lower byte alone on the assumption +// that sysadmins would use those. Kubernetes uses a few bits in the +// second byte, so we steer clear of that too. +// +// Empirically, most of the documentation on packet marks on the +// internet gives the impression that the marks are 16 bits +// wide. 
Based on this, we theorize that the upper two bytes are +// relatively unused in the wild, and so we consume bits 16:23 (the +// third byte). +// +// The constants are in the iptables/iproute2 string format for +// matching and setting the bits, so they can be directly embedded in +// commands. +const ( + // The mask for reading/writing the 'firewall mask' bits on a packet. + // See the comment on the const block on why we only use the third byte. + // + // We claim bits 16:23 entirely. For now we only use the lower four + // bits, leaving the higher 4 bits for future use. + LinuxFwmarkMask = "0xff0000" + LinuxFwmarkMaskNum = 0xff0000 + + // Packet is from Tailscale and to a subnet route destination, so + // is allowed to be routed through this machine. + LinuxSubnetRouteMark = "0x40000" + LinuxSubnetRouteMarkNum = 0x40000 + + // Packet was originated by tailscaled itself, and must not be + // routed over the Tailscale network. + LinuxBypassMark = "0x80000" + LinuxBypassMarkNum = 0x80000 +) diff --git a/tsconst/interface.go b/tsconst/tsconst.go similarity index 100% rename from tsconst/interface.go rename to tsconst/tsconst.go diff --git a/tsd/tsd.go b/tsd/tsd.go index bd333bd31b027..8223254dae942 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -60,6 +60,7 @@ type System struct { DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] PolicyClient SubSystem[policyclient.Client] + HealthTracker SubSystem[*health.Tracker] // InitialConfig is initial server config, if any. // It is nil if the node is not in declarative mode. @@ -74,22 +75,37 @@ type System struct { controlKnobs controlknobs.Knobs proxyMap proxymap.Mapper - healthTracker health.Tracker userMetricsRegistry usermetric.Registry } // NewSystem constructs a new otherwise-empty [System] with a // freshly-constructed event bus populated. -func NewSystem() *System { +func NewSystem() *System { return NewSystemWithBus(eventbus.New()) } + +// NewSystemWithBus constructs a new otherwise-empty [System] with an +// eventbus provided by the caller. The provided bus must not be nil. +// This is mainly intended for testing; for production use call [NewBus]. +func NewSystemWithBus(bus *eventbus.Bus) *System { + if bus == nil { + panic("nil eventbus") + } sys := new(System) - sys.Set(eventbus.New()) + sys.Set(bus) + + tracker := health.NewTracker(bus) + sys.Set(tracker) + return sys } +// LocalBackend is a fake name for *ipnlocal.LocalBackend to avoid an import cycle. +type LocalBackend = any + // NetstackImpl is the interface that *netstack.Impl implements. // It's an interface for circular dependency reasons: netstack.Impl // references LocalBackend, and LocalBackend has a tsd.System. type NetstackImpl interface { + Start(LocalBackend) error UpdateNetstackIPs(*netmap.NetworkMap) } @@ -130,6 +146,8 @@ func (s *System) Set(v any) { s.DriveForRemote.Set(v) case policyclient.Client: s.PolicyClient.Set(v) + case *health.Tracker: + s.HealthTracker.Set(v) default: panic(fmt.Sprintf("unknown type %T", v)) } @@ -159,11 +177,6 @@ func (s *System) ProxyMapper() *proxymap.Mapper { return &s.proxyMap } -// HealthTracker returns the system health tracker. -func (s *System) HealthTracker() *health.Tracker { - return &s.healthTracker -} - // UserMetricsRegistry returns the system usermetrics. 
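With health.Tracker now registered as a tsd SubSystem built from the event bus (rather than a value field behind an accessor), callers fetch it with HealthTracker.Get. A minimal wiring sketch based on the constructors in this hunk:

package main

import (
	"fmt"

	"tailscale.com/tsd"
	"tailscale.com/util/eventbus"
)

func main() {
	// NewSystem builds its own bus; NewSystemWithBus takes one from the
	// caller, which is mainly useful for tests that watch or inject events.
	bus := eventbus.New()
	sys := tsd.NewSystemWithBus(bus)

	ht := sys.HealthTracker.Get() // *health.Tracker registered during construction
	fmt.Println(ht != nil, sys.Bus.Get() == bus)
}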
func (s *System) UserMetricsRegistry() *usermetric.Registry { return &s.userMetricsRegistry diff --git a/tsnet/depaware.txt b/tsnet/depaware.txt index 74f3f8c539a66..cd734e9959041 100644 --- a/tsnet/depaware.txt +++ b/tsnet/depaware.txt @@ -5,92 +5,15 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy - L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ - L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/middleware from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/query from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/protocol/restjson from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/aws/protocol/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/aws/ratelimit from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/aws/retry from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client+ - L github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 from github.com/aws/aws-sdk-go-v2/aws/signer/v4 - L github.com/aws/aws-sdk-go-v2/aws/signer/v4 from github.com/aws/aws-sdk-go-v2/internal/auth/smithy+ - L github.com/aws/aws-sdk-go-v2/aws/transport/http from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/config from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/credentials from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client from github.com/aws/aws-sdk-go-v2/credentials/endpointcreds - L github.com/aws/aws-sdk-go-v2/credentials/processcreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/ssocreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/credentials/stscreds from github.com/aws/aws-sdk-go-v2/config - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config from github.com/aws/aws-sdk-go-v2/feature/ec2/imds - L github.com/aws/aws-sdk-go-v2/internal/auth from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ - L github.com/aws/aws-sdk-go-v2/internal/auth/smithy from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/configsources from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/context from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 from github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints+ - L github.com/aws/aws-sdk-go-v2/internal/ini from github.com/aws/aws-sdk-go-v2/config - 
L github.com/aws/aws-sdk-go-v2/internal/middleware from github.com/aws/aws-sdk-go-v2/service/sso+ - L github.com/aws/aws-sdk-go-v2/internal/rand from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdk from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/aws-sdk-go-v2/internal/sdkio from github.com/aws/aws-sdk-go-v2/credentials/processcreds - L github.com/aws/aws-sdk-go-v2/internal/shareddefaults from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/internal/strings from github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 - L github.com/aws/aws-sdk-go-v2/internal/sync/singleflight from github.com/aws/aws-sdk-go-v2/aws - L github.com/aws/aws-sdk-go-v2/internal/timeconv from github.com/aws/aws-sdk-go-v2/aws/retry - L github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/internal/presigned-url from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/ssm from tailscale.com/ipn/store/awsstore - L github.com/aws/aws-sdk-go-v2/service/ssm/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm - L github.com/aws/aws-sdk-go-v2/service/ssm/types from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/aws-sdk-go-v2/service/sso from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/sso/types from github.com/aws/aws-sdk-go-v2/service/sso - L github.com/aws/aws-sdk-go-v2/service/ssooidc from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/ssooidc/types from github.com/aws/aws-sdk-go-v2/service/ssooidc - L github.com/aws/aws-sdk-go-v2/service/sts from github.com/aws/aws-sdk-go-v2/config+ - L github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/aws-sdk-go-v2/service/sts/types from github.com/aws/aws-sdk-go-v2/credentials/stscreds+ - L github.com/aws/smithy-go from github.com/aws/aws-sdk-go-v2/aws/protocol/restjson+ - L github.com/aws/smithy-go/auth from github.com/aws/aws-sdk-go-v2/internal/auth+ - L github.com/aws/smithy-go/auth/bearer from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/context from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/document from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding from github.com/aws/smithy-go/encoding/json+ - L github.com/aws/smithy-go/encoding/httpbinding from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ - L github.com/aws/smithy-go/encoding/json from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/encoding/xml from github.com/aws/aws-sdk-go-v2/service/sts - L github.com/aws/smithy-go/endpoints from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/internal/sync/singleflight from github.com/aws/smithy-go/auth/bearer - L github.com/aws/smithy-go/io from github.com/aws/aws-sdk-go-v2/feature/ec2/imds+ - L github.com/aws/smithy-go/logging from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/metrics from github.com/aws/aws-sdk-go-v2/aws/retry+ - L github.com/aws/smithy-go/middleware from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/private/requestcompression from 
github.com/aws/aws-sdk-go-v2/config - L github.com/aws/smithy-go/ptr from github.com/aws/aws-sdk-go-v2/aws+ - L github.com/aws/smithy-go/rand from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/time from github.com/aws/aws-sdk-go-v2/service/ssm+ - L github.com/aws/smithy-go/tracing from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http from github.com/aws/aws-sdk-go-v2/aws/middleware+ - L github.com/aws/smithy-go/transport/http/internal/io from github.com/aws/smithy-go/transport/http - L github.com/aws/smithy-go/waiter from github.com/aws/aws-sdk-go-v2/service/ssm LDW github.com/coder/websocket from tailscale.com/util/eventbus LDW github.com/coder/websocket/internal/errd from github.com/coder/websocket LDW github.com/coder/websocket/internal/util from github.com/coder/websocket LDW github.com/coder/websocket/internal/xsync from github.com/coder/websocket - L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw - W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ + W 💣 github.com/dblohm7/wingoes from tailscale.com/net/tshttpproxy+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ W 💣 github.com/dblohm7/wingoes/com/automation from tailscale.com/util/osdiag/internal/wsc W github.com/dblohm7/wingoes/internal from github.com/dblohm7/wingoes/com W 💣 github.com/dblohm7/wingoes/pe from tailscale.com/util/osdiag+ - LW 💣 github.com/digitalocean/go-smbios/smbios from tailscale.com/posture github.com/fxamacker/cbor/v2 from tailscale.com/tka github.com/gaissmai/bart from tailscale.com/net/ipset+ github.com/gaissmai/bart/internal/bitset from github.com/gaissmai/bart+ @@ -101,22 +24,11 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/go-json-experiment/json/internal/jsonopts from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/internal/jsonwire from github.com/go-json-experiment/json+ github.com/go-json-experiment/json/jsontext from github.com/go-json-experiment/json+ - W 💣 github.com/go-ole/go-ole from github.com/go-ole/go-ole/oleutil+ - W 💣 github.com/go-ole/go-ole/oleutil from tailscale.com/wgengine/winnet L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns github.com/golang/groupcache/lru from tailscale.com/net/dnscache github.com/google/btree from gvisor.dev/gvisor/pkg/tcpip/header+ - L github.com/google/nftables from tailscale.com/util/linuxfw - L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt - L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+ - L github.com/google/nftables/expr from github.com/google/nftables+ - L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+ - L github.com/google/nftables/xt from github.com/google/nftables/expr+ - DWI github.com/google/uuid from github.com/prometheus-community/pro-bing+ - github.com/hdevalence/ed25519consensus from tailscale.com/clientupdate/distsign+ - L 💣 github.com/illarion/gonotify/v3 from tailscale.com/net/dns - L github.com/illarion/gonotify/v3/syscallf from github.com/illarion/gonotify/v3 - L github.com/jmespath/go-jmespath from github.com/aws/aws-sdk-go-v2/service/ssm + DI github.com/google/uuid from github.com/prometheus-community/pro-bing + github.com/hdevalence/ed25519consensus from tailscale.com/tka L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/netmon L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink 
github.com/klauspost/compress from github.com/klauspost/compress/zstd @@ -126,16 +38,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/klauspost/compress/internal/snapref from github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd - L github.com/mdlayher/genetlink from tailscale.com/net/tstun - L 💣 github.com/mdlayher/netlink from github.com/google/nftables+ + L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+ L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+ - L github.com/mdlayher/netlink/nltest from github.com/google/nftables - L github.com/mdlayher/sdnotify from tailscale.com/util/systemd LA 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink+ - github.com/miekg/dns from tailscale.com/net/dns/recursive LDW 💣 github.com/mitchellh/go-ps from tailscale.com/safesocket DI github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack - L 💣 github.com/safchain/ethtool from tailscale.com/doctor/ethtool+ + L 💣 github.com/safchain/ethtool from tailscale.com/net/netkernelconf W 💣 github.com/tailscale/certstore from tailscale.com/control/controlclient W 💣 github.com/tailscale/go-winio from tailscale.com/safesocket W 💣 github.com/tailscale/go-winio/internal/fs from github.com/tailscale/go-winio @@ -149,9 +57,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/goupnp/soap from github.com/tailscale/goupnp+ github.com/tailscale/goupnp/ssdp from github.com/tailscale/goupnp LDW github.com/tailscale/hujson from tailscale.com/ipn/conffile - L 💣 github.com/tailscale/netlink from tailscale.com/net/routetable+ - L 💣 github.com/tailscale/netlink/nl from github.com/tailscale/netlink - github.com/tailscale/peercred from tailscale.com/ipn/ipnauth + LDAI github.com/tailscale/peercred from tailscale.com/ipn/ipnauth LDW github.com/tailscale/web-client-prebuilt from tailscale.com/client/web 💣 github.com/tailscale/wireguard-go/conn from github.com/tailscale/wireguard-go/device+ W 💣 github.com/tailscale/wireguard-go/conn/winrio from github.com/tailscale/wireguard-go/conn @@ -163,7 +69,6 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) github.com/tailscale/wireguard-go/rwcancel from github.com/tailscale/wireguard-go/device+ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ - L github.com/vishvananda/netns from github.com/tailscale/netlink+ github.com/x448/float16 from github.com/fxamacker/cbor/v2 💣 go4.org/mem from tailscale.com/client/local+ go4.org/netipx from tailscale.com/ipn/ipnlocal+ @@ -214,32 +119,37 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/appc from tailscale.com/ipn/ipnlocal 💣 tailscale.com/atomicfile from tailscale.com/ipn+ tailscale.com/client/local from tailscale.com/client/web+ + tailscale.com/client/tailscale from tailscale.com/internal/client/tailscale tailscale.com/client/tailscale/apitype from tailscale.com/client/local+ LDW tailscale.com/client/web from tailscale.com/ipn/ipnlocal - tailscale.com/clientupdate from tailscale.com/client/web+ - LW tailscale.com/clientupdate/distsign from tailscale.com/clientupdate tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ 
tailscale.com/control/controlclient from tailscale.com/ipn/ipnext+ - tailscale.com/control/controlhttp from tailscale.com/control/controlclient + tailscale.com/control/controlhttp from tailscale.com/control/ts2021 tailscale.com/control/controlhttp/controlhttpcommon from tailscale.com/control/controlhttp tailscale.com/control/controlknobs from tailscale.com/control/controlclient+ + tailscale.com/control/ts2021 from tailscale.com/control/controlclient tailscale.com/derp from tailscale.com/derp/derphttp+ - tailscale.com/derp/derpconst from tailscale.com/derp+ + tailscale.com/derp/derpconst from tailscale.com/derp/derphttp+ tailscale.com/derp/derphttp from tailscale.com/ipn/localapi+ - tailscale.com/disco from tailscale.com/derp+ - tailscale.com/doctor from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/ethtool from tailscale.com/ipn/ipnlocal - 💣 tailscale.com/doctor/permissions from tailscale.com/ipn/ipnlocal - tailscale.com/doctor/routetable from tailscale.com/ipn/ipnlocal + tailscale.com/disco from tailscale.com/net/tstun+ tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ tailscale.com/envknob/featureknob from tailscale.com/client/web+ tailscale.com/feature from tailscale.com/ipn/ipnext+ + tailscale.com/feature/buildfeatures from tailscale.com/wgengine/magicsock+ + tailscale.com/feature/c2n from tailscale.com/tsnet + tailscale.com/feature/condlite/expvar from tailscale.com/wgengine/magicsock + tailscale.com/feature/condregister/oauthkey from tailscale.com/tsnet + tailscale.com/feature/condregister/portmapper from tailscale.com/tsnet + tailscale.com/feature/condregister/useproxy from tailscale.com/tsnet + tailscale.com/feature/oauthkey from tailscale.com/feature/condregister/oauthkey + tailscale.com/feature/portmapper from tailscale.com/feature/condregister/portmapper tailscale.com/feature/syspolicy from tailscale.com/logpolicy + tailscale.com/feature/useproxy from tailscale.com/feature/condregister/useproxy tailscale.com/health from tailscale.com/control/controlclient+ tailscale.com/health/healthmsg from tailscale.com/ipn/ipnlocal+ tailscale.com/hostinfo from tailscale.com/client/web+ - tailscale.com/internal/noiseconn from tailscale.com/control/controlclient + tailscale.com/internal/client/tailscale from tailscale.com/tsnet+ tailscale.com/ipn from tailscale.com/client/local+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnext+ @@ -247,34 +157,26 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/ipn/ipnlocal from tailscale.com/ipn/localapi+ tailscale.com/ipn/ipnstate from tailscale.com/client/local+ tailscale.com/ipn/localapi from tailscale.com/tsnet - tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal tailscale.com/ipn/store from tailscale.com/ipn/ipnlocal+ - L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store - L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ - L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ - L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob+ + tailscale.com/kube/kubetypes from tailscale.com/envknob LDW tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal tailscale.com/logpolicy from 
tailscale.com/ipn/ipnlocal+ tailscale.com/logtail from tailscale.com/control/controlclient+ - tailscale.com/logtail/backoff from tailscale.com/control/controlclient+ tailscale.com/logtail/filch from tailscale.com/log/sockstatlog+ - tailscale.com/metrics from tailscale.com/derp+ + tailscale.com/metrics from tailscale.com/tsweb+ tailscale.com/net/bakedroots from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/net/batching from tailscale.com/wgengine/magicsock tailscale.com/net/captivedetection from tailscale.com/ipn/ipnlocal+ - tailscale.com/net/connstats from tailscale.com/net/tstun+ tailscale.com/net/dns from tailscale.com/ipn/ipnlocal+ tailscale.com/net/dns/publicdns from tailscale.com/net/dns+ - tailscale.com/net/dns/recursive from tailscale.com/net/dnsfallback tailscale.com/net/dns/resolvconffile from tailscale.com/net/dns+ tailscale.com/net/dns/resolver from tailscale.com/net/dns+ tailscale.com/net/dnscache from tailscale.com/control/controlclient+ tailscale.com/net/dnsfallback from tailscale.com/control/controlclient+ - tailscale.com/net/flowtrack from tailscale.com/net/packet+ + tailscale.com/net/flowtrack from tailscale.com/wgengine+ tailscale.com/net/ipset from tailscale.com/ipn/ipnlocal+ tailscale.com/net/memnet from tailscale.com/tsnet tailscale.com/net/netaddr from tailscale.com/ipn+ @@ -284,57 +186,55 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/net/netknob from tailscale.com/logpolicy+ 💣 tailscale.com/net/netmon from tailscale.com/control/controlclient+ 💣 tailscale.com/net/netns from tailscale.com/derp/derphttp+ - W 💣 tailscale.com/net/netstat from tailscale.com/portlist tailscale.com/net/netutil from tailscale.com/client/local+ tailscale.com/net/netx from tailscale.com/control/controlclient+ tailscale.com/net/packet from tailscale.com/ipn/ipnlocal+ tailscale.com/net/packet/checksum from tailscale.com/net/tstun tailscale.com/net/ping from tailscale.com/net/netcheck+ - tailscale.com/net/portmapper from tailscale.com/ipn/localapi+ + tailscale.com/net/portmapper from tailscale.com/feature/portmapper + tailscale.com/net/portmapper/portmappertype from tailscale.com/net/netcheck+ tailscale.com/net/proxymux from tailscale.com/tsnet - tailscale.com/net/routetable from tailscale.com/doctor/routetable 💣 tailscale.com/net/sockopts from tailscale.com/wgengine/magicsock tailscale.com/net/socks5 from tailscale.com/tsnet tailscale.com/net/sockstats from tailscale.com/control/controlclient+ tailscale.com/net/stun from tailscale.com/ipn/localapi+ - L tailscale.com/net/tcpinfo from tailscale.com/derp tailscale.com/net/tlsdial from tailscale.com/control/controlclient+ tailscale.com/net/tlsdial/blockblame from tailscale.com/net/tlsdial tailscale.com/net/tsaddr from tailscale.com/client/web+ tailscale.com/net/tsdial from tailscale.com/control/controlclient+ - 💣 tailscale.com/net/tshttpproxy from tailscale.com/clientupdate/distsign+ + 💣 tailscale.com/net/tshttpproxy from tailscale.com/feature/useproxy tailscale.com/net/tstun from tailscale.com/tsd+ tailscale.com/net/udprelay/endpoint from tailscale.com/wgengine/magicsock + tailscale.com/net/udprelay/status from tailscale.com/client/local tailscale.com/omit from tailscale.com/ipn/conffile tailscale.com/paths from tailscale.com/client/local+ - 💣 tailscale.com/portlist from tailscale.com/ipn/ipnlocal - tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/local+ tailscale.com/syncs from 
tailscale.com/control/controlhttp+ tailscale.com/tailcfg from tailscale.com/client/local+ tailscale.com/tempfork/acme from tailscale.com/ipn/ipnlocal tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock - tailscale.com/tempfork/httprec from tailscale.com/control/controlclient + tailscale.com/tempfork/httprec from tailscale.com/feature/c2n tailscale.com/tka from tailscale.com/client/local+ tailscale.com/tsconst from tailscale.com/ipn/ipnlocal+ tailscale.com/tsd from tailscale.com/ipn/ipnext+ tailscale.com/tstime from tailscale.com/control/controlclient+ tailscale.com/tstime/mono from tailscale.com/net/tstun+ - tailscale.com/tstime/rate from tailscale.com/derp+ - tailscale.com/tsweb from tailscale.com/util/eventbus + tailscale.com/tstime/rate from tailscale.com/wgengine/filter + LDW tailscale.com/tsweb from tailscale.com/util/eventbus tailscale.com/tsweb/varz from tailscale.com/tsweb+ - tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/client/local+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/ipn+ tailscale.com/types/key from tailscale.com/client/local+ - tailscale.com/types/lazy from tailscale.com/clientupdate+ + tailscale.com/types/lazy from tailscale.com/hostinfo+ tailscale.com/types/logger from tailscale.com/appc+ tailscale.com/types/logid from tailscale.com/ipn/ipnlocal+ tailscale.com/types/mapx from tailscale.com/ipn/ipnext - tailscale.com/types/netlogtype from tailscale.com/net/connstats+ + tailscale.com/types/netlogfunc from tailscale.com/net/tstun+ + tailscale.com/types/netlogtype from tailscale.com/wgengine/netlog tailscale.com/types/netmap from tailscale.com/control/controlclient+ tailscale.com/types/nettype from tailscale.com/ipn/localapi+ tailscale.com/types/opt from tailscale.com/control/controlknobs+ @@ -345,25 +245,25 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/types/structs from tailscale.com/control/controlclient+ tailscale.com/types/tkatype from tailscale.com/client/local+ tailscale.com/types/views from tailscale.com/appc+ + tailscale.com/util/backoff from tailscale.com/control/controlclient+ + tailscale.com/util/checkchange from tailscale.com/ipn/ipnlocal+ tailscale.com/util/cibuild from tailscale.com/health tailscale.com/util/clientmetric from tailscale.com/appc+ tailscale.com/util/cloudenv from tailscale.com/hostinfo+ - tailscale.com/util/cmpver from tailscale.com/clientupdate+ + LW tailscale.com/util/cmpver from tailscale.com/net/dns+ tailscale.com/util/ctxkey from tailscale.com/client/tailscale/apitype+ - 💣 tailscale.com/util/deephash from tailscale.com/ipn/ipnlocal+ - LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics+ + 💣 tailscale.com/util/deephash from tailscale.com/util/syspolicy/setting + LA 💣 tailscale.com/util/dirwalk from tailscale.com/metrics tailscale.com/util/dnsname from tailscale.com/appc+ - tailscale.com/util/eventbus from tailscale.com/ipn/localapi+ + tailscale.com/util/eventbus from tailscale.com/client/local+ tailscale.com/util/execqueue from tailscale.com/appc+ tailscale.com/util/goroutines from tailscale.com/ipn/ipnlocal tailscale.com/util/groupmember from tailscale.com/client/web+ 💣 tailscale.com/util/hashx from tailscale.com/util/deephash tailscale.com/util/httpm from tailscale.com/client/web+ tailscale.com/util/lineiter from tailscale.com/hostinfo+ - L 
tailscale.com/util/linuxfw from tailscale.com/net/netns+ tailscale.com/util/mak from tailscale.com/appc+ - tailscale.com/util/multierr from tailscale.com/control/controlclient+ - tailscale.com/util/must from tailscale.com/clientupdate/distsign+ + tailscale.com/util/must from tailscale.com/logpolicy+ tailscale.com/util/nocasemaps from tailscale.com/types/ipproto 💣 tailscale.com/util/osdiag from tailscale.com/ipn/localapi W 💣 tailscale.com/util/osdiag/internal/wsc from tailscale.com/util/osdiag @@ -385,13 +285,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/util/syspolicy/rsop from tailscale.com/ipn/localapi+ tailscale.com/util/syspolicy/setting from tailscale.com/client/local+ tailscale.com/util/syspolicy/source from tailscale.com/util/syspolicy+ - tailscale.com/util/systemd from tailscale.com/control/controlclient+ tailscale.com/util/testenv from tailscale.com/control/controlclient+ tailscale.com/util/truncate from tailscale.com/logtail tailscale.com/util/usermetric from tailscale.com/health+ tailscale.com/util/vizerror from tailscale.com/tailcfg+ - 💣 tailscale.com/util/winutil from tailscale.com/clientupdate+ - W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/clientupdate+ + 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+ + W 💣 tailscale.com/util/winutil/authenticode from tailscale.com/util/osdiag W 💣 tailscale.com/util/winutil/gp from tailscale.com/net/dns+ W tailscale.com/util/winutil/policy from tailscale.com/ipn/ipnlocal W 💣 tailscale.com/util/winutil/winenv from tailscale.com/hostinfo+ @@ -410,15 +309,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) tailscale.com/wgengine/wgcfg/nmcfg from tailscale.com/ipn/ipnlocal 💣 tailscale.com/wgengine/wgint from tailscale.com/wgengine+ tailscale.com/wgengine/wglog from tailscale.com/wgengine - W 💣 tailscale.com/wgengine/winnet from tailscale.com/wgengine/router golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ LD golang.org/x/crypto/blowfish from golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/chacha20 from golang.org/x/crypto/chacha20poly1305+ - golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ - golang.org/x/crypto/cryptobyte from crypto/ecdsa+ - golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + golang.org/x/crypto/chacha20poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/curve25519 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/hkdf from tailscale.com/control/controlbase golang.org/x/crypto/internal/alias from golang.org/x/crypto/chacha20+ @@ -429,28 +325,27 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ LD golang.org/x/crypto/ssh from tailscale.com/ipn/ipnlocal LD golang.org/x/crypto/ssh/internal/bcrypt_pbkdf from golang.org/x/crypto/ssh - golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ + golang.org/x/exp/constraints from tailscale.com/tsweb/varz+ golang.org/x/exp/maps from tailscale.com/ipn/store/mem+ - golang.org/x/net/bpf from github.com/mdlayher/genetlink+ - golang.org/x/net/dns/dnsmessage from net+ - golang.org/x/net/http/httpguts from golang.org/x/net/http2+ - golang.org/x/net/http/httpproxy from net/http+ - golang.org/x/net/http2 from golang.org/x/net/http2/h2c+ - LDW 
golang.org/x/net/http2/h2c from tailscale.com/ipn/ipnlocal - golang.org/x/net/http2/hpack from golang.org/x/net/http2+ + golang.org/x/net/bpf from github.com/mdlayher/netlink+ + golang.org/x/net/dns/dnsmessage from tailscale.com/appc+ + golang.org/x/net/http/httpguts from tailscale.com/ipn/ipnlocal + golang.org/x/net/http/httpproxy from tailscale.com/net/tshttpproxy golang.org/x/net/icmp from github.com/prometheus-community/pro-bing+ golang.org/x/net/idna from golang.org/x/net/http/httpguts+ - golang.org/x/net/internal/httpcommon from golang.org/x/net/http2 golang.org/x/net/internal/iana from golang.org/x/net/icmp+ golang.org/x/net/internal/socket from golang.org/x/net/icmp+ LDW golang.org/x/net/internal/socks from golang.org/x/net/proxy - golang.org/x/net/ipv4 from github.com/miekg/dns+ - golang.org/x/net/ipv6 from github.com/miekg/dns+ + golang.org/x/net/ipv4 from github.com/prometheus-community/pro-bing+ + golang.org/x/net/ipv6 from github.com/prometheus-community/pro-bing+ LDW golang.org/x/net/proxy from tailscale.com/net/netns DI golang.org/x/net/route from tailscale.com/net/netmon+ + golang.org/x/oauth2 from golang.org/x/oauth2/clientcredentials + golang.org/x/oauth2/clientcredentials from tailscale.com/feature/oauthkey + golang.org/x/oauth2/internal from golang.org/x/oauth2+ golang.org/x/sync/errgroup from github.com/mdlayher/socket+ golang.org/x/sys/cpu from github.com/tailscale/certstore+ - LDAI golang.org/x/sys/unix from github.com/google/nftables+ + LDAI golang.org/x/sys/unix from github.com/jsimonetti/rtnetlink/internal/unix+ W golang.org/x/sys/windows from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/registry from github.com/dblohm7/wingoes+ W golang.org/x/sys/windows/svc from golang.org/x/sys/windows/svc/mgr+ @@ -461,12 +356,27 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) golang.org/x/text/unicode/bidi from golang.org/x/net/idna+ golang.org/x/text/unicode/norm from golang.org/x/net/idna golang.org/x/time/rate from gvisor.dev/gvisor/pkg/log+ - archive/tar from tailscale.com/clientupdate + vendor/golang.org/x/crypto/chacha20 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/crypto/chacha20poly1305 from crypto/internal/hpke+ + vendor/golang.org/x/crypto/cryptobyte from crypto/ecdsa+ + vendor/golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+ + vendor/golang.org/x/crypto/internal/alias from vendor/golang.org/x/crypto/chacha20+ + vendor/golang.org/x/crypto/internal/poly1305 from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/net/dns/dnsmessage from net + vendor/golang.org/x/net/http/httpguts from net/http+ + vendor/golang.org/x/net/http/httpproxy from net/http + vendor/golang.org/x/net/http2/hpack from net/http+ + vendor/golang.org/x/net/idna from net/http+ + vendor/golang.org/x/sys/cpu from vendor/golang.org/x/crypto/chacha20poly1305 + vendor/golang.org/x/text/secure/bidirule from vendor/golang.org/x/net/idna + vendor/golang.org/x/text/transform from vendor/golang.org/x/text/secure/bidirule+ + vendor/golang.org/x/text/unicode/bidi from vendor/golang.org/x/net/idna+ + vendor/golang.org/x/text/unicode/norm from vendor/golang.org/x/net/idna bufio from compress/flate+ - bytes from archive/tar+ + bytes from bufio+ cmp from encoding/json+ compress/flate from compress/gzip+ - compress/gzip from github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding+ + compress/gzip from internal/profile+ W compress/zlib from debug/pe container/heap from gvisor.dev/gvisor/pkg/tcpip/transport/tcp 
container/list from crypto/tls+ @@ -531,12 +441,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) crypto/sha3 from crypto/internal/fips140hash crypto/sha512 from crypto/ecdsa+ crypto/subtle from crypto/cipher+ - crypto/tls from github.com/aws/aws-sdk-go-v2/aws/transport/http+ + crypto/tls from github.com/prometheus-community/pro-bing+ crypto/tls/internal/fips140tls from crypto/tls crypto/x509 from crypto/tls+ DI crypto/x509/internal/macos from crypto/x509 crypto/x509/pkix from crypto/x509+ - DWI database/sql/driver from github.com/google/uuid + DI database/sql/driver from github.com/google/uuid W debug/dwarf from debug/pe W debug/pe from github.com/dblohm7/wingoes/pe embed from github.com/tailscale/web-client-prebuilt+ @@ -548,12 +458,12 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) encoding/hex from crypto/x509+ encoding/json from expvar+ encoding/pem from crypto/tls+ - encoding/xml from github.com/aws/aws-sdk-go-v2/aws/protocol/xml+ - errors from archive/tar+ - expvar from tailscale.com/derp+ + encoding/xml from github.com/tailscale/goupnp+ + errors from bufio+ + expvar from tailscale.com/health+ flag from tailscale.com/util/testenv - fmt from archive/tar+ - hash from compress/zlib+ + fmt from compress/flate+ + hash from crypto+ W hash/adler32 from compress/zlib hash/crc32 from compress/gzip+ hash/maphash from go4.org/mem @@ -570,7 +480,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from archive/tar+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ @@ -579,7 +489,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ - internal/profile from net/http/pprof + LDW internal/profile from net/http/pprof internal/profilerecord from runtime+ internal/race from internal/poll+ internal/reflectlite from context+ @@ -593,7 +503,7 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/runtime/strconv from internal/runtime/cgroup+ internal/runtime/sys from crypto/subtle+ LA internal/runtime/syscall from runtime+ - W internal/saferio from debug/pe + internal/saferio from debug/pe+ internal/singleflight from net internal/stringslite from embed+ internal/sync from sync+ @@ -606,14 +516,14 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) internal/testlog from os internal/trace/tracev2 from runtime+ internal/unsafeheader from internal/reflectlite+ - io from archive/tar+ - io/fs from archive/tar+ - io/ioutil from github.com/aws/aws-sdk-go-v2/aws/protocol/query+ + io from bufio+ + io/fs from crypto/x509+ + io/ioutil from github.com/godbus/dbus/v5+ iter from bytes+ log from expvar+ log/internal from log - maps from archive/tar+ - math from archive/tar+ + maps from crypto/x509+ + math from compress/flate+ math/big from crypto/dsa+ math/bits from bytes+ math/rand from github.com/fxamacker/cbor/v2+ @@ -623,39 +533,39 @@ tailscale.com/tsnet dependencies: (generated by github.com/tailscale/depaware) mime/quotedprintable from mime/multipart net from crypto/tls+ net/http from expvar+ - net/http/httptrace from github.com/aws/smithy-go/transport/http+ - net/http/httputil from 
github.com/aws/smithy-go/transport/http+ + net/http/httptrace from github.com/prometheus-community/pro-bing+ + net/http/httputil from tailscale.com/client/web+ net/http/internal from net/http+ net/http/internal/ascii from net/http+ net/http/internal/httpcommon from net/http - net/http/pprof from tailscale.com/ipn/localapi+ + LDW net/http/pprof from tailscale.com/ipn/localapi+ net/netip from crypto/x509+ - net/textproto from github.com/aws/aws-sdk-go-v2/aws/signer/v4+ + net/textproto from github.com/coder/websocket+ net/url from crypto/x509+ os from crypto/internal/sysrand+ - os/exec from github.com/aws/aws-sdk-go-v2/credentials/processcreds+ - os/user from archive/tar+ - path from archive/tar+ - path/filepath from archive/tar+ - reflect from archive/tar+ - regexp from github.com/aws/aws-sdk-go-v2/internal/endpoints+ + os/exec from github.com/godbus/dbus/v5+ + os/user from github.com/godbus/dbus/v5+ + path from debug/dwarf+ + path/filepath from crypto/x509+ + reflect from crypto/x509+ + regexp from github.com/tailscale/goupnp/httpu+ regexp/syntax from regexp - runtime from archive/tar+ - runtime/debug from github.com/aws/aws-sdk-go-v2/internal/sync/singleflight+ + runtime from crypto/internal/fips140+ + runtime/debug from github.com/coder/websocket/internal/xsync+ runtime/pprof from net/http/pprof+ - runtime/trace from net/http/pprof - slices from archive/tar+ + LDW runtime/trace from net/http/pprof + slices from crypto/tls+ sort from compress/flate+ - strconv from archive/tar+ - strings from archive/tar+ + strconv from compress/flate+ + strings from bufio+ W structs from internal/syscall/windows - sync from archive/tar+ + sync from compress/flate+ sync/atomic from context+ - syscall from archive/tar+ + syscall from crypto/internal/sysrand+ text/tabwriter from runtime/pprof LDW text/template from html/template LDW text/template/parse from html/template+ - time from archive/tar+ + time from compress/gzip+ unicode from bytes+ unicode/utf16 from crypto/x509+ unicode/utf8 from bufio+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 359fbc1c5246d..2944f63595a48 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -29,8 +29,13 @@ import ( "tailscale.com/client/local" "tailscale.com/control/controlclient" "tailscale.com/envknob" + _ "tailscale.com/feature/c2n" + _ "tailscale.com/feature/condregister/oauthkey" + _ "tailscale.com/feature/condregister/portmapper" + _ "tailscale.com/feature/condregister/useproxy" "tailscale.com/health" "tailscale.com/hostinfo" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnlocal" @@ -492,6 +497,16 @@ func (s *Server) TailscaleIPs() (ip4, ip6 netip.Addr) { return ip4, ip6 } +// LogtailWriter returns an [io.Writer] that writes to Tailscale's logging service and will be only visible to Tailscale's +// support team. Logs written there cannot be retrieved by the user. This method always returns a non-nil value. 
+func (s *Server) LogtailWriter() io.Writer { + if s.logtail == nil { + return io.Discard + } + + return s.logtail +} + func (s *Server) getAuthKey() string { if v := s.AuthKey; v != "" { return v @@ -567,7 +582,7 @@ func (s *Server) start() (reterr error) { sys := tsd.NewSystem() s.sys = sys - if err := s.startLogger(&closePool, sys.HealthTracker(), tsLogf); err != nil { + if err := s.startLogger(&closePool, sys.HealthTracker.Get(), tsLogf); err != nil { return err } @@ -578,6 +593,7 @@ func (s *Server) start() (reterr error) { closePool.add(s.netMon) s.dialer = &tsdial.Dialer{Logf: tsLogf} // mutated below (before used) + s.dialer.SetBus(sys.Bus.Get()) eng, err := wgengine.NewUserspaceEngine(tsLogf, wgengine.Config{ EventBus: sys.Bus.Get(), ListenPort: s.Port, @@ -585,7 +601,7 @@ func (s *Server) start() (reterr error) { Dialer: s.dialer, SetSubsystem: sys.Set, ControlKnobs: sys.ControlKnobs(), - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), }) if err != nil { @@ -593,7 +609,7 @@ func (s *Server) start() (reterr error) { } closePool.add(s.dialer) sys.Set(eng) - sys.HealthTracker().SetMetricsRegistry(sys.UserMetricsRegistry()) + sys.HealthTracker.Get().SetMetricsRegistry(sys.UserMetricsRegistry()) // TODO(oxtoacart): do we need to support Taildrive on tsnet, and if so, how? ns, err := netstack.Create(tsLogf, sys.Tun.Get(), eng, sys.MagicSock.Get(), s.dialer, sys.DNSManager.Get(), sys.ProxyMapper()) @@ -669,6 +685,14 @@ func (s *Server) start() (reterr error) { prefs.RunWebClient = s.RunWebClient prefs.AdvertiseTags = s.AdvertiseTags authKey := s.getAuthKey() + // Try to use an OAuth secret to generate an auth key if that functionality + // is available. + if f, ok := tailscale.HookResolveAuthKey.GetOk(); ok { + authKey, err = f(s.shutdownCtx, s.getAuthKey(), prefs.AdvertiseTags) + if err != nil { + return fmt.Errorf("resolving auth key: %w", err) + } + } err = lb.Start(ipn.Options{ UpdatePrefs: prefs, AuthKey: authKey, @@ -745,6 +769,7 @@ func (s *Server) startLogger(closePool *closeOnErrorPool, health *health.Tracker Stderr: io.Discard, // log everything to Buffer Buffer: s.logbuffer, CompressLogs: true, + Bus: s.sys.Bus.Get(), HTTPC: &http.Client{Transport: logpolicy.NewLogtailTransport(logtail.DefaultHost, s.netMon, health, tsLogf)}, MetricsDelta: clientmetric.EncodeLogTailMetricsDelta, } @@ -909,41 +934,6 @@ func (s *Server) getUDPHandlerForFlow(src, dst netip.AddrPort) (handler func(net return func(c nettype.ConnPacketConn) { ln.handle(c) }, true } -// I_Acknowledge_This_API_Is_Experimental must be set true to use AuthenticatedAPITransport() -// for now. -var I_Acknowledge_This_API_Is_Experimental = false - -// AuthenticatedAPITransport provides an HTTP transport that can be used with -// the control server API without needing additional authentication details. It -// authenticates using the current client's nodekey. -// -// It requires the user to set I_Acknowledge_This_API_Is_Experimental. -// -// For example: -// -// import "net/http" -// import "tailscale.com/client/tailscale/v2" -// import "tailscale.com/tsnet" -// -// var s *tsnet.Server -// ... -// rt, err := s.AuthenticatedAPITransport() -// // handler err ... 
-// var client tailscale.Client{HTTP: http.Client{ -// Timeout: 1*time.Minute, -// UserAgent: "your-useragent-here", -// Transport: rt, -// }} -func (s *Server) AuthenticatedAPITransport() (http.RoundTripper, error) { - if !I_Acknowledge_This_API_Is_Experimental { - return nil, errors.New("use of AuthenticatedAPITransport without setting I_Acknowledge_This_API_Is_Experimental") - } - if err := s.Start(); err != nil { - return nil, err - } - return s.lb.KeyProvingNoiseRoundTripper(), nil -} - // Listen announces only on the Tailscale network. // It will start the server if it has not been started yet. // diff --git a/tsnet/tsnet_test.go b/tsnet/tsnet_test.go index d00628453260f..1e22681fcfe36 100644 --- a/tsnet/tsnet_test.go +++ b/tsnet/tsnet_test.go @@ -43,6 +43,7 @@ import ( "tailscale.com/net/netns" "tailscale.com/tailcfg" "tailscale.com/tstest" + "tailscale.com/tstest/deptest" "tailscale.com/tstest/integration" "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" @@ -1302,3 +1303,15 @@ func mustDirect(t *testing.T, logf logger.Logf, lc1, lc2 *local.Client) { } t.Error("magicsock did not find a direct path from lc1 to lc2") } + +func TestDeps(t *testing.T) { + deptest.DepChecker{ + GOOS: "linux", + GOARCH: "amd64", + OnDep: func(dep string) { + if strings.Contains(dep, "portlist") { + t.Errorf("unexpected dep: %q", dep) + } + }, + }.Check(t) +} diff --git a/tstest/archtest/qemu_test.go b/tstest/archtest/qemu_test.go index 8b59ae5d9fee1..68ec38851069e 100644 --- a/tstest/archtest/qemu_test.go +++ b/tstest/archtest/qemu_test.go @@ -33,7 +33,6 @@ func TestInQemu(t *testing.T) { } inCI := cibuild.On() for _, arch := range arches { - arch := arch t.Run(arch.Goarch, func(t *testing.T) { t.Parallel() qemuUser := "qemu-" + arch.Qarch diff --git a/tstest/chonktest/chonktest.go b/tstest/chonktest/chonktest.go new file mode 100644 index 0000000000000..bfe394b28fcaf --- /dev/null +++ b/tstest/chonktest/chonktest.go @@ -0,0 +1,256 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package chonktest contains a shared set of tests for the Chonk +// interface used to store AUM messages in Tailnet Lock, which we can +// share between different implementations. +package chonktest + +import ( + "bytes" + "encoding/binary" + "math/rand" + "os" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "golang.org/x/crypto/blake2s" + "tailscale.com/tka" + "tailscale.com/util/must" +) + +// returns a random source based on the test name + extraSeed. +func testingRand(t *testing.T, extraSeed int64) *rand.Rand { + var seed int64 + if err := binary.Read(bytes.NewBuffer([]byte(t.Name())), binary.LittleEndian, &seed); err != nil { + panic(err) + } + return rand.New(rand.NewSource(seed + extraSeed)) +} + +// randHash derives a fake blake2s hash from the test name +// and the given seed. +func randHash(t *testing.T, seed int64) [blake2s.Size]byte { + var out [blake2s.Size]byte + testingRand(t, seed).Read(out[:]) + return out +} + +func hashesLess(x, y tka.AUMHash) bool { + return bytes.Compare(x[:], y[:]) < 0 +} + +func aumHashesLess(x, y tka.AUM) bool { + return hashesLess(x.Hash(), y.Hash()) +} + +// RunChonkTests is a set of tests for the behaviour of a Chonk. +// +// Any implementation of Chonk should pass these tests, so we know all +// Chonks behave in the same way. If you want to test behaviour that's +// specific to one implementation, write a separate test. 
+func RunChonkTests(t *testing.T, newChonk func(*testing.T) tka.Chonk) { + t.Run("ChildAUMs", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + data := []tka.AUM{ + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{1, 2}, + PrevAUMHash: parentHash[:], + }, + { + MessageKind: tka.AUMRemoveKey, + KeyID: []byte{3, 4}, + PrevAUMHash: parentHash[:], + }, + } + + if err := chonk.CommitVerifiedAUMs(data); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + stored, err := chonk.ChildAUMs(parentHash) + if err != nil { + t.Fatalf("ChildAUMs failed: %v", err) + } + if diff := cmp.Diff(data, stored, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Errorf("stored AUM differs (-want, +got):\n%s", diff) + } + }) + + t.Run("AUMMissing", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + var notExists tka.AUMHash + notExists[:][0] = 42 + if _, err := chonk.AUM(notExists); err != os.ErrNotExist { + t.Errorf("chonk.AUM(notExists).err = %v, want %v", err, os.ErrNotExist) + } + }) + + t.Run("ReadChainFromHead", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + gHash := genesis.Hash() + intermediate := tka.AUM{PrevAUMHash: gHash[:]} + iHash := intermediate.Hash() + leaf := tka.AUM{PrevAUMHash: iHash[:]} + + commitSet := []tka.AUM{ + genesis, + intermediate, + leaf, + } + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + t.Logf("genesis hash = %X", genesis.Hash()) + t.Logf("intermediate hash = %X", intermediate.Hash()) + t.Logf("leaf hash = %X", leaf.Hash()) + + // Read the chain from the leaf backwards. + gotLeafs, err := chonk.Heads() + if err != nil { + t.Fatalf("Heads failed: %v", err) + } + if diff := cmp.Diff([]tka.AUM{leaf}, gotLeafs); diff != "" { + t.Fatalf("leaf AUM differs (-want, +got):\n%s", diff) + } + + parent, _ := gotLeafs[0].Parent() + gotIntermediate, err := chonk.AUM(parent) + if err != nil { + t.Fatalf("AUM() failed: %v", err) + } + if diff := cmp.Diff(intermediate, gotIntermediate); diff != "" { + t.Errorf("intermediate AUM differs (-want, +got):\n%s", diff) + } + + parent, _ = gotIntermediate.Parent() + gotGenesis, err := chonk.AUM(parent) + if err != nil { + t.Fatalf("AUM() failed: %v", err) + } + if diff := cmp.Diff(genesis, gotGenesis); diff != "" { + t.Errorf("genesis AUM differs (-want, +got):\n%s", diff) + } + }) + + t.Run("LastActiveAncestor", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + + aum := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + hash := aum.Hash() + + if err := chonk.SetLastActiveAncestor(hash); err != nil { + t.Fatal(err) + } + got, err := chonk.LastActiveAncestor() + if err != nil { + t.Fatal(err) + } + if got == nil || hash.String() != got.String() { + t.Errorf("LastActiveAncestor=%s, want %s", got, hash) + } + }) +} + +// RunCompactableChonkTests is a set of tests for the behaviour of a +// CompactableChonk. +// +// Any implementation of CompactableChonk should pass these tests, so we +// know all CompactableChonk behave in the same way. If you want to test +// behaviour that's specific to one implementation, write a separate test. 
+func RunCompactableChonkTests(t *testing.T, newChonk func(t *testing.T) tka.CompactableChonk) { + t.Run("PurgeAUMs", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parentHash := randHash(t, 1) + aum := tka.AUM{MessageKind: tka.AUMNoOp, PrevAUMHash: parentHash[:]} + + if err := chonk.CommitVerifiedAUMs([]tka.AUM{aum}); err != nil { + t.Fatal(err) + } + if err := chonk.PurgeAUMs([]tka.AUMHash{aum.Hash()}); err != nil { + t.Fatal(err) + } + + if _, err := chonk.AUM(aum.Hash()); err != os.ErrNotExist { + t.Errorf("AUM() on purged AUM returned err = %v, want ErrNotExist", err) + } + }) + + t.Run("AllAUMs", func(t *testing.T) { + chonk := newChonk(t) + genesis := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{1, 2}} + gHash := genesis.Hash() + intermediate := tka.AUM{PrevAUMHash: gHash[:]} + iHash := intermediate.Hash() + leaf := tka.AUM{PrevAUMHash: iHash[:]} + + commitSet := []tka.AUM{ + genesis, + intermediate, + leaf, + } + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + hashes, err := chonk.AllAUMs() + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff([]tka.AUMHash{genesis.Hash(), intermediate.Hash(), leaf.Hash()}, hashes, cmpopts.SortSlices(hashesLess)); diff != "" { + t.Fatalf("AllAUMs() output differs (-want, +got):\n%s", diff) + } + }) + + t.Run("ChildAUMsOfPurgedAUM", func(t *testing.T) { + t.Parallel() + chonk := newChonk(t) + parent := tka.AUM{MessageKind: tka.AUMRemoveKey, KeyID: []byte{0, 0}} + + parentHash := parent.Hash() + + child1 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{1, 1}, PrevAUMHash: parentHash[:]} + child2 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2}, PrevAUMHash: parentHash[:]} + child3 := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{3, 3}, PrevAUMHash: parentHash[:]} + + child2Hash := child2.Hash() + grandchild2A := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + grandchild2B := tka.AUM{MessageKind: tka.AUMAddKey, KeyID: []byte{2, 2, 2, 2, 2}, PrevAUMHash: child2Hash[:]} + + commitSet := []tka.AUM{parent, child1, child2, child3, grandchild2A, grandchild2B} + + if err := chonk.CommitVerifiedAUMs(commitSet); err != nil { + t.Fatalf("CommitVerifiedAUMs failed: %v", err) + } + + // Check the set of hashes is correct + childHashes := must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Purge the parent AUM, and check the set of child AUMs is unchanged + chonk.PurgeAUMs([]tka.AUMHash{parent.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2, child3}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + + // Now purge one of the child AUMs, and check it no longer appears as a child of the parent + chonk.PurgeAUMs([]tka.AUMHash{child3.Hash()}) + + childHashes = must.Get(chonk.ChildAUMs(parentHash)) + if diff := cmp.Diff([]tka.AUM{child1, child2}, childHashes, cmpopts.SortSlices(aumHashesLess)); diff != "" { + t.Fatalf("ChildAUMs() output differs (-want, +got):\n%s", diff) + } + }) +} diff --git a/tstest/chonktest/tailchonk_test.go b/tstest/chonktest/tailchonk_test.go new file mode 100644 index 0000000000000..ce6b043248de1 --- /dev/null +++ b/tstest/chonktest/tailchonk_test.go @@ -0,0 +1,53 
@@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package chonktest + +import ( + "testing" + + "tailscale.com/tka" + "tailscale.com/util/must" +) + +func TestImplementsChonk(t *testing.T) { + for _, tt := range []struct { + name string + newChonk func(t *testing.T) tka.Chonk + }{ + { + name: "Mem", + newChonk: func(t *testing.T) tka.Chonk { + return &tka.Mem{} + }, + }, + { + name: "FS", + newChonk: func(t *testing.T) tka.Chonk { + return must.Get(tka.ChonkDir(t.TempDir())) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + RunChonkTests(t, tt.newChonk) + }) + } +} + +func TestImplementsCompactableChonk(t *testing.T) { + for _, tt := range []struct { + name string + newChonk func(t *testing.T) tka.CompactableChonk + }{ + { + name: "FS", + newChonk: func(t *testing.T) tka.CompactableChonk { + return must.Get(tka.ChonkDir(t.TempDir())) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + RunCompactableChonkTests(t, tt.newChonk) + }) + } +} diff --git a/tstest/clock_test.go b/tstest/clock_test.go index d5816564a07f1..2ebaf752a1963 100644 --- a/tstest/clock_test.go +++ b/tstest/clock_test.go @@ -56,7 +56,6 @@ func TestClockWithDefinedStartTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -118,7 +117,6 @@ func TestClockWithDefaultStartTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -277,7 +275,6 @@ func TestClockSetStep(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -426,7 +423,6 @@ func TestClockAdvance(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ @@ -876,7 +872,6 @@ func TestSingleTicker(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -1377,7 +1372,6 @@ func TestSingleTimer(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -1911,7 +1905,6 @@ func TestClockFollowRealTime(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() realTimeClock := NewClock(tt.realTimeClockOpts) @@ -2364,7 +2357,6 @@ func TestAfterFunc(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var realTimeClockForTestClock tstime.Clock @@ -2468,7 +2460,6 @@ func TestSince(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() clock := NewClock(ClockOpts{ diff --git a/tstest/integration/integration.go b/tstest/integration/integration.go index 987bb569a4f66..6700205cf8f55 100644 --- a/tstest/integration/integration.go +++ b/tstest/integration/integration.go @@ -34,8 +34,7 @@ import ( "go4.org/mem" "tailscale.com/client/local" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/ipn" "tailscale.com/ipn/ipnlocal" "tailscale.com/ipn/ipnstate" @@ -297,14 +296,14 @@ func exe() string { func RunDERPAndSTUN(t testing.TB, logf logger.Logf, ipAddress string) (derpMap *tailcfg.DERPMap) { t.Helper() - d := derp.NewServer(key.NewNode(), logf) + d := derpserver.New(key.NewNode(), logf) ln, err := net.Listen("tcp", 
net.JoinHostPort(ipAddress, "0")) if err != nil { t.Fatal(err) } - httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Listener.Close() httpsrv.Listener = ln httpsrv.Config.ErrorLog = logger.StdLogger(logf) @@ -480,11 +479,13 @@ func (lc *LogCatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { // TestEnv contains the test environment (set of servers) used by one // or more nodes. type TestEnv struct { - t testing.TB - tunMode bool - cli string - daemon string - loopbackPort *int + t testing.TB + tunMode bool + cli string + daemon string + loopbackPort *int + neverDirectUDP bool + relayServerUseLoopback bool LogCatcher *LogCatcher LogCatcherServer *httptest.Server @@ -842,6 +843,12 @@ func (n *TestNode) StartDaemonAsIPNGOOS(ipnGOOS string) *Daemon { if n.env.loopbackPort != nil { cmd.Env = append(cmd.Env, "TS_DEBUG_NETSTACK_LOOPBACK_PORT="+strconv.Itoa(*n.env.loopbackPort)) } + if n.env.neverDirectUDP { + cmd.Env = append(cmd.Env, "TS_DEBUG_NEVER_DIRECT_UDP=1") + } + if n.env.relayServerUseLoopback { + cmd.Env = append(cmd.Env, "TS_DEBUG_RELAY_SERVER_ADDRS=::1,127.0.0.1") + } if version.IsRace() { cmd.Env = append(cmd.Env, "GORACE=halt_on_error=1") } @@ -1091,21 +1098,44 @@ func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) { } type authURLParserWriter struct { + t *testing.T buf bytes.Buffer - fn func(urlStr string) error + // Handle login URLs, and count how many times they were seen + authURLFn func(urlStr string) error + // Handle machine approval URLs, and count how many times they were seen. + deviceApprovalURLFn func(urlStr string) error } +// Note: auth URLs from testcontrol look slightly different to real auth URLs, +// e.g. http://127.0.0.1:60456/auth/96af2ff7e04ae1499a9a var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`) +// Looks for any device approval URL, which is any URL ending with `/admin` +// e.g. 
http://127.0.0.1:60456/admin +var deviceApprovalURLRx = regexp.MustCompile(`(https?://\S+/admin)[^\S]`) + func (w *authURLParserWriter) Write(p []byte) (n int, err error) { + w.t.Helper() + w.t.Logf("received bytes: %s", string(p)) n, err = w.buf.Write(p) + + defer w.buf.Reset() // so it's not matched again + m := authURLRx.FindSubmatch(w.buf.Bytes()) if m != nil { urlStr := string(m[1]) - w.buf.Reset() // so it's not matched again - if err := w.fn(urlStr); err != nil { + if err := w.authURLFn(urlStr); err != nil { return 0, err } } + + m = deviceApprovalURLRx.FindSubmatch(w.buf.Bytes()) + if m != nil && w.deviceApprovalURLFn != nil { + urlStr := string(m[1]) + if err := w.deviceApprovalURLFn(urlStr); err != nil { + return 0, err + } + } + return n, err } diff --git a/tstest/integration/integration_test.go b/tstest/integration/integration_test.go index de464108c44dd..234bb8c6ec11a 100644 --- a/tstest/integration/integration_test.go +++ b/tstest/integration/integration_test.go @@ -23,27 +23,33 @@ import ( "regexp" "runtime" "strconv" + "strings" "sync/atomic" "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/miekg/dns" "go4.org/mem" "tailscale.com/client/local" "tailscale.com/client/tailscale" - "tailscale.com/clientupdate" "tailscale.com/cmd/testwrapper/flakytest" + "tailscale.com/feature" + _ "tailscale.com/feature/clientupdate" "tailscale.com/hostinfo" "tailscale.com/ipn" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" + "tailscale.com/net/udprelay/status" "tailscale.com/tailcfg" "tailscale.com/tstest" "tailscale.com/tstest/integration/testcontrol" "tailscale.com/types/key" + "tailscale.com/types/netmap" "tailscale.com/types/opt" "tailscale.com/types/ptr" "tailscale.com/util/must" + "tailscale.com/util/set" ) func TestMain(m *testing.M) { @@ -262,53 +268,426 @@ func TestStateSavedOnStart(t *testing.T) { d1.MustCleanShutdown(t) } +// This handler receives auth URLs, and logs into control. +// +// It counts how many URLs it sees, and will fail the test if it +// sees multiple login URLs. +func completeLogin(t *testing.T, control *testcontrol.Server, counter *atomic.Int32) func(string) error { + return func(urlStr string) error { + t.Logf("saw auth URL %q", urlStr) + if control.CompleteAuth(urlStr) { + if counter.Add(1) > 1 { + err := errors.New("completed multiple auth URLs") + t.Error(err) + return err + } + t.Logf("completed login to %s", urlStr) + return nil + } else { + err := fmt.Errorf("failed to complete initial login to %q", urlStr) + t.Fatal(err) + return err + } + } +} + +// This handler receives device approval URLs, and approves the device. +// +// It counts how many URLs it sees, and will fail the test if it +// sees multiple device approval URLs, or if you try to approve a device +// with the wrong control server. 
+func completeDeviceApproval(t *testing.T, node *TestNode, counter *atomic.Int32) func(string) error { + return func(urlStr string) error { + control := node.env.Control + nodeKey := node.MustStatus().Self.PublicKey + t.Logf("saw device approval URL %q", urlStr) + if control.CompleteDeviceApproval(node.env.ControlURL(), urlStr, &nodeKey) { + if counter.Add(1) > 1 { + err := errors.New("completed multiple device approval URLs") + t.Error(err) + return err + } + t.Log("completed device approval") + return nil + } else { + err := errors.New("failed to complete device approval") + t.Fatal(err) + return err + } + } +} + func TestOneNodeUpAuth(t *testing.T) { + type step struct { + args []string + // + // Do we expect to log in again with a new /auth/ URL? + wantAuthURL bool + // + // Do we expect to need a device approval URL? + wantDeviceApprovalURL bool + } + + for _, tt := range []struct { + name string + args []string + // + // What auth key should we use for control? + authKey string + // + // Do we require device approval in the tailnet? + requireDeviceApproval bool + // + // What CLI commands should we run in this test? + steps []step + }{ + { + name: "up", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-machine-auth", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-force-reauth", + steps: []step{ + {args: []string{"up", "--force-reauth"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-auth-key", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + }, + }, + { + name: "up-with-auth-key-with-machine-auth", + authKey: "opensesame", + steps: []step{ + { + args: []string{"up", "--auth-key=opensesame"}, + wantAuthURL: false, + wantDeviceApprovalURL: true, + }, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-force-reauth-and-auth-key", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--force-reauth", "--auth-key=opensesame"}}, + }, + }, + { + name: "up-after-login", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + {args: []string{"up"}, wantAuthURL: false}, + }, + }, + { + name: "up-after-login-with-machine-approval", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + {args: []string{"up"}, wantAuthURL: false, wantDeviceApprovalURL: false}, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-force-reauth-after-login", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true}, + {args: []string{"up", "--force-reauth"}, wantAuthURL: true}, + }, + }, + { + name: "up-with-force-reauth-after-login-with-machine-approval", + steps: []step{ + {args: []string{"up"}, wantAuthURL: true, wantDeviceApprovalURL: true}, + {args: []string{"up", "--force-reauth"}, wantAuthURL: true, wantDeviceApprovalURL: false}, + }, + requireDeviceApproval: true, + }, + { + name: "up-with-auth-key-after-login", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + {args: []string{"up", "--auth-key=opensesame"}}, + }, + }, + { + name: "up-with-force-reauth-and-auth-key-after-login", + authKey: "opensesame", + steps: []step{ + {args: []string{"up", "--auth-key=opensesame"}}, + {args: []string{"up", "--force-reauth", "--auth-key=opensesame"}}, + }, + }, + } { + tstest.Shard(t) + + for _, useSeamlessKeyRenewal := range []bool{true, false} { + name := tt.name + if useSeamlessKeyRenewal { + name += 
"-with-seamless" + } + t.Run(name, func(t *testing.T) { + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + if tt.authKey != "" { + control.RequireAuthKey = tt.authKey + } else { + control.RequireAuth = true + } + + if tt.requireDeviceApproval { + control.RequireMachineAuth = true + } + + control.AllNodesSameUser = true + + if useSeamlessKeyRenewal { + control.DefaultNodeCapabilities = &tailcfg.NodeCapMap{ + tailcfg.NodeAttrSeamlessKeyRenewal: []tailcfg.RawMessage{}, + } + } + }, + )) + + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + defer d1.MustCleanShutdown(t) + + for i, step := range tt.steps { + t.Logf("Running step %d", i) + cmdArgs := append(step.args, "--login-server="+env.ControlURL()) + + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + + var authURLCount atomic.Int32 + var deviceApprovalURLCount atomic.Int32 + + handler := &authURLParserWriter{t: t, + authURLFn: completeLogin(t, env.Control, &authURLCount), + deviceApprovalURLFn: completeDeviceApproval(t, n1, &deviceApprovalURLCount), + } + + cmd := n1.Tailscale(cmdArgs...) + cmd.Stdout = handler + cmd.Stdout = handler + cmd.Stderr = cmd.Stdout + if err := cmd.Run(); err != nil { + t.Fatalf("up: %v", err) + } + + n1.AwaitRunning() + + var wantAuthURLCount int32 + if step.wantAuthURL { + wantAuthURLCount = 1 + } + if n := authURLCount.Load(); n != wantAuthURLCount { + t.Errorf("Auth URLs completed = %d; want %d", n, wantAuthURLCount) + } + + var wantDeviceApprovalURLCount int32 + if step.wantDeviceApprovalURL { + wantDeviceApprovalURLCount = 1 + } + if n := deviceApprovalURLCount.Load(); n != wantDeviceApprovalURLCount { + t.Errorf("Device approval URLs completed = %d; want %d", n, wantDeviceApprovalURLCount) + } + } + }) + } + } +} + +// Returns true if the error returned by [exec.Run] fails with a non-zero +// exit code, false otherwise. +func isNonZeroExitCode(err error) bool { + if err == nil { + return false + } + + exitError, ok := err.(*exec.ExitError) + if !ok { + return false + } + + return exitError.ExitCode() != 0 +} + +// If we interrupt `tailscale up` and then run it again, we should only +// print a single auth URL. +func TestOneNodeUpInterruptedAuth(t *testing.T) { tstest.Shard(t) tstest.Parallel(t) - env := NewTestEnv(t, ConfigureControl(func(control *testcontrol.Server) { - control.RequireAuth = true - })) - n1 := NewTestNode(t, env) - d1 := n1.StartDaemon() - - n1.AwaitListening() + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + control.RequireAuth = true + control.AllNodesSameUser = true + }, + )) - st := n1.MustStatus() - t.Logf("Status: %s", st.BackendState) + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) - t.Logf("Running up --login-server=%s ...", env.ControlURL()) + cmdArgs := []string{"up", "--login-server=" + env.ControlURL()} - cmd := n1.Tailscale("up", "--login-server="+env.ControlURL()) - var authCountAtomic atomic.Int32 - cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error { + // The first time we run the command, we wait for an auth URL to be + // printed, and then we cancel the command -- equivalent to ^C. + // + // At this point, we've connected to control to get an auth URL, + // and printed it in the CLI, but not clicked it. + t.Logf("Running command for the first time: %s", strings.Join(cmdArgs, " ")) + cmd1 := n.Tailscale(cmdArgs...) + + // This handler watches for auth URLs in stdout, then cancels the + // running `tailscale up` CLI command. 
+ cmd1.Stdout = &authURLParserWriter{t: t, authURLFn: func(urlStr string) error { t.Logf("saw auth URL %q", urlStr) - if env.Control.CompleteAuth(urlStr) { - if authCountAtomic.Add(1) > 1 { - err := errors.New("completed multple auth URLs") - t.Error(err) - return err - } - t.Logf("completed auth path %s", urlStr) - return nil - } - err := fmt.Errorf("Failed to complete auth path to %q", urlStr) - t.Error(err) - return err + cmd1.Process.Kill() + return nil }} - cmd.Stderr = cmd.Stdout - if err := cmd.Run(); err != nil { + cmd1.Stderr = cmd1.Stdout + + if err := cmd1.Run(); !isNonZeroExitCode(err) { + t.Fatalf("Command did not fail with non-zero exit code: %q", err) + } + + // Because we didn't click the auth URL, we should still be in NeedsLogin. + n.AwaitBackendState("NeedsLogin") + + // The second time we run the command, we click the first auth URL we see + // and check that we log in correctly. + // + // In #17361, there was a bug where we'd print two auth URLs, and you could + // click either auth URL and log in to control, but logging in through the + // first URL would leave `tailscale up` hanging. + // + // Using `authURLHandler` ensures we only print the new, correct auth URL. + // + // If we print both URLs, it will throw an error because it only expects + // to log in with one auth URL. + // + // If we only print the stale auth URL, the test will timeout because + // `tailscale up` will never return. + t.Logf("Running command for the second time: %s", strings.Join(cmdArgs, " ")) + + var authURLCount atomic.Int32 + + cmd2 := n.Tailscale(cmdArgs...) + cmd2.Stdout = &authURLParserWriter{ + t: t, authURLFn: completeLogin(t, env.Control, &authURLCount), + } + cmd2.Stderr = cmd2.Stdout + + if err := cmd2.Run(); err != nil { t.Fatalf("up: %v", err) } - t.Logf("Got IP: %v", n1.AwaitIP4()) - n1.AwaitRunning() + if urls := authURLCount.Load(); urls != 1 { + t.Errorf("Auth URLs completed = %d; want %d", urls, 1) + } - if n := authCountAtomic.Load(); n != 1 { - t.Errorf("Auth URLs completed = %d; want 1", n) + n.AwaitRunning() +} + +// If we interrupt `tailscale up` and login successfully, but don't +// complete the device approval, we should see the device approval URL +// when we run `tailscale up` a second time. +func TestOneNodeUpInterruptedDeviceApproval(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl( + func(control *testcontrol.Server) { + control.RequireAuth = true + control.RequireMachineAuth = true + control.AllNodesSameUser = true + }, + )) + + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + // The first time we run the command, we: + // + // * set a custom login URL + // * wait for an auth URL to be printed + // * click it to complete the login process + // * wait for a device approval URL to be printed + // * cancel the command, equivalent to ^C + // + // At this point, we've logged in to control, but our node isn't + // approved to connect to the tailnet. + cmd1Args := []string{"up", "--login-server=" + env.ControlURL()} + t.Logf("Running command: %s", strings.Join(cmd1Args, " ")) + cmd1 := n.Tailscale(cmd1Args...) 
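+	// (cmd1 is created before its output handler below, because the
+	// handler's device-approval callback needs to call cmd1.Process.Kill.)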
+ + handler1 := &authURLParserWriter{t: t, + authURLFn: completeLogin(t, env.Control, &atomic.Int32{}), + deviceApprovalURLFn: func(urlStr string) error { + t.Logf("saw device approval URL %q", urlStr) + cmd1.Process.Kill() + return nil + }, } + cmd1.Stdout = handler1 + cmd1.Stderr = cmd1.Stdout - d1.MustCleanShutdown(t) + if err := cmd1.Run(); !isNonZeroExitCode(err) { + t.Fatalf("Command did not fail with non-zero exit code: %q", err) + } + + // Because we logged in but we didn't complete the device approval, we + // should be in state NeedsMachineAuth. + n.AwaitBackendState("NeedsMachineAuth") + + // The second time we run the command, we expect not to get an auth URL + // and go straight to the device approval URL. We don't need to pass the + // login server, because `tailscale up` should remember our control URL. + cmd2Args := []string{"up"} + t.Logf("Running command: %s", strings.Join(cmd2Args, " ")) + + var deviceApprovalURLCount atomic.Int32 + + cmd2 := n.Tailscale(cmd2Args...) + cmd2.Stdout = &authURLParserWriter{t: t, + authURLFn: func(urlStr string) error { + t.Fatalf("got unexpected auth URL: %q", urlStr) + cmd2.Process.Kill() + return nil + }, + deviceApprovalURLFn: completeDeviceApproval(t, n, &deviceApprovalURLCount), + } + cmd2.Stderr = cmd2.Stdout + + if err := cmd2.Run(); err != nil { + t.Fatalf("up: %v", err) + } + + wantDeviceApprovalURLCount := int32(1) + if n := deviceApprovalURLCount.Load(); n != wantDeviceApprovalURLCount { + t.Errorf("Device approval URLs completed = %d; want %d", n, wantDeviceApprovalURLCount) + } + + n.AwaitRunning() } func TestConfigFileAuthKey(t *testing.T) { @@ -595,22 +974,6 @@ func TestC2NPingRequest(t *testing.T) { env := NewTestEnv(t) - gotPing := make(chan bool, 1) - env.Control.HandleC2N = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - t.Errorf("unexpected ping method %q", r.Method) - } - got, err := io.ReadAll(r.Body) - if err != nil { - t.Errorf("ping body read error: %v", err) - } - const want = "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Type: text/plain; charset=utf-8\r\n\r\nabc" - if string(got) != want { - t.Errorf("body error\n got: %q\nwant: %q", got, want) - } - gotPing <- true - }) - n1 := NewTestNode(t, env) n1.StartDaemon() @@ -634,27 +997,33 @@ func TestC2NPingRequest(t *testing.T) { } cancel() - pr := &tailcfg.PingRequest{ - URL: fmt.Sprintf("https://unused/some-c2n-path/ping-%d", try), - Log: true, - Types: "c2n", - Payload: []byte("POST /echo HTTP/1.0\r\nContent-Length: 3\r\n\r\nabc"), + ctx, cancel = context.WithTimeout(t.Context(), 2*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, "POST", "/echo", bytes.NewReader([]byte("abc"))) + if err != nil { + t.Errorf("failed to create request: %v", err) + continue } - if !env.Control.AddPingRequest(nodeKey, pr) { - t.Logf("failed to AddPingRequest") + r, err := env.Control.NodeRoundTripper(nodeKey).RoundTrip(req) + if err != nil { + t.Errorf("RoundTrip failed: %v", err) continue } - - // Wait for PingRequest to come back - pingTimeout := time.NewTimer(2 * time.Second) - defer pingTimeout.Stop() - select { - case <-gotPing: - t.Logf("got ping; success") - return - case <-pingTimeout.C: - // Try again. 
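+		// The node's HTTP response comes back over the /c2n/<token> POST
+		// handled by testcontrol; check its status and echoed body.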
+ if r.StatusCode != 200 { + t.Errorf("unexpected status code: %d", r.StatusCode) + continue + } + b, err := io.ReadAll(r.Body) + if err != nil { + t.Errorf("error reading body: %v", err) + continue } + if string(b) != "abc" { + t.Errorf("body = %q; want %q", b, "abc") + continue + } + return } t.Error("all ping attempts failed") } @@ -721,6 +1090,7 @@ func TestOneNodeUpWindowsStyle(t *testing.T) { // jailed node cannot initiate connections to the other node however the other // node can initiate connections to the jailed node. func TestClientSideJailing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17419") tstest.Shard(t) tstest.Parallel(t) env := NewTestEnv(t) @@ -1019,7 +1389,7 @@ func TestLogoutRemovesAllPeers(t *testing.T) { } func TestAutoUpdateDefaults(t *testing.T) { - if !clientupdate.CanAutoUpdate() { + if !feature.CanAutoUpdate() { t.Skip("auto-updates not supported on this platform") } tstest.Shard(t) @@ -1530,3 +1900,333 @@ func TestEncryptStateMigration(t *testing.T) { runNode(t, wantPlaintextStateKeys) }) } + +// TestPeerRelayPing creates three nodes with one acting as a peer relay. +// The test succeeds when "tailscale ping" flows through the peer +// relay between all 3 nodes, and "tailscale debug peer-relay-sessions" returns +// expected values. +func TestPeerRelayPing(t *testing.T) { + flakytest.Mark(t, "https://github.com/tailscale/tailscale/issues/17251") + tstest.Shard(t) + tstest.Parallel(t) + + env := NewTestEnv(t, ConfigureControl(func(server *testcontrol.Server) { + server.PeerRelayGrants = true + })) + env.neverDirectUDP = true + env.relayServerUseLoopback = true + + n1 := NewTestNode(t, env) + n2 := NewTestNode(t, env) + peerRelay := NewTestNode(t, env) + + allNodes := []*TestNode{n1, n2, peerRelay} + wantPeerRelayServers := make(set.Set[string]) + for _, n := range allNodes { + n.StartDaemon() + n.AwaitResponding() + n.MustUp() + wantPeerRelayServers.Add(n.AwaitIP4().String()) + n.AwaitRunning() + } + + if err := peerRelay.Tailscale("set", "--relay-server-port=0").Run(); err != nil { + t.Fatal(err) + } + + errCh := make(chan error) + for _, a := range allNodes { + go func() { + err := tstest.WaitFor(time.Second*5, func() error { + out, err := a.Tailscale("debug", "peer-relay-servers").CombinedOutput() + if err != nil { + return fmt.Errorf("debug peer-relay-servers failed: %v", err) + } + servers := make([]string, 0) + err = json.Unmarshal(out, &servers) + if err != nil { + return fmt.Errorf("failed to unmarshal debug peer-relay-servers: %v", err) + } + gotPeerRelayServers := make(set.Set[string]) + for _, server := range servers { + gotPeerRelayServers.Add(server) + } + if !gotPeerRelayServers.Equal(wantPeerRelayServers) { + return fmt.Errorf("got peer relay servers: %v want: %v", gotPeerRelayServers, wantPeerRelayServers) + } + return nil + }) + errCh <- err + }() + } + for range allNodes { + err := <-errCh + if err != nil { + t.Fatal(err) + } + } + + pingPairs := make([][2]*TestNode, 0) + for _, a := range allNodes { + for _, z := range allNodes { + if a == z { + continue + } + pingPairs = append(pingPairs, [2]*TestNode{a, z}) + } + } + for _, pair := range pingPairs { + go func() { + a := pair[0] + z := pair[1] + err := tstest.WaitFor(time.Second*10, func() error { + remoteKey := z.MustStatus().Self.PublicKey + if err := a.Tailscale("ping", "--until-direct=false", "--c=1", "--timeout=1s", z.AwaitIP4().String()).Run(); err != nil { + return err + } + remotePeer, ok := a.MustStatus().Peer[remoteKey] + if !ok { + return 
fmt.Errorf("%v->%v remote peer not found", a.MustStatus().Self.ID, z.MustStatus().Self.ID) + } + if len(remotePeer.PeerRelay) == 0 { + return fmt.Errorf("%v->%v not using peer relay, curAddr=%v relay=%v", a.MustStatus().Self.ID, z.MustStatus().Self.ID, remotePeer.CurAddr, remotePeer.Relay) + } + t.Logf("%v->%v using peer relay addr: %v", a.MustStatus().Self.ID, z.MustStatus().Self.ID, remotePeer.PeerRelay) + return nil + }) + errCh <- err + }() + } + for range pingPairs { + err := <-errCh + if err != nil { + t.Fatal(err) + } + } + + allControlNodes := env.Control.AllNodes() + wantSessionsForDiscoShorts := make(set.Set[[2]string]) + for i, a := range allControlNodes { + if i == len(allControlNodes)-1 { + break + } + for _, z := range allControlNodes[i+1:] { + wantSessionsForDiscoShorts.Add([2]string{a.DiscoKey.ShortString(), z.DiscoKey.ShortString()}) + } + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + debugSessions, err := peerRelay.LocalClient().DebugPeerRelaySessions(ctx) + cancel() + if err != nil { + t.Fatalf("debug peer-relay-sessions failed: %v", err) + } + if len(debugSessions.Sessions) != len(wantSessionsForDiscoShorts) { + t.Errorf("got %d peer relay sessions, want %d", len(debugSessions.Sessions), len(wantSessionsForDiscoShorts)) + } + for _, session := range debugSessions.Sessions { + if !wantSessionsForDiscoShorts.Contains([2]string{session.Client1.ShortDisco, session.Client2.ShortDisco}) && + !wantSessionsForDiscoShorts.Contains([2]string{session.Client2.ShortDisco, session.Client1.ShortDisco}) { + t.Errorf("peer relay session for disco keys %s<->%s not found in debug peer-relay-sessions: %+v", session.Client1.ShortDisco, session.Client2.ShortDisco, debugSessions.Sessions) + } + for _, client := range []status.ClientInfo{session.Client1, session.Client2} { + if client.BytesTx == 0 { + t.Errorf("unexpected 0 bytes TX counter in peer relay session: %+v", session) + } + if client.PacketsTx == 0 { + t.Errorf("unexpected 0 packets TX counter in peer relay session: %+v", session) + } + if !client.Endpoint.IsValid() { + t.Errorf("unexpected endpoint zero value in peer relay session: %+v", session) + } + if len(client.ShortDisco) == 0 { + t.Errorf("unexpected zero len short disco in peer relay session: %+v", session) + } + } + } +} + +func TestC2NDebugNetmap(t *testing.T) { + tstest.Shard(t) + tstest.Parallel(t) + env := NewTestEnv(t, ConfigureControl(func(s *testcontrol.Server) { + s.CollectServices = opt.False + })) + + var testNodes []*TestNode + var nodes []*tailcfg.Node + for i := range 2 { + n := NewTestNode(t, env) + d := n.StartDaemon() + defer d.MustCleanShutdown(t) + + n.AwaitResponding() + n.MustUp() + n.AwaitRunning() + testNodes = append(testNodes, n) + + controlNodes := env.Control.AllNodes() + if len(controlNodes) != i+1 { + t.Fatalf("expected %d nodes, got %d nodes", i+1, len(controlNodes)) + } + for _, cn := range controlNodes { + if n.MustStatus().Self.PublicKey == cn.Key { + nodes = append(nodes, cn) + break + } + } + } + + // getC2NNetmap fetches the current netmap. If a candidate map response is provided, + // a candidate netmap is also fetched and compared to the current netmap. 
+	getC2NNetmap := func(node key.NodePublic, cand *tailcfg.MapResponse) *netmap.NetworkMap {
+		t.Helper()
+		ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
+		defer cancel()
+
+		var req *http.Request
+		if cand != nil {
+			body := must.Get(json.Marshal(&tailcfg.C2NDebugNetmapRequest{Candidate: cand}))
+			req = must.Get(http.NewRequestWithContext(ctx, "POST", "/debug/netmap", bytes.NewReader(body)))
+		} else {
+			req = must.Get(http.NewRequestWithContext(ctx, "GET", "/debug/netmap", nil))
+		}
+		httpResp := must.Get(env.Control.NodeRoundTripper(node).RoundTrip(req))
+		defer httpResp.Body.Close()
+
+		if httpResp.StatusCode != 200 {
+			t.Errorf("unexpected status code: %d", httpResp.StatusCode)
+			return nil
+		}
+
+		respBody := must.Get(io.ReadAll(httpResp.Body))
+		var resp tailcfg.C2NDebugNetmapResponse
+		must.Do(json.Unmarshal(respBody, &resp))
+
+		var current netmap.NetworkMap
+		must.Do(json.Unmarshal(resp.Current, &current))
+
+		if !current.PrivateKey.IsZero() {
+			t.Errorf("current netmap has non-zero private key: %v", current.PrivateKey)
+		}
+		// Check candidate netmap if we sent a map response.
+		if cand != nil {
+			var candidate netmap.NetworkMap
+			must.Do(json.Unmarshal(resp.Candidate, &candidate))
+			if !candidate.PrivateKey.IsZero() {
+				t.Errorf("candidate netmap has non-zero private key: %v", candidate.PrivateKey)
+			}
+			if diff := cmp.Diff(current.SelfNode, candidate.SelfNode); diff != "" {
+				t.Errorf("SelfNode differs (-current +candidate):\n%s", diff)
+			}
+			if diff := cmp.Diff(current.Peers, candidate.Peers); diff != "" {
+				t.Errorf("Peers differ (-current +candidate):\n%s", diff)
+			}
+		}
+		return &current
+	}
+
+	for _, n := range nodes {
+		mr := must.Get(env.Control.MapResponse(&tailcfg.MapRequest{NodeKey: n.Key}))
+		nm := getC2NNetmap(n.Key, mr)
+
+		// Make sure peers do not have "testcap" initially (we'll change this later).
+		if len(nm.Peers) != 1 || nm.Peers[0].CapMap().Contains("testcap") {
+			t.Fatalf("expected 1 peer without testcap, got: %v", nm.Peers)
+		}
+
+		// Make sure nodes think each other are offline initially.
+		if nm.Peers[0].Online().Get() {
+			t.Fatalf("expected 1 peer to be offline, got: %v", nm.Peers)
+		}
+	}
+
+	// Send a delta update to n0, setting "testcap" on node 1.
+	env.Control.AddRawMapResponse(nodes[0].Key, &tailcfg.MapResponse{
+		PeersChangedPatch: []*tailcfg.PeerChange{{
+			NodeID: nodes[1].ID, CapMap: tailcfg.NodeCapMap{"testcap": []tailcfg.RawMessage{}},
+		}},
+	})
+
+	// node 0 should see node 1 with "testcap".
+	must.Do(tstest.WaitFor(5*time.Second, func() error {
+		st := testNodes[0].MustStatus()
+		p, ok := st.Peer[nodes[1].Key]
+		if !ok {
+			return fmt.Errorf("node 0 (%s) doesn't see node 1 (%s) as peer\n%v", nodes[0].Key, nodes[1].Key, st)
+		}
+		if _, ok := p.CapMap["testcap"]; !ok {
+			return fmt.Errorf("node 0 (%s) sees node 1 (%s) as peer but without testcap\n%v", nodes[0].Key, nodes[1].Key, p)
+		}
+		return nil
+	}))
+
+	// Check that node 0's current netmap has "testcap" for node 1.
+	nm := getC2NNetmap(nodes[0].Key, nil)
+	if len(nm.Peers) != 1 || !nm.Peers[0].CapMap().Contains("testcap") {
+		t.Errorf("current netmap missing testcap: %v", nm.Peers[0].CapMap())
+	}
+
+	// Send a delta update to n1, marking node 0 as online.
+	env.Control.AddRawMapResponse(nodes[1].Key, &tailcfg.MapResponse{
+		PeersChangedPatch: []*tailcfg.PeerChange{{
+			NodeID: nodes[0].ID, Online: ptr.To(true),
+		}},
+	})
+
+	// node 1 should see node 0 as online.
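+	// (only the Online bit was patched above, so the rest of node 1's view
+	// of node 0 should be unchanged)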
+ must.Do(tstest.WaitFor(5*time.Second, func() error { + st := testNodes[1].MustStatus() + p, ok := st.Peer[nodes[0].Key] + if !ok || !p.Online { + return fmt.Errorf("node 0 (%s) doesn't see node 1 (%s) as an online peer\n%v", nodes[0].Key, nodes[1].Key, st) + } + return nil + })) + + // The netmap from node 1 should show node 0 as online. + nm = getC2NNetmap(nodes[1].Key, nil) + if len(nm.Peers) != 1 || !nm.Peers[0].Online().Get() { + t.Errorf("expected peer to be online; got %+v", nm.Peers[0].AsStruct()) + } +} + +func TestNetworkLock(t *testing.T) { + + // If you run `tailscale lock log` on a node where Tailnet Lock isn't + // enabled, you get an error explaining that. + t.Run("log-when-not-enabled", func(t *testing.T) { + tstest.Shard(t) + t.Parallel() + + env := NewTestEnv(t) + n1 := NewTestNode(t, env) + d1 := n1.StartDaemon() + defer d1.MustCleanShutdown(t) + + n1.MustUp() + n1.AwaitRunning() + + cmdArgs := []string{"lock", "log"} + t.Logf("Running command: %s", strings.Join(cmdArgs, " ")) + + var outBuf, errBuf bytes.Buffer + + cmd := n1.Tailscale(cmdArgs...) + cmd.Stdout = &outBuf + cmd.Stderr = &errBuf + + if err := cmd.Run(); !isNonZeroExitCode(err) { + t.Fatalf("command did not fail with non-zero exit code: %q", err) + } + + if outBuf.String() != "" { + t.Fatalf("stdout: want '', got %q", outBuf.String()) + } + + wantErr := "Tailnet Lock is not enabled\n" + if errBuf.String() != wantErr { + t.Fatalf("stderr: want %q, got %q", wantErr, errBuf.String()) + } + }) +} diff --git a/tstest/integration/tailscaled_deps_test_darwin.go b/tstest/integration/tailscaled_deps_test_darwin.go index a87a3ec658ccb..217188f75f6c0 100644 --- a/tstest/integration/tailscaled_deps_test_darwin.go +++ b/tstest/integration/tailscaled_deps_test_darwin.go @@ -17,6 +17,8 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" @@ -34,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" @@ -49,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_freebsd.go b/tstest/integration/tailscaled_deps_test_freebsd.go index a87a3ec658ccb..217188f75f6c0 100644 --- a/tstest/integration/tailscaled_deps_test_freebsd.go +++ b/tstest/integration/tailscaled_deps_test_freebsd.go @@ -17,6 +17,8 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" @@ -34,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" @@ -49,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ 
"tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_linux.go b/tstest/integration/tailscaled_deps_test_linux.go index a87a3ec658ccb..217188f75f6c0 100644 --- a/tstest/integration/tailscaled_deps_test_linux.go +++ b/tstest/integration/tailscaled_deps_test_linux.go @@ -17,6 +17,8 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" @@ -34,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" @@ -49,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_openbsd.go b/tstest/integration/tailscaled_deps_test_openbsd.go index a87a3ec658ccb..217188f75f6c0 100644 --- a/tstest/integration/tailscaled_deps_test_openbsd.go +++ b/tstest/integration/tailscaled_deps_test_openbsd.go @@ -17,6 +17,8 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" @@ -34,7 +36,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" @@ -49,7 +50,6 @@ import ( _ "tailscale.com/types/logid" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osshare" _ "tailscale.com/util/syspolicy/pkey" _ "tailscale.com/util/syspolicy/policyclient" diff --git a/tstest/integration/tailscaled_deps_test_windows.go b/tstest/integration/tailscaled_deps_test_windows.go index 54e1bcc04dbbc..f3cd5e75b9e36 100644 --- a/tstest/integration/tailscaled_deps_test_windows.go +++ b/tstest/integration/tailscaled_deps_test_windows.go @@ -25,6 +25,8 @@ import ( _ "tailscale.com/derp/derphttp" _ "tailscale.com/drive/driveimpl" _ "tailscale.com/envknob" + _ "tailscale.com/feature" + _ "tailscale.com/feature/buildfeatures" _ "tailscale.com/feature/condregister" _ "tailscale.com/health" _ "tailscale.com/hostinfo" @@ -37,7 +39,6 @@ import ( _ "tailscale.com/ipn/store" _ "tailscale.com/logpolicy" _ "tailscale.com/logtail" - _ "tailscale.com/logtail/backoff" _ "tailscale.com/net/dns" _ "tailscale.com/net/dnsfallback" _ "tailscale.com/net/netmon" @@ -45,7 +46,6 @@ import ( _ "tailscale.com/net/proxymux" _ "tailscale.com/net/socks5" _ "tailscale.com/net/tsdial" - _ "tailscale.com/net/tshttpproxy" _ "tailscale.com/net/tstun" _ "tailscale.com/paths" _ "tailscale.com/safesocket" @@ -57,9 +57,9 @@ import ( _ "tailscale.com/types/key" _ "tailscale.com/types/logger" _ "tailscale.com/types/logid" + _ "tailscale.com/util/backoff" _ "tailscale.com/util/clientmetric" _ "tailscale.com/util/eventbus" - _ "tailscale.com/util/multierr" _ "tailscale.com/util/osdiag" _ "tailscale.com/util/osshare" _ 
"tailscale.com/util/syspolicy/pkey" diff --git a/tstest/integration/testcontrol/testcontrol.go b/tstest/integration/testcontrol/testcontrol.go index 2fbf37de9a15e..f9a33705b7f56 100644 --- a/tstest/integration/testcontrol/testcontrol.go +++ b/tstest/integration/testcontrol/testcontrol.go @@ -5,6 +5,7 @@ package testcontrol import ( + "bufio" "bytes" "cmp" "context" @@ -30,10 +31,13 @@ import ( "tailscale.com/control/controlhttp/controlhttpserver" "tailscale.com/net/netaddr" "tailscale.com/net/tsaddr" + "tailscale.com/syncs" "tailscale.com/tailcfg" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/types/opt" "tailscale.com/types/ptr" + "tailscale.com/util/httpm" "tailscale.com/util/mak" "tailscale.com/util/must" "tailscale.com/util/rands" @@ -46,19 +50,31 @@ const msgLimit = 1 << 20 // encrypted message length limit // Server is a control plane server. Its zero value is ready for use. // Everything is stored in-memory in one tailnet. type Server struct { - Logf logger.Logf // nil means to use the log package - DERPMap *tailcfg.DERPMap // nil means to use prod DERP map - RequireAuth bool - RequireAuthKey string // required authkey for all nodes - Verbose bool - DNSConfig *tailcfg.DNSConfig // nil means no DNS config - MagicDNSDomain string - HandleC2N http.Handler // if non-nil, used for /some-c2n-path/ in tests + Logf logger.Logf // nil means to use the log package + DERPMap *tailcfg.DERPMap // nil means to use prod DERP map + RequireAuth bool + RequireAuthKey string // required authkey for all nodes + RequireMachineAuth bool + Verbose bool + DNSConfig *tailcfg.DNSConfig // nil means no DNS config + MagicDNSDomain string + C2NResponses syncs.Map[string, func(*http.Response)] // token => onResponse func + + // PeerRelayGrants, if true, inserts relay capabilities into the wildcard + // grants rules. + PeerRelayGrants bool // AllNodesSameUser, if true, makes all created nodes // belong to the same user. AllNodesSameUser bool + // DefaultNodeCapabilities overrides the capability map sent to each client. + DefaultNodeCapabilities *tailcfg.NodeCapMap + + // CollectServices, if non-empty, sets whether the control server asks + // for service updates. If empty, the default is "true". + CollectServices opt.Bool + // ExplicitBaseURL or HTTPTestServer must be set. ExplicitBaseURL string // e.g. "http://127.0.0.1:1234" with no trailing URL HTTPTestServer *httptest.Server // if non-nil, used to get BaseURL @@ -179,6 +195,52 @@ func (s *Server) AddPingRequest(nodeKeyDst key.NodePublic, pr *tailcfg.PingReque return s.addDebugMessage(nodeKeyDst, pr) } +// c2nRoundTripper is an http.RoundTripper that sends requests to a node via C2N. +type c2nRoundTripper struct { + s *Server + n key.NodePublic +} + +func (s *Server) NodeRoundTripper(n key.NodePublic) http.RoundTripper { + return c2nRoundTripper{s, n} +} + +func (rt c2nRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + ctx := req.Context() + resc := make(chan *http.Response, 1) + if err := rt.s.SendC2N(rt.n, req, func(r *http.Response) { resc <- r }); err != nil { + return nil, err + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + case r := <-resc: + return r, nil + } +} + +// SendC2N sends req to node. When the response is received, onRes is called. 
+func (s *Server) SendC2N(node key.NodePublic, req *http.Request, onRes func(*http.Response)) error { + var buf bytes.Buffer + if err := req.Write(&buf); err != nil { + return err + } + + token := rands.HexString(10) + pr := &tailcfg.PingRequest{ + URL: "https://unused/c2n/" + token, + Log: true, + Types: "c2n", + Payload: buf.Bytes(), + } + s.C2NResponses.Store(token, onRes) + if !s.AddPingRequest(node, pr) { + s.C2NResponses.Delete(token) + return fmt.Errorf("node %v not connected", node) + } + return nil +} + // AddRawMapResponse delivers the raw MapResponse mr to nodeKeyDst. It's meant // for testing incremental map updates. // @@ -265,9 +327,7 @@ func (s *Server) initMux() { s.mux.HandleFunc("/key", s.serveKey) s.mux.HandleFunc("/machine/", s.serveMachine) s.mux.HandleFunc("/ts2021", s.serveNoiseUpgrade) - if s.HandleC2N != nil { - s.mux.Handle("/some-c2n-path/", s.HandleC2N) - } + s.mux.HandleFunc("/c2n/", s.serveC2N) } func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -281,6 +341,37 @@ func (s *Server) serveUnhandled(w http.ResponseWriter, r *http.Request) { go panic(fmt.Sprintf("testcontrol.Server received unhandled request: %s", got.Bytes())) } +// serveC2N handles a POST from a node containing a c2n response. +func (s *Server) serveC2N(w http.ResponseWriter, r *http.Request) { + if err := func() error { + if r.Method != httpm.POST { + return errors.New("POST required") + } + token, ok := strings.CutPrefix(r.URL.Path, "/c2n/") + if !ok { + return fmt.Errorf("invalid path %q", r.URL.Path) + } + + onRes, ok := s.C2NResponses.Load(token) + if !ok { + return fmt.Errorf("unknown c2n token %q", token) + } + s.C2NResponses.Delete(token) + + res, err := http.ReadResponse(bufio.NewReader(r.Body), nil) + if err != nil { + return fmt.Errorf("error reading c2n response: %w", err) + } + onRes(res) + return nil + }(); err != nil { + s.logf("testcontrol: %s", err) + http.Error(w, err.Error(), 500) + return + } + w.WriteHeader(http.StatusNoContent) +} + type peerMachinePublicContextKey struct{} func (s *Server) serveNoiseUpgrade(w http.ResponseWriter, r *http.Request) { @@ -596,6 +687,29 @@ func (s *Server) CompleteAuth(authPathOrURL string) bool { return true } +// Complete the device approval for this node. +// +// This function returns false if the node does not exist, or you try to +// approve a device against a different control server. +func (s *Server) CompleteDeviceApproval(controlUrl string, urlStr string, nodeKey *key.NodePublic) bool { + s.mu.Lock() + defer s.mu.Unlock() + + node, ok := s.nodes[*nodeKey] + if !ok { + return false + } + + if urlStr != controlUrl+"/admin" { + return false + } + + sendUpdate(s.updates[node.ID], updateSelfChanged) + + node.MachineAuthorized = true + return true +} + func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key.MachinePublic) { msg, err := io.ReadAll(io.LimitReader(r.Body, msgLimit)) r.Body.Close() @@ -644,6 +758,25 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. // some follow-ups? For now all are successes. } + // The in-memory list of nodes, users, and logins is keyed by + // the node key. If the node key changes, update all the data stores + // to use the new node key. 
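+	// (Clients set req.OldNodeKey when they rotate their node key, e.g. on
+	// re-authentication, so the existing test node record is carried over
+	// rather than a duplicate being created.)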
+ s.mu.Lock() + if _, oldNodeKeyOk := s.nodes[req.OldNodeKey]; oldNodeKeyOk { + if _, newNodeKeyOk := s.nodes[req.NodeKey]; !newNodeKeyOk { + s.nodes[req.OldNodeKey].Key = req.NodeKey + s.nodes[req.NodeKey] = s.nodes[req.OldNodeKey] + + s.users[req.NodeKey] = s.users[req.OldNodeKey] + s.logins[req.NodeKey] = s.logins[req.OldNodeKey] + + delete(s.nodes, req.OldNodeKey) + delete(s.users, req.OldNodeKey) + delete(s.logins, req.OldNodeKey) + } + } + s.mu.Unlock() + nk := req.NodeKey user, login := s.getUser(nk) @@ -652,7 +785,7 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. s.nodes = map[key.NodePublic]*tailcfg.Node{} } _, ok := s.nodes[nk] - machineAuthorized := true // TODO: add Server.RequireMachineAuth + machineAuthorized := !s.RequireMachineAuth if !ok { nodeID := len(s.nodes) + 1 @@ -663,6 +796,19 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. v4Prefix, v6Prefix, } + + var capMap tailcfg.NodeCapMap + if s.DefaultNodeCapabilities != nil { + capMap = *s.DefaultNodeCapabilities + } else { + capMap = tailcfg.NodeCapMap{ + tailcfg.CapabilityHTTPS: []tailcfg.RawMessage{}, + tailcfg.NodeAttrFunnel: []tailcfg.RawMessage{}, + tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{}, + tailcfg.CapabilityFunnelPorts + "?ports=8080,443": []tailcfg.RawMessage{}, + } + } + node := &tailcfg.Node{ ID: tailcfg.NodeID(nodeID), StableID: tailcfg.StableNodeID(fmt.Sprintf("TESTCTRL%08x", int(nodeID))), @@ -675,12 +821,8 @@ func (s *Server) serveRegister(w http.ResponseWriter, r *http.Request, mkey key. Hostinfo: req.Hostinfo.View(), Name: req.Hostinfo.Hostname, Cap: req.Version, - Capabilities: []tailcfg.NodeCapability{ - tailcfg.CapabilityHTTPS, - tailcfg.NodeAttrFunnel, - tailcfg.CapabilityFileSharing, - tailcfg.CapabilityFunnelPorts + "?ports=8080,443", - }, + CapMap: capMap, + Capabilities: slices.Collect(maps.Keys(capMap)), } s.nodes[nk] = node } @@ -931,14 +1073,21 @@ var keepAliveMsg = &struct { KeepAlive: true, } -func packetFilterWithIngressCaps() []tailcfg.FilterRule { +func packetFilterWithIngress(addRelayCaps bool) []tailcfg.FilterRule { out := slices.Clone(tailcfg.FilterAllowAll) + caps := []tailcfg.PeerCapability{ + tailcfg.PeerCapabilityIngress, + } + if addRelayCaps { + caps = append(caps, tailcfg.PeerCapabilityRelay) + caps = append(caps, tailcfg.PeerCapabilityRelayTarget) + } out = append(out, tailcfg.FilterRule{ SrcIPs: []string{"*"}, CapGrant: []tailcfg.CapGrant{ { Dsts: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}, - Caps: []tailcfg.PeerCapability{tailcfg.PeerCapabilityIngress}, + Caps: caps, }, }, }) @@ -976,8 +1125,8 @@ func (s *Server) MapResponse(req *tailcfg.MapRequest) (res *tailcfg.MapResponse, Node: node, DERPMap: s.DERPMap, Domain: domain, - CollectServices: "true", - PacketFilter: packetFilterWithIngressCaps(), + CollectServices: cmp.Or(s.CollectServices, opt.True), + PacketFilter: packetFilterWithIngress(s.PeerRelayGrants), DNSConfig: dns, ControlTime: &t, } @@ -1059,18 +1208,25 @@ func (s *Server) canGenerateAutomaticMapResponseFor(nk key.NodePublic) bool { func (s *Server) hasPendingRawMapMessage(nk key.NodePublic) bool { s.mu.Lock() defer s.mu.Unlock() - _, ok := s.msgToSend[nk].(*tailcfg.MapResponse) + _, ok := s.msgToSend[nk] return ok } func (s *Server) takeRawMapMessage(nk key.NodePublic) (mapResJSON []byte, ok bool) { s.mu.Lock() defer s.mu.Unlock() - mr, ok := s.msgToSend[nk].(*tailcfg.MapResponse) + mr, ok := s.msgToSend[nk] if !ok { return nil, false } delete(s.msgToSend, nk) + + // If it's 
a bare PingRequest, wrap it in a MapResponse. + switch pr := mr.(type) { + case *tailcfg.PingRequest: + mr = &tailcfg.MapResponse{PingRequest: pr} + } + var err error mapResJSON, err = json.Marshal(mr) if err != nil { diff --git a/tstest/integration/vms/README.md b/tstest/integration/vms/README.md index 519c3d000fb63..a68ed051428f8 100644 --- a/tstest/integration/vms/README.md +++ b/tstest/integration/vms/README.md @@ -1,7 +1,6 @@ # End-to-End VM-based Integration Testing -This test spins up a bunch of common linux distributions and then tries to get -them to connect to a +These tests spin up a Tailscale client in a Linux VM and try to connect it to [`testcontrol`](https://pkg.go.dev/tailscale.com/tstest/integration/testcontrol) server. @@ -55,26 +54,6 @@ If you pass the `-no-s3` flag to `go test`, the S3 step will be skipped in favor of downloading the images directly from upstream sources, which may cause the test to fail in odd places. -### Distribution Picking - -This test runs on a large number of distributions. By default it tries to run -everything, which may or may not be ideal for you. If you only want to test a -subset of distributions, you can use the `--distro-regex` flag to match a subset -of distributions using a [regular expression](https://golang.org/pkg/regexp/) -such as like this: - -```console -$ go test -run-vm-tests -distro-regex centos -``` - -This would run all tests on all versions of CentOS. - -```console -$ go test -run-vm-tests -distro-regex '(debian|ubuntu)' -``` - -This would run all tests on all versions of Debian and Ubuntu. - ### Ram Limiting This test uses a lot of memory. In order to avoid making machines run out of diff --git a/tstest/integration/vms/distros.hujson b/tstest/integration/vms/distros.hujson index 049091ed50e6e..2c90f9a2f82c1 100644 --- a/tstest/integration/vms/distros.hujson +++ b/tstest/integration/vms/distros.hujson @@ -12,24 +12,16 @@ // /var/log/cloud-init-output.log for what you messed up. [ { - "Name": "ubuntu-18-04", - "URL": "https://cloud-images.ubuntu.com/releases/bionic/release-20210817/ubuntu-18.04-server-cloudimg-amd64.img", - "SHA256Sum": "1ee1039f0b91c8367351413b5b5f56026aaf302fd5f66f17f8215132d6e946d2", + "Name": "ubuntu-24-04", + "URL": "https://cloud-images.ubuntu.com/noble/20250523/noble-server-cloudimg-amd64.img", + "SHA256Sum": "0e865619967706765cdc8179fb9929202417ab3a0719d77d8c8942d38aa9611b", "MemoryMegs": 512, "PackageManager": "apt", "InitSystem": "systemd" }, { - "Name": "ubuntu-20-04", - "URL": "https://cloud-images.ubuntu.com/releases/focal/release-20210819/ubuntu-20.04-server-cloudimg-amd64.img", - "SHA256Sum": "99e25e6e344e3a50a081235e825937238a3d51b099969e107ef66f0d3a1f955e", - "MemoryMegs": 512, - "PackageManager": "apt", - "InitSystem": "systemd" - }, - { - "Name": "nixos-21-11", - "URL": "channel:nixos-21.11", + "Name": "nixos-25-05", + "URL": "channel:nixos-25.05", "SHA256Sum": "lolfakesha", "MemoryMegs": 512, "PackageManager": "nix", diff --git a/tstest/integration/vms/nixos_test.go b/tstest/integration/vms/nixos_test.go index c2998ff3c087c..02b040fedfaff 100644 --- a/tstest/integration/vms/nixos_test.go +++ b/tstest/integration/vms/nixos_test.go @@ -97,7 +97,7 @@ let # Wrap tailscaled with the ip and iptables commands. wrapProgram $out/bin/tailscaled --prefix PATH : ${ - lib.makeBinPath [ iproute iptables ] + lib.makeBinPath [ iproute2 iptables ] } # Install systemd unit. @@ -127,6 +127,9 @@ in { # yolo, this vm can sudo freely. 
security.sudo.wheelNeedsPassword = false; + # nix considers squid insecure, but this is fine for a test. + nixpkgs.config.permittedInsecurePackages = [ "squid-7.0.1" ]; + # Enable cloud-init so we can set VM hostnames and the like the same as other # distros. This will also take care of SSH keys. It's pretty handy. services.cloud-init = { diff --git a/tstest/integration/vms/opensuse_leap_15_1_test.go b/tstest/integration/vms/opensuse_leap_15_1_test.go deleted file mode 100644 index 7d3ac579ec6d1..0000000000000 --- a/tstest/integration/vms/opensuse_leap_15_1_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows && !plan9 - -package vms - -import ( - "encoding/json" - "os" - "path/filepath" - "testing" - - "github.com/google/uuid" -) - -/* - The images that we use for OpenSUSE Leap 15.1 have an issue that makes the - nocloud backend[1] for cloud-init just not work. As a distro-specific - workaround, we're gonna pretend to be OpenStack. - - TODO(Xe): delete once we no longer need to support OpenSUSE Leap 15.1. - - [1]: https://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html -*/ - -type openSUSELeap151MetaData struct { - Zone string `json:"availability_zone"` // nova - Hostname string `json:"hostname"` // opensuse-leap-15-1 - LaunchIndex string `json:"launch_index"` // 0 - Meta openSUSELeap151MetaDataMeta `json:"meta"` // some openstack metadata we don't need to care about - Name string `json:"name"` // opensuse-leap-15-1 - UUID string `json:"uuid"` // e9c664cd-b116-433b-aa61-7ff420163dcd -} - -type openSUSELeap151MetaDataMeta struct { - Role string `json:"role"` // server - DSMode string `json:"dsmode"` // local - Essential string `json:"essential"` // essential -} - -func hackOpenSUSE151UserData(t *testing.T, d Distro, dir string) bool { - if d.Name != "opensuse-leap-15-1" { - return false - } - - t.Log("doing OpenSUSE Leap 15.1 hack") - osDir := filepath.Join(dir, "openstack", "latest") - err := os.MkdirAll(osDir, 0755) - if err != nil { - t.Fatalf("can't make metadata home: %v", err) - } - - metadata, err := json.Marshal(openSUSELeap151MetaData{ - Zone: "nova", - Hostname: d.Name, - LaunchIndex: "0", - Meta: openSUSELeap151MetaDataMeta{ - Role: "server", - DSMode: "local", - Essential: "false", - }, - Name: d.Name, - UUID: uuid.New().String(), - }) - if err != nil { - t.Fatalf("can't encode metadata: %v", err) - } - err = os.WriteFile(filepath.Join(osDir, "meta_data.json"), metadata, 0666) - if err != nil { - t.Fatalf("can't write to meta_data.json: %v", err) - } - - data, err := os.ReadFile(filepath.Join(dir, "user-data")) - if err != nil { - t.Fatalf("can't read user_data: %v", err) - } - - err = os.WriteFile(filepath.Join(osDir, "user_data"), data, 0666) - if err != nil { - t.Fatalf("can't create output user_data: %v", err) - } - - return true -} diff --git a/tstest/integration/vms/regex_flag.go b/tstest/integration/vms/regex_flag.go deleted file mode 100644 index 02e399ecdfaad..0000000000000 --- a/tstest/integration/vms/regex_flag.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package vms - -import "regexp" - -type regexValue struct { - r *regexp.Regexp -} - -func (r *regexValue) String() string { - if r.r == nil { - return "" - } - - return r.r.String() -} - -func (r *regexValue) Set(val string) error { - if rex, err := regexp.Compile(val); err != nil { - return err - } else { - r.r = rex - return nil - 
} -} - -func (r regexValue) Unwrap() *regexp.Regexp { return r.r } diff --git a/tstest/integration/vms/regex_flag_test.go b/tstest/integration/vms/regex_flag_test.go deleted file mode 100644 index 0f4e5f8f7bdec..0000000000000 --- a/tstest/integration/vms/regex_flag_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package vms - -import ( - "flag" - "testing" -) - -func TestRegexFlag(t *testing.T) { - var v regexValue - fs := flag.NewFlagSet(t.Name(), flag.PanicOnError) - fs.Var(&v, "regex", "regex to parse") - - const want = `.*` - fs.Parse([]string{"-regex", want}) - if v.Unwrap().String() != want { - t.Fatalf("got wrong regex: %q, wanted: %q", v.Unwrap().String(), want) - } -} diff --git a/tstest/integration/vms/top_level_test.go b/tstest/integration/vms/top_level_test.go index c107fd89cc886..5db237b6e33b7 100644 --- a/tstest/integration/vms/top_level_test.go +++ b/tstest/integration/vms/top_level_test.go @@ -14,17 +14,13 @@ import ( expect "github.com/tailscale/goexpect" ) -func TestRunUbuntu1804(t *testing.T) { +func TestRunUbuntu2404(t *testing.T) { testOneDistribution(t, 0, Distros[0]) } -func TestRunUbuntu2004(t *testing.T) { - testOneDistribution(t, 1, Distros[1]) -} - -func TestRunNixos2111(t *testing.T) { +func TestRunNixos2505(t *testing.T) { t.Parallel() - testOneDistribution(t, 2, Distros[2]) + testOneDistribution(t, 1, Distros[1]) } // TestMITMProxy is a smoke test for derphttp through a MITM proxy. @@ -39,13 +35,7 @@ func TestRunNixos2111(t *testing.T) { func TestMITMProxy(t *testing.T) { t.Parallel() setupTests(t) - distro := Distros[2] // nixos-21.11 - - if distroRex.Unwrap().MatchString(distro.Name) { - t.Logf("%s matches %s", distro.Name, distroRex.Unwrap()) - } else { - t.Skip("regex not matched") - } + distro := Distros[1] // nixos-25.05 ctx, done := context.WithCancel(context.Background()) t.Cleanup(done) diff --git a/tstest/integration/vms/vms_test.go b/tstest/integration/vms/vms_test.go index f71f2bdbf2069..0bab3ba5d96d5 100644 --- a/tstest/integration/vms/vms_test.go +++ b/tstest/integration/vms/vms_test.go @@ -15,7 +15,6 @@ import ( "os" "os/exec" "path/filepath" - "regexp" "strconv" "strings" "sync" @@ -43,11 +42,6 @@ var ( useVNC = flag.Bool("use-vnc", false, "if set, display guest vms over VNC") verboseLogcatcher = flag.Bool("verbose-logcatcher", true, "if set, print logcatcher to t.Logf") verboseQemu = flag.Bool("verbose-qemu", true, "if set, print qemu console to t.Logf") - distroRex = func() *regexValue { - result := ®exValue{r: regexp.MustCompile(`.*`)} - flag.Var(result, "distro-regex", "The regex that matches what distros should be run") - return result - }() ) func TestDownloadImages(t *testing.T) { @@ -59,9 +53,6 @@ func TestDownloadImages(t *testing.T) { distro := d t.Run(distro.Name, func(t *testing.T) { t.Parallel() - if !distroRex.Unwrap().MatchString(distro.Name) { - t.Skipf("distro name %q doesn't match regex: %s", distro.Name, distroRex) - } if strings.HasPrefix(distro.Name, "nixos") { t.Skip("NixOS is built on the fly, no need to download it") } @@ -175,10 +166,6 @@ func mkSeed(t *testing.T, d Distro, sshKey, hostURL, tdir string, port int) { filepath.Join(dir, "user-data"), } - if hackOpenSUSE151UserData(t, d, dir) { - args = append(args, filepath.Join(dir, "openstack")) - } - run(t, tdir, "genisoimage", args...) 
} @@ -247,12 +234,6 @@ var ramsem struct { func testOneDistribution(t *testing.T, n int, distro Distro) { setupTests(t) - if distroRex.Unwrap().MatchString(distro.Name) { - t.Logf("%s matches %s", distro.Name, distroRex.Unwrap()) - } else { - t.Skip("regex not matched") - } - ctx, done := context.WithCancel(context.Background()) t.Cleanup(done) diff --git a/tstest/natlab/vnet/vnet.go b/tstest/natlab/vnet/vnet.go index 1fa170d87df50..49d47f02937ae 100644 --- a/tstest/natlab/vnet/vnet.go +++ b/tstest/natlab/vnet/vnet.go @@ -51,8 +51,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" "gvisor.dev/gvisor/pkg/waiter" "tailscale.com/client/local" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/net/netutil" "tailscale.com/net/netx" "tailscale.com/net/stun" @@ -601,7 +600,7 @@ func (n *node) String() string { } type derpServer struct { - srv *derp.Server + srv *derpserver.Server handler http.Handler tlsConfig *tls.Config } @@ -612,12 +611,12 @@ func newDERPServer() *derpServer { ts.Close() ds := &derpServer{ - srv: derp.NewServer(key.NewNode(), logger.Discard), + srv: derpserver.New(key.NewNode(), logger.Discard), tlsConfig: ts.TLS, // self-signed; test client configure to not check } var mux http.ServeMux - mux.Handle("/derp", derphttp.Handler(ds.srv)) - mux.HandleFunc("/generate_204", derphttp.ServeNoContent) + mux.Handle("/derp", derpserver.Handler(ds.srv)) + mux.HandleFunc("/generate_204", derpserver.ServeNoContent) ds.handler = &mux return ds diff --git a/tstest/tstest.go b/tstest/tstest.go index 2d0d1351e293a..169450686966d 100644 --- a/tstest/tstest.go +++ b/tstest/tstest.go @@ -14,8 +14,8 @@ import ( "time" "tailscale.com/envknob" - "tailscale.com/logtail/backoff" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/cibuild" ) diff --git a/tsweb/varz/varz.go b/tsweb/varz/varz.go index aca2878b74f29..b1c66b859e8cf 100644 --- a/tsweb/varz/varz.go +++ b/tsweb/varz/varz.go @@ -25,6 +25,7 @@ import ( "golang.org/x/exp/constraints" "tailscale.com/metrics" + "tailscale.com/syncs" "tailscale.com/types/logger" "tailscale.com/version" ) @@ -136,6 +137,9 @@ func writePromExpVar(w io.Writer, prefix string, kv expvar.KeyValue) { case *expvar.Int: fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value()) return + case *syncs.ShardedInt: + fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "counter"), name, v.Value()) + return case *expvar.Float: fmt.Fprintf(w, "# TYPE %s %s\n%s %v\n", name, cmp.Or(typ, "gauge"), name, v.Value()) return diff --git a/tsweb/varz/varz_test.go b/tsweb/varz/varz_test.go index f7a9d880199e2..5bbacbe356940 100644 --- a/tsweb/varz/varz_test.go +++ b/tsweb/varz/varz_test.go @@ -13,6 +13,7 @@ import ( "testing" "tailscale.com/metrics" + "tailscale.com/syncs" "tailscale.com/tstest" "tailscale.com/util/racebuild" "tailscale.com/version" @@ -283,6 +284,20 @@ foo_foo_a 1 foo_foo_b 1 `) + "\n", }, + { + "metrics_sharded_int", + "counter_api_status_code", + func() *syncs.ShardedInt { + m := syncs.NewShardedInt() + m.Add(40) + m.Add(2) + return m + }(), + strings.TrimSpace(` +# TYPE api_status_code counter +api_status_code 42 + `) + "\n", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/types/appctype/appconnector.go b/types/appctype/appconnector.go index f4ced65a41b14..567ab755f0598 100644 --- a/types/appctype/appconnector.go +++ b/types/appctype/appconnector.go @@ -73,3 +73,23 @@ type AppConnectorAttr struct { // tag of the form 
tag:. Connectors []string `json:"connectors,omitempty"` } + +// RouteInfo is a data structure used to persist the in memory state of an AppConnector +// so that we can know, even after a restart, which routes came from ACLs and which were +// learned from domains. +type RouteInfo struct { + // Control is the routes from the 'routes' section of an app connector acl. + Control []netip.Prefix `json:",omitempty"` + // Domains are the routes discovered by observing DNS lookups for configured domains. + Domains map[string][]netip.Addr `json:",omitempty"` + // Wildcards are the configured DNS lookup domains to observe. When a DNS query matches Wildcards, + // its result is added to Domains. + Wildcards []string `json:",omitempty"` +} + +// RouteUpdate records a set of routes that should be advertised and a set of +// routes that should be unadvertised in event bus updates. +type RouteUpdate struct { + Advertise []netip.Prefix + Unadvertise []netip.Prefix +} diff --git a/types/dnstype/messagetypes-string.go b/types/dnstype/messagetypes-string.go deleted file mode 100644 index 34abea1ba947b..0000000000000 --- a/types/dnstype/messagetypes-string.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package dnstype - -import ( - "errors" - "strings" - - "golang.org/x/net/dns/dnsmessage" -) - -// StringForType returns the string representation of a dnsmessage.Type. -// For example, StringForType(dnsmessage.TypeA) returns "A". -func StringForDNSMessageType(t dnsmessage.Type) string { - switch t { - case dnsmessage.TypeAAAA: - return "AAAA" - case dnsmessage.TypeALL: - return "ALL" - case dnsmessage.TypeA: - return "A" - case dnsmessage.TypeCNAME: - return "CNAME" - case dnsmessage.TypeHINFO: - return "HINFO" - case dnsmessage.TypeMINFO: - return "MINFO" - case dnsmessage.TypeMX: - return "MX" - case dnsmessage.TypeNS: - return "NS" - case dnsmessage.TypeOPT: - return "OPT" - case dnsmessage.TypePTR: - return "PTR" - case dnsmessage.TypeSOA: - return "SOA" - case dnsmessage.TypeSRV: - return "SRV" - case dnsmessage.TypeTXT: - return "TXT" - case dnsmessage.TypeWKS: - return "WKS" - } - return "UNKNOWN" -} - -// DNSMessageTypeForString returns the dnsmessage.Type for the given string. -// For example, DNSMessageTypeForString("A") returns dnsmessage.TypeA. 
-func DNSMessageTypeForString(s string) (t dnsmessage.Type, err error) { - s = strings.TrimSpace(strings.ToUpper(s)) - switch s { - case "AAAA": - return dnsmessage.TypeAAAA, nil - case "ALL": - return dnsmessage.TypeALL, nil - case "A": - return dnsmessage.TypeA, nil - case "CNAME": - return dnsmessage.TypeCNAME, nil - case "HINFO": - return dnsmessage.TypeHINFO, nil - case "MINFO": - return dnsmessage.TypeMINFO, nil - case "MX": - return dnsmessage.TypeMX, nil - case "NS": - return dnsmessage.TypeNS, nil - case "OPT": - return dnsmessage.TypeOPT, nil - case "PTR": - return dnsmessage.TypePTR, nil - case "SOA": - return dnsmessage.TypeSOA, nil - case "SRV": - return dnsmessage.TypeSRV, nil - case "TXT": - return dnsmessage.TypeTXT, nil - case "WKS": - return dnsmessage.TypeWKS, nil - } - return 0, errors.New("unknown DNS message type: " + s) -} diff --git a/types/key/hardware_attestation.go b/types/key/hardware_attestation.go index be2eefb78319e..9d4a21ee42706 100644 --- a/types/key/hardware_attestation.go +++ b/types/key/hardware_attestation.go @@ -5,25 +5,138 @@ package key import ( "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/subtle" "encoding/json" "fmt" + "io" + + "go4.org/mem" ) var ErrUnsupported = fmt.Errorf("key type not supported on this platform") +const hardwareAttestPublicHexPrefix = "hwattestpub:" + +const pubkeyLength = 65 // uncompressed P-256 + // HardwareAttestationKey describes a hardware-backed key that is used to // identify a node. Implementation details will // vary based on the platform in use (SecureEnclave for Apple, TPM for // Windows/Linux, Android Hardware-backed Keystore). -// This key can only be marshalled and unmarshalled on the same machine. +// This key can only be marshalled and unmarshaled on the same machine. type HardwareAttestationKey interface { crypto.Signer json.Marshaler json.Unmarshaler + io.Closer + Clone() HardwareAttestationKey + IsZero() bool +} + +// HardwareAttestationPublicFromPlatformKey creates a HardwareAttestationPublic +// for communicating the public component of the hardware attestation key +// with control and other nodes. +func HardwareAttestationPublicFromPlatformKey(k HardwareAttestationKey) HardwareAttestationPublic { + if k == nil { + return HardwareAttestationPublic{} + } + pub := k.Public() + ecdsaPub, ok := pub.(*ecdsa.PublicKey) + if !ok { + panic("hardware attestation key is not ECDSA") + } + bytes, err := ecdsaPub.Bytes() + if err != nil { + panic(err) + } + if len(bytes) != pubkeyLength { + panic("hardware attestation key is not uncompressed ECDSA P-256") + } + var ecdsaPubArr [pubkeyLength]byte + copy(ecdsaPubArr[:], bytes) + return HardwareAttestationPublic{k: ecdsaPubArr} +} + +// HardwareAttestationPublic is the public key counterpart to +// HardwareAttestationKey. +type HardwareAttestationPublic struct { + k [pubkeyLength]byte +} + +func (k *HardwareAttestationPublic) Clone() *HardwareAttestationPublic { + if k == nil { + return nil + } + var out HardwareAttestationPublic + copy(out.k[:], k.k[:]) + return &out +} + +func (k HardwareAttestationPublic) Equal(o HardwareAttestationPublic) bool { + return subtle.ConstantTimeCompare(k.k[:], o.k[:]) == 1 +} + +// IsZero reports whether k is the zero value. +func (k HardwareAttestationPublic) IsZero() bool { + var zero [pubkeyLength]byte + return k.k == zero +} + +// String returns the hex-encoded public key with a type prefix. 
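+// The output has the form "hwattestpub:<hex of the 65-byte uncompressed
+// P-256 point>"; the zero value renders as the empty string.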
+func (k HardwareAttestationPublic) String() string { + bs, err := k.MarshalText() + if err != nil { + panic(err) + } + return string(bs) +} + +// MarshalText implements encoding.TextMarshaler. +func (k HardwareAttestationPublic) MarshalText() ([]byte, error) { + if k.IsZero() { + return nil, nil + } + return k.AppendText(nil) +} + +// UnmarshalText implements encoding.TextUnmarshaler. It expects a typed prefix +// followed by a hex encoded representation of k. +func (k *HardwareAttestationPublic) UnmarshalText(b []byte) error { + if len(b) == 0 { + *k = HardwareAttestationPublic{} + return nil + } + + kb := make([]byte, pubkeyLength) + if err := parseHex(kb, mem.B(b), mem.S(hardwareAttestPublicHexPrefix)); err != nil { + return err + } + + _, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), kb) + if err != nil { + return err + } + copy(k.k[:], kb) + return nil +} + +func (k HardwareAttestationPublic) AppendText(dst []byte) ([]byte, error) { + return appendHexKey(dst, hardwareAttestPublicHexPrefix, k.k[:]), nil +} + +// Verifier returns the ECDSA public key for verifying signatures made by k. +func (k HardwareAttestationPublic) Verifier() *ecdsa.PublicKey { + pk, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), k.k[:]) + if err != nil { + panic(err) + } + return pk } // emptyHardwareAttestationKey is a function that returns an empty -// HardwareAttestationKey suitable for use with JSON unmarshalling. +// HardwareAttestationKey suitable for use with JSON unmarshaling. var emptyHardwareAttestationKey func() HardwareAttestationKey // createHardwareAttestationKey is a function that creates a new @@ -50,7 +163,7 @@ func RegisterHardwareAttestationKeyFns(emptyFn func() HardwareAttestationKey, cr } // NewEmptyHardwareAttestationKey returns an empty HardwareAttestationKey -// suitable for JSON unmarshalling. +// suitable for JSON unmarshaling. func NewEmptyHardwareAttestationKey() (HardwareAttestationKey, error) { if emptyHardwareAttestationKey == nil { return nil, ErrUnsupported diff --git a/types/lazy/lazy.go b/types/lazy/lazy.go index f5d7be4940a11..f537758fa6415 100644 --- a/types/lazy/lazy.go +++ b/types/lazy/lazy.go @@ -23,6 +23,9 @@ var nilErrPtr = ptr.To[error](nil) // Recursive use of a SyncValue from its own fill function will deadlock. // // SyncValue is safe for concurrent use. +// +// Unlike [sync.OnceValue], the linker can do better dead code elimination +// with SyncValue. See https://github.com/golang/go/issues/62202. type SyncValue[T any] struct { once sync.Once v T diff --git a/types/netlogfunc/netlogfunc.go b/types/netlogfunc/netlogfunc.go new file mode 100644 index 0000000000000..6185fcb715c65 --- /dev/null +++ b/types/netlogfunc/netlogfunc.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package netlogfunc defines types for network logging. +package netlogfunc + +import ( + "net/netip" + + "tailscale.com/types/ipproto" +) + +// ConnectionCounter is a function for counting packets and bytes +// for a particular connection. 
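+// recv reports whether the traffic was received (RX) rather than sent (TX).
+// The Add method of [netlogtype.CountsByConnection], added in this change,
+// has a matching signature.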
+type ConnectionCounter func(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) diff --git a/types/netlogtype/netlogtype.go b/types/netlogtype/netlogtype.go index f2fa2bda92366..a29ea6f03dffa 100644 --- a/types/netlogtype/netlogtype.go +++ b/types/netlogtype/netlogtype.go @@ -5,27 +5,26 @@ package netlogtype import ( + "maps" "net/netip" + "sync" "time" "tailscale.com/tailcfg" "tailscale.com/types/ipproto" ) -// TODO(joetsai): Remove "omitempty" if "omitzero" is ever supported in both -// the v1 and v2 "json" packages. - // Message is the log message that captures network traffic. type Message struct { - NodeID tailcfg.StableNodeID `json:"nodeId" cbor:"0,keyasint"` // e.g., "n123456CNTRL" + NodeID tailcfg.StableNodeID `json:"nodeId"` // e.g., "n123456CNTRL" - Start time.Time `json:"start" cbor:"12,keyasint"` // inclusive - End time.Time `json:"end" cbor:"13,keyasint"` // inclusive + Start time.Time `json:"start"` // inclusive + End time.Time `json:"end"` // inclusive - VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty" cbor:"14,keyasint,omitempty"` - SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty" cbor:"15,keyasint,omitempty"` - ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty" cbor:"16,keyasint,omitempty"` - PhysicalTraffic []ConnectionCounts `json:"physicalTraffic,omitempty" cbor:"17,keyasint,omitempty"` + VirtualTraffic []ConnectionCounts `json:"virtualTraffic,omitempty"` + SubnetTraffic []ConnectionCounts `json:"subnetTraffic,omitempty"` + ExitTraffic []ConnectionCounts `json:"exitTraffic,omitempty"` + PhysicalTraffic []ConnectionCounts `json:"physicalTraffic,omitempty"` } const ( @@ -51,18 +50,6 @@ const ( // this object is nested within an array. // It assumes that netip.Addr never has IPv6 zones. MaxConnectionCountsJSONSize = len(maxJSONConnCounts) - - maxCBORConnCounts = "\xbf" + maxCBORConn + maxCBORCounts + "\xff" - maxCBORConn = "\x00" + maxCBORProto + "\x01" + maxCBORAddrPort + "\x02" + maxCBORAddrPort - maxCBORProto = "\x18\xff" - maxCBORAddrPort = "\x52\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" - maxCBORCounts = "\x0c" + maxCBORCount + "\x0d" + maxCBORCount + "\x0e" + maxCBORCount + "\x0f" + maxCBORCount - maxCBORCount = "\x1b\xff\xff\xff\xff\xff\xff\xff\xff" - - // MaxConnectionCountsCBORSize is the maximum size of a ConnectionCounts - // when it is serialized as CBOR. - // It assumes that netip.Addr never has IPv6 zones. - MaxConnectionCountsCBORSize = len(maxCBORConnCounts) ) // ConnectionCounts is a flattened struct of both a connection and counts. @@ -73,19 +60,19 @@ type ConnectionCounts struct { // Connection is a 5-tuple of proto, source and destination IP and port. type Connection struct { - Proto ipproto.Proto `json:"proto,omitzero,omitempty" cbor:"0,keyasint,omitempty"` - Src netip.AddrPort `json:"src,omitzero,omitempty" cbor:"1,keyasint,omitempty"` - Dst netip.AddrPort `json:"dst,omitzero,omitempty" cbor:"2,keyasint,omitempty"` + Proto ipproto.Proto `json:"proto,omitzero"` + Src netip.AddrPort `json:"src,omitzero"` + Dst netip.AddrPort `json:"dst,omitzero"` } func (c Connection) IsZero() bool { return c == Connection{} } // Counts are statistics about a particular connection. 
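+// The omitzero JSON tag option used below (supported by encoding/json since
+// Go 1.24) drops zero-valued fields, replacing the earlier omitempty
+// workaround noted in the removed TODO.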
type Counts struct { - TxPackets uint64 `json:"txPkts,omitzero,omitempty" cbor:"12,keyasint,omitempty"` - TxBytes uint64 `json:"txBytes,omitzero,omitempty" cbor:"13,keyasint,omitempty"` - RxPackets uint64 `json:"rxPkts,omitzero,omitempty" cbor:"14,keyasint,omitempty"` - RxBytes uint64 `json:"rxBytes,omitzero,omitempty" cbor:"15,keyasint,omitempty"` + TxPackets uint64 `json:"txPkts,omitzero"` + TxBytes uint64 `json:"txBytes,omitzero"` + RxPackets uint64 `json:"rxPkts,omitzero"` + RxBytes uint64 `json:"rxBytes,omitzero"` } func (c Counts) IsZero() bool { return c == Counts{} } @@ -98,3 +85,43 @@ func (c1 Counts) Add(c2 Counts) Counts { c1.RxBytes += c2.RxBytes return c1 } + +// CountsByConnection is a count of packets and bytes for each connection. +// All methods are safe for concurrent calls. +type CountsByConnection struct { + mu sync.Mutex + m map[Connection]Counts +} + +// Add adds packets and bytes for the specified connection. +func (c *CountsByConnection) Add(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, recv bool) { + conn := Connection{Proto: proto, Src: src, Dst: dst} + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[Connection]Counts) + } + cnts := c.m[conn] + if recv { + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) + } else { + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) + } + c.m[conn] = cnts +} + +// Clone deep copies the map. +func (c *CountsByConnection) Clone() map[Connection]Counts { + c.mu.Lock() + defer c.mu.Unlock() + return maps.Clone(c.m) +} + +// Reset clears the map. +func (c *CountsByConnection) Reset() { + c.mu.Lock() + defer c.mu.Unlock() + clear(c.m) +} diff --git a/types/netlogtype/netlogtype_test.go b/types/netlogtype/netlogtype_test.go index 7f29090c5f757..00f89b228aa96 100644 --- a/types/netlogtype/netlogtype_test.go +++ b/types/netlogtype/netlogtype_test.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_tailnetlock + package netlogtype import ( @@ -9,7 +11,6 @@ import ( "net/netip" "testing" - "github.com/fxamacker/cbor/v2" "github.com/google/go-cmp/cmp" "tailscale.com/util/must" ) @@ -30,10 +31,4 @@ func TestMaxSize(t *testing.T) { if string(outJSON) != maxJSONConnCounts { t.Errorf("JSON mismatch (-got +want):\n%s", cmp.Diff(string(outJSON), maxJSONConnCounts)) } - - outCBOR := must.Get(cbor.Marshal(cc)) - maxCBORConnCountsAlt := "\xa7" + maxCBORConnCounts[1:len(maxCBORConnCounts)-1] // may use a definite encoding of map - if string(outCBOR) != maxCBORConnCounts && string(outCBOR) != maxCBORConnCountsAlt { - t.Errorf("CBOR mismatch (-got +want):\n%s", cmp.Diff(string(outCBOR), maxCBORConnCounts)) - } } diff --git a/types/opt/bool.go b/types/opt/bool.go index 0a3ee67ad2a6e..e2fd6a054ff0d 100644 --- a/types/opt/bool.go +++ b/types/opt/bool.go @@ -18,6 +18,22 @@ import ( // field without it being dropped. type Bool string +const ( + // True is the encoding of an explicit true. + True = Bool("true") + + // False is the encoding of an explicit false. + False = Bool("false") + + // ExplicitlyUnset is the encoding used by a null + // JSON value. It is a synonym for the empty string. + ExplicitlyUnset = Bool("unset") + + // Empty means the Bool is unset and it's neither + // true nor false. + Empty = Bool("") +) + // NewBool constructs a new Bool value equal to b. The returned Bool is set, // unless Set("") or Clear() methods are called.
func NewBool(b bool) Bool { @@ -50,16 +66,16 @@ func (b *Bool) Scan(src any) error { switch src := src.(type) { case bool: if src { - *b = "true" + *b = True } else { - *b = "false" + *b = False } return nil case int64: if src == 0 { - *b = "false" + *b = False } else { - *b = "true" + *b = True } return nil default: @@ -75,18 +91,18 @@ func (b Bool) EqualBool(v bool) bool { } var ( - trueBytes = []byte("true") - falseBytes = []byte("false") + trueBytes = []byte(True) + falseBytes = []byte(False) nullBytes = []byte("null") ) func (b Bool) MarshalJSON() ([]byte, error) { switch b { - case "true": + case True: return trueBytes, nil - case "false": + case False: return falseBytes, nil - case "", "unset": + case Empty, ExplicitlyUnset: return nullBytes, nil } return nil, fmt.Errorf("invalid opt.Bool value %q", string(b)) @@ -95,11 +111,11 @@ func (b Bool) MarshalJSON() ([]byte, error) { func (b *Bool) UnmarshalJSON(j []byte) error { switch string(j) { case "true": - *b = "true" + *b = True case "false": - *b = "false" + *b = False case "null": - *b = "unset" + *b = ExplicitlyUnset default: return fmt.Errorf("invalid opt.Bool value %q", j) } diff --git a/types/persist/persist.go b/types/persist/persist.go index d888a6afb6af5..4b62c79ddd186 100644 --- a/types/persist/persist.go +++ b/types/persist/persist.go @@ -26,6 +26,7 @@ type Persist struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey `json:",omitempty"` // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -84,11 +85,20 @@ func (p *Persist) Equals(p2 *Persist) bool { return false } + var pub, p2Pub key.HardwareAttestationPublic + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + pub = key.HardwareAttestationPublicFromPlatformKey(p.AttestationKey) + } + if p2.AttestationKey != nil && !p2.AttestationKey.IsZero() { + p2Pub = key.HardwareAttestationPublicFromPlatformKey(p2.AttestationKey) + } + return p.PrivateNodeKey.Equal(p2.PrivateNodeKey) && p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) && p.UserProfile.Equal(&p2.UserProfile) && p.NetworkLockKey.Equal(p2.NetworkLockKey) && p.NodeID == p2.NodeID && + pub.Equal(p2Pub) && reflect.DeepEqual(nilIfEmpty(p.DisallowedTKAStateIDs), nilIfEmpty(p2.DisallowedTKAStateIDs)) } @@ -96,12 +106,16 @@ func (p *Persist) Pretty() string { var ( ok, nk key.NodePublic ) + akString := "-" if !p.OldPrivateNodeKey.IsZero() { ok = p.OldPrivateNodeKey.Public() } if !p.PrivateNodeKey.IsZero() { nk = p.PublicNodeKey() } - return fmt.Sprintf("Persist{o=%v, n=%v u=%#v}", - ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName) + if p.AttestationKey != nil && !p.AttestationKey.IsZero() { + akString = fmt.Sprintf("%v", p.AttestationKey.Public()) + } + return fmt.Sprintf("Persist{o=%v, n=%v u=%#v ak=%s}", + ok.ShortString(), nk.ShortString(), p.UserProfile.LoginName, akString) } diff --git a/types/persist/persist_clone.go b/types/persist/persist_clone.go index 680419ff2f30b..9dbe7e0f6fa6d 100644 --- a/types/persist/persist_clone.go +++ b/types/persist/persist_clone.go @@ -19,6 +19,9 @@ func (src *Persist) Clone() *Persist { } dst := new(Persist) *dst = *src + if src.AttestationKey != nil { + dst.AttestationKey = src.AttestationKey.Clone() + } dst.DisallowedTKAStateIDs = append(src.DisallowedTKAStateIDs[:0:0], src.DisallowedTKAStateIDs...) 
return dst } @@ -31,5 +34,6 @@ var _PersistCloneNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/types/persist/persist_test.go b/types/persist/persist_test.go index dbf2a6d8c7662..713114b74dcd5 100644 --- a/types/persist/persist_test.go +++ b/types/persist/persist_test.go @@ -21,7 +21,7 @@ func fieldsOf(t reflect.Type) (fields []string) { } func TestPersistEqual(t *testing.T) { - persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "DisallowedTKAStateIDs"} + persistHandles := []string{"PrivateNodeKey", "OldPrivateNodeKey", "UserProfile", "NetworkLockKey", "NodeID", "AttestationKey", "DisallowedTKAStateIDs"} if have := fieldsOf(reflect.TypeFor[Persist]()); !reflect.DeepEqual(have, persistHandles) { t.Errorf("Persist.Equal check might be out of sync\nfields: %q\nhandled: %q\n", have, persistHandles) diff --git a/types/persist/persist_view.go b/types/persist/persist_view.go index 7d1507468fc65..dbf8294ef5a7a 100644 --- a/types/persist/persist_view.go +++ b/types/persist/persist_view.go @@ -89,10 +89,11 @@ func (v *PersistView) UnmarshalJSONFrom(dec *jsontext.Decoder) error { func (v PersistView) PrivateNodeKey() key.NodePrivate { return v.ж.PrivateNodeKey } // needed to request key rotation -func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } -func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } -func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } -func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) OldPrivateNodeKey() key.NodePrivate { return v.ж.OldPrivateNodeKey } +func (v PersistView) UserProfile() tailcfg.UserProfile { return v.ж.UserProfile } +func (v PersistView) NetworkLockKey() key.NLPrivate { return v.ж.NetworkLockKey } +func (v PersistView) NodeID() tailcfg.StableNodeID { return v.ж.NodeID } +func (v PersistView) AttestationKey() tailcfg.StableNodeID { panic("unsupported") } // DisallowedTKAStateIDs stores the tka.State.StateID values which // this node will not operate network lock on. This is used to @@ -110,5 +111,6 @@ var _PersistViewNeedsRegeneration = Persist(struct { UserProfile tailcfg.UserProfile NetworkLockKey key.NLPrivate NodeID tailcfg.StableNodeID + AttestationKey key.HardwareAttestationKey DisallowedTKAStateIDs []string }{}) diff --git a/update-flake.sh b/update-flake.sh index 4561183b89f3f..c22572b860248 100755 --- a/update-flake.sh +++ b/update-flake.sh @@ -10,6 +10,14 @@ rm -rf "$OUT" ./tool/go run tailscale.com/cmd/nardump --sri "$OUT" >go.mod.sri rm -rf "$OUT" +GOOUT=$(mktemp -d -t gocross-XXXXXX) +GOREV=$(xargs < ./go.toolchain.rev) +TARBALL="$GOOUT/go-$GOREV.tar.gz" +curl -Ls -o "$TARBALL" "https://github.com/tailscale/go/archive/$GOREV.tar.gz" +tar -xzf "$TARBALL" -C "$GOOUT" +./tool/go run tailscale.com/cmd/nardump --sri "$GOOUT/go-$GOREV" > go.toolchain.rev.sri +rm -rf "$GOOUT" + # nix-direnv only watches the top-level nix file for changes. 
As a # result, when we change a referenced SRI file, we have to cause some # change to shell.nix and flake.nix as well, so that nix-direnv diff --git a/logtail/backoff/backoff.go b/util/backoff/backoff.go similarity index 100% rename from logtail/backoff/backoff.go rename to util/backoff/backoff.go diff --git a/util/checkchange/checkchange.go b/util/checkchange/checkchange.go new file mode 100644 index 0000000000000..8ba64720d7e14 --- /dev/null +++ b/util/checkchange/checkchange.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package checkchange defines a utility for determining whether a value +// has changed since the last time it was checked. +package checkchange + +// EqualCloner is an interface for types that can be compared for equality +// and can be cloned. +type EqualCloner[T any] interface { + Equal(T) bool + Clone() T +} + +// Update sets *old to a clone of new if they are not equal, returning whether +// they were different. +// +// It only modifies *old if they are different. old must be non-nil. +func Update[T EqualCloner[T]](old *T, new T) (changed bool) { + if (*old).Equal(new) { + return false + } + *old = new.Clone() + return true +} diff --git a/util/clientmetric/clientmetric.go b/util/clientmetric/clientmetric.go index 5c11160194fdc..65223e6a9375a 100644 --- a/util/clientmetric/clientmetric.go +++ b/util/clientmetric/clientmetric.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_clientmetrics + // Package clientmetric provides client-side metrics whose values // get occasionally logged. package clientmetric @@ -18,6 +20,7 @@ import ( "sync/atomic" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/util/set" ) @@ -130,15 +133,17 @@ func (m *Metric) Publish() { metrics[m.name] = m sortedDirty = true - if m.f != nil { - lastLogVal = append(lastLogVal, scanEntry{f: m.f}) - } else { - if len(valFreeList) == 0 { - valFreeList = make([]int64, 256) + if buildfeatures.HasLogTail { + if m.f != nil { + lastLogVal = append(lastLogVal, scanEntry{f: m.f}) + } else { + if len(valFreeList) == 0 { + valFreeList = make([]int64, 256) + } + m.v = &valFreeList[0] + valFreeList = valFreeList[1:] + lastLogVal = append(lastLogVal, scanEntry{v: m.v}) } - m.v = &valFreeList[0] - valFreeList = valFreeList[1:] - lastLogVal = append(lastLogVal, scanEntry{v: m.v}) } m.regIdx = len(unsorted) @@ -319,6 +324,9 @@ const ( // - increment a metric: (decrements if negative) // 'I' + hex(varint(wireid)) + hex(varint(value)) func EncodeLogTailMetricsDelta() string { + if !buildfeatures.HasLogTail { + return "" + } mu.Lock() defer mu.Unlock() diff --git a/util/clientmetric/omit.go b/util/clientmetric/omit.go new file mode 100644 index 0000000000000..5349fc7244cd7 --- /dev/null +++ b/util/clientmetric/omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_clientmetrics + +package clientmetric + +type Metric struct{} + +func (*Metric) Add(int64) {} +func (*Metric) Set(int64) {} +func (*Metric) Value() int64 { return 0 } +func (*Metric) Register(expvarInt any) {} +func (*Metric) UnregisterAll() {} + +func HasPublished(string) bool { panic("unreachable") } +func EncodeLogTailMetricsDelta() string { return "" } +func WritePrometheusExpositionFormat(any) {} + +var zeroMetric Metric + +func NewCounter(string) *Metric { return &zeroMetric } +func NewGauge(string) *Metric { return &zeroMetric } +func 
NewAggregateCounter(string) *Metric { return &zeroMetric } diff --git a/util/cloudenv/cloudenv.go b/util/cloudenv/cloudenv.go index be60ca0070e54..f55f7dfb0794a 100644 --- a/util/cloudenv/cloudenv.go +++ b/util/cloudenv/cloudenv.go @@ -16,6 +16,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/syncs" "tailscale.com/types/lazy" ) @@ -51,6 +52,9 @@ const ( // ResolverIP returns the cloud host's recursive DNS server or the // empty string if not available. func (c Cloud) ResolverIP() string { + if !buildfeatures.HasCloud { + return "" + } switch c { case GCP: return GoogleMetadataAndDNSIP @@ -92,6 +96,9 @@ var cloudAtomic syncs.AtomicValue[Cloud] // Get returns the current cloud, or the empty string if unknown. func Get() Cloud { + if !buildfeatures.HasCloud { + return "" + } if c, ok := cloudAtomic.LoadOk(); ok { return c } diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index e5bf7329a67ee..d1507d8e67587 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -77,7 +77,7 @@ func (b *Bus) Debugger() *Debugger { return &Debugger{b} } -// Close closes the bus. Implicitly closes all clients, publishers and +// Close closes the bus. It implicitly closes all clients, publishers and // subscribers attached to the bus. // // Close blocks until the bus is fully shut down. The bus is diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index e159b6a12608a..de292cf1adb5b 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "testing" + "testing/synctest" "time" "github.com/creachadair/taskgroup" @@ -27,7 +28,16 @@ func TestBus(t *testing.T) { defer b.Close() c := b.Client("TestSub") - defer c.Close() + cdone := c.Done() + defer func() { + c.Close() + select { + case <-cdone: + t.Log("Client close signal received (OK)") + case <-time.After(time.Second): + t.Error("timed out waiting for client close signal") + } + }() s := eventbus.Subscribe[EventA](c) go func() { @@ -55,6 +65,55 @@ func TestBus(t *testing.T) { } } +func TestSubscriberFunc(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + exp := expectEvents(t, EventA{12345}) + eventbus.SubscribeFunc[EventA](c, func(e EventA) { exp.Got(e) }) + + p := eventbus.Publish[EventA](c) + p.Publish(EventA{12345}) + + synctest.Wait() + c.Close() + + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) + + t.Run("SubscriberPublishes", func(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + pa := eventbus.Publish[EventA](c) + pb := eventbus.Publish[EventB](c) + exp := expectEvents(t, EventA{127}, EventB{128}) + eventbus.SubscribeFunc[EventA](c, func(e EventA) { + exp.Got(e) + pb.Publish(EventB{Counter: e.Counter + 1}) + }) + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + exp.Got(e) + }) + + pa.Publish(EventA{127}) + + synctest.Wait() + c.Close() + if !exp.Empty() { + t.Errorf("unexpected extra events: %+v", exp.want) + } + }) + }) +} + func TestBusMultipleConsumers(t *testing.T) { b := eventbus.New() defer b.Close() @@ -102,80 +161,316 @@ func TestBusMultipleConsumers(t *testing.T) { } } -func TestSpam(t *testing.T) { - b := eventbus.New() - defer b.Close() +func TestClientMixedSubscribers(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client("TestClient") + + var gotA EventA + s1 := 
eventbus.Subscribe[EventA](c) - const ( - publishers = 100 - eventsPerPublisher = 20 - wantEvents = publishers * eventsPerPublisher - subscribers = 100 - ) - - var g taskgroup.Group - - received := make([][]EventA, subscribers) - for i := range subscribers { - c := b.Client(fmt.Sprintf("Subscriber%d", i)) - defer c.Close() - s := eventbus.Subscribe[EventA](c) - g.Go(func() error { - for range wantEvents { + var gotB EventB + eventbus.SubscribeFunc[EventB](c, func(e EventB) { + t.Logf("func sub received %[1]T %+[1]v", e) + gotB = e + }) + + go func() { + for { select { - case evt := <-s.Events(): - received[i] = append(received[i], evt) - case <-s.Done(): - t.Errorf("queue done before expected number of events received") - return errors.New("queue prematurely closed") - case <-time.After(5 * time.Second): - t.Errorf("timed out waiting for expected bus event after %d events", len(received[i])) - return errors.New("timeout") + case <-s1.Done(): + return + case e := <-s1.Events(): + t.Logf("chan sub received %[1]T %+[1]v", e) + gotA = e } } - return nil - }) - } + }() + + p1 := eventbus.Publish[EventA](c) + p2 := eventbus.Publish[EventB](c) + + go p1.Publish(EventA{12345}) + go p2.Publish(EventB{67890}) + + synctest.Wait() + c.Close() + synctest.Wait() + + if diff := cmp.Diff(gotB, EventB{67890}); diff != "" { + t.Errorf("Chan sub (-got, +want):\n%s", diff) + } + if diff := cmp.Diff(gotA, EventA{12345}); diff != "" { + t.Errorf("Func sub (-got, +want):\n%s", diff) + } + }) +} + +func TestSpam(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + b := eventbus.New() + defer b.Close() + + const ( + publishers = 100 + eventsPerPublisher = 20 + wantEvents = publishers * eventsPerPublisher + subscribers = 100 + ) + + var g taskgroup.Group + + // A bunch of subscribers receiving on channels. + chanReceived := make([][]EventA, subscribers) + for i := range subscribers { + c := b.Client(fmt.Sprintf("Subscriber%d", i)) + defer c.Close() - published := make([][]EventA, publishers) - for i := range publishers { - g.Run(func() { + s := eventbus.Subscribe[EventA](c) + g.Go(func() error { + for range wantEvents { + select { + case evt := <-s.Events(): + chanReceived[i] = append(chanReceived[i], evt) + case <-s.Done(): + t.Errorf("queue done before expected number of events received") + return errors.New("queue prematurely closed") + case <-time.After(5 * time.Second): + t.Logf("timed out waiting for expected bus event after %d events", len(chanReceived[i])) + return errors.New("timeout") + } + } + return nil + }) + } + + // A bunch of subscribers receiving via a func. 
+ funcReceived := make([][]EventA, subscribers) + for i := range subscribers { + c := b.Client(fmt.Sprintf("SubscriberFunc%d", i)) + defer c.Close() + eventbus.SubscribeFunc(c, func(e EventA) { + funcReceived[i] = append(funcReceived[i], e) + }) + } + + published := make([][]EventA, publishers) + for i := range publishers { c := b.Client(fmt.Sprintf("Publisher%d", i)) p := eventbus.Publish[EventA](c) - for j := range eventsPerPublisher { - evt := EventA{i*eventsPerPublisher + j} - p.Publish(evt) - published[i] = append(published[i], evt) + g.Run(func() { + defer c.Close() + for j := range eventsPerPublisher { + evt := EventA{i*eventsPerPublisher + j} + p.Publish(evt) + published[i] = append(published[i], evt) + } + }) + } + + if err := g.Wait(); err != nil { + t.Fatal(err) + } + synctest.Wait() + + tests := []struct { + name string + recv [][]EventA + }{ + {"Subscriber", chanReceived}, + {"SubscriberFunc", funcReceived}, + } + for _, tc := range tests { + for i, got := range tc.recv { + if len(got) != wantEvents { + t.Errorf("%s %d: got %d events, want %d", tc.name, i, len(got), wantEvents) + } + if i == 0 { + continue + } + if diff := cmp.Diff(got, tc.recv[i-1]); diff != "" { + t.Errorf("%s %d did not see the same events as %d (-got+want):\n%s", tc.name, i, i-1, diff) + } } - }) + } + for i, sent := range published { + if got := len(sent); got != eventsPerPublisher { + t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + } + } + + // TODO: check that the published sequences are proper + // subsequences of the received slices. + }) +} + +func TestClient_Done(t *testing.T) { + b := eventbus.New() + defer b.Close() + + c := b.Client(t.Name()) + s := eventbus.Subscribe[string](c) + + // The client is not Done until closed. + select { + case <-c.Done(): + t.Fatal("Client done before being closed") + default: + // OK } - if err := g.Wait(); err != nil { - t.Fatal(err) + go c.Close() + + // Once closed, the client becomes Done. + select { + case <-c.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Client to be done") + } + + // Thereafter, the subscriber should also be closed. + select { + case <-s.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Subscriber to be done") } - var last []EventA - for i, got := range received { - if len(got) != wantEvents { - // Receiving goroutine already reported an error, we just need - // to fail early within the main test goroutine. 
- t.FailNow() +} + +func TestMonitor(t *testing.T) { + t.Run("ZeroWait", func(t *testing.T) { + var zero eventbus.Monitor + + ready := make(chan struct{}) + go func() { zero.Wait(); close(ready) }() + + select { + case <-ready: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Wait to return") + } + }) + + t.Run("ZeroDone", func(t *testing.T) { + var zero eventbus.Monitor + + select { + case <-zero.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for zero monitor to be done") } - if last == nil { - continue + }) + + t.Run("ZeroClose", func(t *testing.T) { + var zero eventbus.Monitor + + ready := make(chan struct{}) + go func() { zero.Close(); close(ready) }() + + select { + case <-ready: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for Close to return") } - if diff := cmp.Diff(got, last); diff != "" { - t.Errorf("Subscriber %d did not see the same events as %d (-got+want):\n%s", i, i-1, diff) + }) + + testMon := func(t *testing.T, release func(*eventbus.Client, eventbus.Monitor)) func(t *testing.T) { + t.Helper() + return func(t *testing.T) { + bus := eventbus.New() + cli := bus.Client("test client") + + // The monitored goroutine runs until the client or test subscription ends. + sub := eventbus.Subscribe[string](cli) + m := cli.Monitor(func(c *eventbus.Client) { + select { + case <-c.Done(): + t.Log("client closed") + case <-sub.Done(): + t.Log("subscription closed") + } + }) + + done := make(chan struct{}) + go func() { + defer close(done) + m.Wait() + }() + + // While the goroutine is running, Wait does not complete. + select { + case <-done: + t.Error("monitor is ready before its goroutine is finished (Wait)") + default: + // OK + } + select { + case <-m.Done(): + t.Error("monitor is ready before its goroutine is finished (Done)") + default: + // OK + } + + release(cli, m) + select { + case <-done: + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for monitor to complete (Wait)") + } + select { + case <-m.Done(): + // OK + case <-time.After(time.Second): + t.Fatal("timeout waiting for monitor to complete (Done)") + } } - last = got } - for i, sent := range published { - if got := len(sent); got != eventsPerPublisher { - t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + t.Run("Close", testMon(t, func(_ *eventbus.Client, m eventbus.Monitor) { m.Close() })) + t.Run("Wait", testMon(t, func(c *eventbus.Client, m eventbus.Monitor) { c.Close(); m.Wait() })) +} + +func TestRegression(t *testing.T) { + bus := eventbus.New() + t.Cleanup(bus.Close) + + t.Run("SubscribeClosed", func(t *testing.T) { + c := bus.Client("test sub client") + c.Close() + + var v any + func() { + defer func() { v = recover() }() + eventbus.Subscribe[string](c) + }() + if v == nil { + t.Fatal("Expected a panic from Subscribe on a closed client") + } else { + t.Logf("Got expected panic: %v", v) } - } + }) + + t.Run("PublishClosed", func(t *testing.T) { + c := bus.Client("test pub client") + c.Close() - // TODO: check that the published sequences are proper - // subsequences of the received slices. 
+ var v any + func() { + defer func() { v = recover() }() + eventbus.Publish[string](c) + }() + if v == nil { + t.Fatal("expected a panic from Publish on a closed client") + } else { + t.Logf("Got expected panic: %v", v) + } + }) } type queueChecker struct { @@ -190,10 +485,12 @@ func expectEvents(t *testing.T, want ...any) *queueChecker { func (q *queueChecker) Got(v any) { q.t.Helper() if q.Empty() { - q.t.Fatalf("queue got unexpected %v", v) + q.t.Errorf("queue got unexpected %v", v) + return } if v != q.want[0] { - q.t.Fatalf("queue got %#v, want %#v", v, q.want[0]) + q.t.Errorf("queue got %#v, want %#v", v, q.want[0]) + return } q.want = q.want[1:] } diff --git a/util/eventbus/client.go b/util/eventbus/client.go index a6266a4d8f823..9e3f3ee76cc31 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -21,14 +21,15 @@ type Client struct { bus *Bus publishDebug hook[PublishedEvent] - mu sync.Mutex - pub set.Set[publisher] - sub *subscribeState // Lazily created on first subscribe + mu sync.Mutex + pub set.Set[publisher] + sub *subscribeState // Lazily created on first subscribe + stop stopFlag // signaled on Close } func (c *Client) Name() string { return c.name } -// Close closes the client. Implicitly closes all publishers and +// Close closes the client. It implicitly closes all publishers and // subscribers obtained from this client. func (c *Client) Close() { var ( @@ -47,8 +48,16 @@ func (c *Client) Close() { for p := range pub { p.Close() } + c.stop.Stop() } +func (c *Client) isClosed() bool { return c.pub == nil && c.sub == nil } + +// Done returns a channel that is closed when [Client.Close] is called. +// The channel is closed after all the publishers and subscribers governed by +// the client have been closed. +func (c *Client) Done() <-chan struct{} { return c.stop.Done() } + func (c *Client) snapshotSubscribeQueue() []DeliveredEvent { return c.peekSubscribeState().snapshotQueue() } @@ -76,6 +85,10 @@ func (c *Client) subscribeTypes() []reflect.Type { func (c *Client) subscribeState() *subscribeState { c.mu.Lock() defer c.mu.Unlock() + return c.subscribeStateLocked() +} + +func (c *Client) subscribeStateLocked() *subscribeState { if c.sub == nil { c.sub = newSubscribeState(c) } @@ -85,6 +98,9 @@ func (c *Client) subscribeState() *subscribeState { func (c *Client) addPublisher(pub publisher) { c.mu.Lock() defer c.mu.Unlock() + if c.isClosed() { + panic("cannot Publish on a closed client") + } c.pub.Add(pub) } @@ -110,17 +126,52 @@ func (c *Client) shouldPublish(t reflect.Type) bool { return c.publishDebug.active() || c.bus.shouldPublish(t) } -// Subscribe requests delivery of events of type T through the given -// Queue. Panics if the queue already has a subscriber for T. +// Subscribe requests delivery of events of type T through the given client. +// It panics if c already has a subscriber for type T, or if c is closed. func Subscribe[T any](c *Client) *Subscriber[T] { - r := c.subscribeState() + // Hold the client lock throughout the subscription process so that a caller + // attempting to subscribe on a closed client will get a useful diagnostic + // instead of a random panic from inside the subscriber plumbing. + c.mu.Lock() + defer c.mu.Unlock() + + // The caller should not race subscriptions with close, give them a useful + // diagnostic at the call site. 
+ if c.isClosed() { + panic("cannot Subscribe on a closed client") + } + + r := c.subscribeStateLocked() s := newSubscriber[T](r) r.addSubscriber(s) return s } -// Publish returns a publisher for event type T using the given -// client. +// SubscribeFunc is like [Subscribe], but calls the provided func for each +// event of type T. +// +// A SubscriberFunc calls f synchronously from the client's goroutine. +// This means the callback must not block for an extended period of time, +// as this will block the subscriber and slow event processing for all +// subscriptions on c. +func SubscribeFunc[T any](c *Client, f func(T)) *SubscriberFunc[T] { + c.mu.Lock() + defer c.mu.Unlock() + + // The caller should not race subscriptions with close, give them a useful + // diagnostic at the call site. + if c.isClosed() { + panic("cannot SubscribeFunc on a closed client") + } + + r := c.subscribeStateLocked() + s := newSubscriberFunc[T](r, f) + r.addSubscriber(s) + return s +} + +// Publish returns a publisher for event type T using the given client. +// It panics if c is closed. func Publish[T any](c *Client) *Publisher[T] { p := newPublisher[T](c) c.addPublisher(p) diff --git a/util/eventbus/debug-demo/main.go b/util/eventbus/debug-demo/main.go index a6d232d882944..71894d2eab94e 100644 --- a/util/eventbus/debug-demo/main.go +++ b/util/eventbus/debug-demo/main.go @@ -14,12 +14,16 @@ import ( "net/netip" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/tsweb" "tailscale.com/types/key" "tailscale.com/util/eventbus" ) func main() { + if !buildfeatures.HasDebugEventBus { + log.Fatalf("debug-demo requires the \"debugeventbus\" feature enabled") + } b := eventbus.New() c := b.Client("RouteMonitor") go testPub[RouteAdded](c, 5*time.Second) diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index a055f078fc4f2..6d5463bece7b2 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -10,8 +10,6 @@ import ( "slices" "sync" "sync/atomic" - - "tailscale.com/tsweb" ) // A Debugger offers access to a bus's privileged introspection and @@ -137,8 +135,6 @@ func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { return client.subscribeTypes() } -func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { registerHTTPDebugger(d, td) } - // A hook collects hook functions that can be run as a group. type hook[T any] struct { sync.Mutex diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index 617502b93752c..9e03676d07128 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -29,7 +29,7 @@ type httpDebugger struct { *Debugger } -func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { +func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { dh := httpDebugger{d} td.Handle("bus", "Event bus", dh) td.HandleSilent("bus/monitor", http.HandlerFunc(dh.serveMonitor)) diff --git a/util/eventbus/debughttp_off.go b/util/eventbus/debughttp_off.go index 7d9fb327c494f..332525262aa29 100644 --- a/util/eventbus/debughttp_off.go +++ b/util/eventbus/debughttp_off.go @@ -5,16 +5,6 @@ package eventbus -import "tailscale.com/tsweb" +type tswebDebugHandler = any // actually *tsweb.DebugHandler; any to avoid import tsweb with ts_omit_debugeventbus -func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { - // The event bus debugging UI uses html/template, which uses - // reflection for method lookups. 
This forces the compiler to - // retain a lot more code and information to make dynamic method - // dispatch work, which is unacceptable bloat for the iOS build. - // We also disable it on Android while we're at it, as nobody - // is debugging Tailscale internals on Android. - // - // TODO: https://github.com/tailscale/tailscale/issues/15297 to - // bring the debug UI back to iOS somehow. -} +func (*Debugger) RegisterHTTP(td tswebDebugHandler) {} diff --git a/util/eventbus/eventbustest/doc.go b/util/eventbus/eventbustest/doc.go index 9e39504a83521..1e9928b9d7cf9 100644 --- a/util/eventbus/eventbustest/doc.go +++ b/util/eventbus/eventbustest/doc.go @@ -39,6 +39,20 @@ // checks that the stream contains exactly the given events in the given order, // and no others. // +// To test for the absence of events, use [ExpectExactly] without any +// expected events, alongside [testing/synctest] to avoid waiting on real +// timers when ensuring that no events are produced. This will look like: +// +// synctest.Test(t, func(t *testing.T) { +// bus := eventbustest.NewBus(t) +// tw := eventbustest.NewWatcher(t, bus) +// somethingThatShouldNotEmitAnyEvent() +// synctest.Wait() +// if err := eventbustest.ExpectExactly(tw); err != nil { +// t.Errorf("Expected no events or errors, got %v", err) +// } +// }) +// // See the [usage examples]. // // [usage examples]: https://github.com/tailscale/tailscale/blob/main/util/eventbus/eventbustest/examples_test.go diff --git a/util/eventbus/eventbustest/eventbustest.go b/util/eventbus/eventbustest/eventbustest.go index 98536ae0affc8..fd8a150812e0d 100644 --- a/util/eventbus/eventbustest/eventbustest.go +++ b/util/eventbus/eventbustest/eventbustest.go @@ -10,12 +10,13 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "tailscale.com/util/eventbus" ) // NewBus constructs an [eventbus.Bus] that will be shut automatically when // its controlling test ends. -func NewBus(t *testing.T) *eventbus.Bus { +func NewBus(t testing.TB) *eventbus.Bus { bus := eventbus.New() t.Cleanup(bus.Close) return bus @@ -26,13 +27,9 @@ // [Expect] and [ExpectExactly], to verify that the desired events were captured. func NewWatcher(t *testing.T, bus *eventbus.Bus) *Watcher { tw := &Watcher{ - mon: bus.Debugger().WatchBus(), - TimeOut: 5 * time.Second, - chDone: make(chan bool, 1), - events: make(chan any, 100), - } - if deadline, ok := t.Deadline(); ok { - tw.TimeOut = deadline.Sub(time.Now()) + mon: bus.Debugger().WatchBus(), + chDone: make(chan bool, 1), + events: make(chan any, 100), } t.Cleanup(tw.done) go tw.watch() } // Watcher monitors and holds events for test expectations. +// The Watcher works with [synctest], and some scenarios require the use of +// [synctest]; this is true, for example, when testing for the absence of +// events. +// +// For usage examples, see the documentation at the top of the package. type Watcher struct { mon *eventbus.Subscriber[eventbus.RoutedEvent] events chan any chDone chan bool - // TimeOut defines when the Expect* functions should stop looking for events - // coming from the Watcher. The value is set by [NewWatcher] and defaults to - // the deadline passed in by [testing.T]. If looking to verify the absence - // of an event, the TimeOut can be set to a lower value after creating the - // Watcher. 
- TimeOut time.Duration } // Type is a helper representing the expectation to see an event of type T, without @@ -79,6 +75,11 @@ func Type[T any]() func(T) { return func(T) {} } // // The if error != nil, the test helper will return that error immediately. // func(e ExpectedType) (bool, error) // +// // Tests for event type and whatever is defined in the body. +// // If a non-nil error is reported, the test helper will return that error +// // immediately; otherwise the expectation is considered to be met. +// func(e ExpectedType) error +// // If the list of events must match exactly with no extra events, // use [ExpectExactly]. func Expect(tw *Watcher, filters ...any) error { @@ -97,10 +98,11 @@ func Expect(tw *Watcher, filters ...any) error { } else if ok { head++ } - case <-time.After(tw.TimeOut): + // Use synctest when you want an error here. + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", - eventCount, head) + eventCount, len(filters)) case <-tw.chDone: return errors.New("watcher closed while waiting for events") } @@ -112,9 +114,18 @@ func Expect(tw *Watcher, filters ...any) error { // in a given order, returning an error if the events does not match the given list // exactly. The given events are represented by a function as described in // [Expect]. Use [Expect] if other events are allowed. +// +// If you are expecting ExpectExactly to fail because of a missing event, or if +// you are testing for the absence of events, call [synctest.Wait] after +// actions that would publish an event, but before calling ExpectExactly. func ExpectExactly(tw *Watcher, filters ...any) error { if len(filters) == 0 { - return errors.New("no event filters were provided") + select { + case event := <-tw.events: + return fmt.Errorf("saw event type %s, expected none", reflect.TypeOf(event)) + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock + return nil + } } eventCount := 0 for pos, next := range filters { @@ -135,10 +146,10 @@ func ExpectExactly(tw *Watcher, filters ...any) error { return fmt.Errorf( "expected test ok for type %s, at index %d", argType, pos) } - case <-time.After(tw.TimeOut): + case <-time.After(100 * time.Second): // "indefinitely", to advance a synctest clock return fmt.Errorf( "timed out waiting for event, saw %d events, %d was expected", - eventCount, pos) + eventCount, len(filters)) case <-tw.chDone: return errors.New("watcher closed while waiting for events") } @@ -151,6 +162,9 @@ func (tw *Watcher) watch() { select { case event := <-tw.mon.Events(): tw.events <- event.Event + case <-tw.mon.Done(): + tw.done() + return case <-tw.chDone: tw.mon.Close() return @@ -179,15 +193,22 @@ func eventFilter(f any) filter { return []reflect.Value{reflect.ValueOf(true), reflect.Zero(reflect.TypeFor[error]())} } case 1: - if ft.Out(0) != reflect.TypeFor[bool]() { - panic(fmt.Sprintf("result is %T, want bool", ft.Out(0))) - } - fixup = func(vals []reflect.Value) []reflect.Value { - return append(vals, reflect.Zero(reflect.TypeFor[error]())) + switch ft.Out(0) { + case reflect.TypeFor[bool](): + fixup = func(vals []reflect.Value) []reflect.Value { + return append(vals, reflect.Zero(reflect.TypeFor[error]())) + } + case reflect.TypeFor[error](): + fixup = func(vals []reflect.Value) []reflect.Value { + pass := vals[0].IsZero() + return append([]reflect.Value{reflect.ValueOf(pass)}, vals...) 
+ } + default: + panic(fmt.Sprintf("result is %v, want bool or error", ft.Out(0))) } case 2: if ft.Out(0) != reflect.TypeFor[bool]() || ft.Out(1) != reflect.TypeFor[error]() { - panic(fmt.Sprintf("results are %T, %T; want bool, error", ft.Out(0), ft.Out(1))) + panic(fmt.Sprintf("results are %v, %v; want bool, error", ft.Out(0), ft.Out(1))) } fixup = func(vals []reflect.Value) []reflect.Value { return vals } default: @@ -237,3 +258,38 @@ func Inject[T any](inj *Injector, event T) { } pub.(*eventbus.Publisher[T]).Publish(event) } + +// EqualTo returns an event-matching function for use with [Expect] and +// [ExpectExactly] that matches on an event of the given type that is equal to +// want by comparison with [cmp.Diff]. The expectation fails with an error +// message including the diff, if present. +func EqualTo[T any](want T) func(T) error { + return func(got T) error { + if diff := cmp.Diff(got, want); diff != "" { + return fmt.Errorf("wrong result (-got, +want):\n%s", diff) + } + return nil + } +} + +// LogAllEvents logs summaries of all the events routed via the specified bus +// during the execution of the test governed by t. This is intended to support +// development and debugging of tests. +func LogAllEvents(t testing.TB, bus *eventbus.Bus) { + dw := bus.Debugger().WatchBus() + done := make(chan struct{}) + go func() { + defer close(done) + var i int + for { + select { + case <-dw.Done(): + return + case re := <-dw.Events(): + i++ + t.Logf("[eventbus] #%[1]d: %[2]T | %+[2]v", i, re.Event) + } + } + }() + t.Cleanup(func() { dw.Close(); <-done }) +} diff --git a/util/eventbus/eventbustest/eventbustest_test.go b/util/eventbus/eventbustest/eventbustest_test.go index fd95973e5538d..ac454023c9c47 100644 --- a/util/eventbus/eventbustest/eventbustest_test.go +++ b/util/eventbus/eventbustest/eventbustest_test.go @@ -4,14 +4,18 @@ package eventbustest_test import ( + "flag" "fmt" + "strings" "testing" - "time" + "testing/synctest" "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" ) +var doDebug = flag.Bool("debug", false, "Enable debug logging") + type EventFoo struct { Value int } @@ -29,19 +33,17 @@ func TestExpectFilter(t *testing.T) { name string events []int expectFunc any - wantErr bool + wantErr string // if non-empty, an error is expected containing this text }{ { name: "single event", events: []int{42}, expectFunc: eventbustest.Type[EventFoo](), - wantErr: false, }, { name: "multiple events, single expectation", events: []int{42, 1, 2, 3, 4, 5}, expectFunc: eventbustest.Type[EventFoo](), - wantErr: false, }, { name: "filter on event with function", @@ -52,7 +54,27 @@ func TestExpectFilter(t *testing.T) { } return false, nil }, - wantErr: false, + }, + { + name: "filter-with-nil-error", + events: []int{1, 2, 3}, + expectFunc: func(event EventFoo) error { + if event.Value > 10 { + return fmt.Errorf("value > 10: %d", event.Value) + } + return nil + }, + }, + { + name: "filter-with-non-nil-error", + events: []int{100, 200, 300}, + expectFunc: func(event EventFoo) error { + if event.Value > 10 { + return fmt.Errorf("value > 10: %d", event.Value) + } + return nil + }, + wantErr: "value > 10", }, { name: "first event has to be func", @@ -63,7 +85,18 @@ func TestExpectFilter(t *testing.T) { } return false, nil }, - wantErr: true, + wantErr: "expected 42, got 24", + }, + { + name: "equal-values", + events: []int{23}, + expectFunc: eventbustest.EqualTo(EventFoo{Value: 23}), + }, + { + name: "unequal-values", + events: []int{37}, + expectFunc: 
eventbustest.EqualTo(EventFoo{Value: 23}), + wantErr: "wrong result (-got, +want)", }, { name: "no events", @@ -71,30 +104,41 @@ func TestExpectFilter(t *testing.T) { expectFunc: func(event EventFoo) (bool, error) { return true, nil }, - wantErr: true, + wantErr: "timed out waiting", }, } - bus := eventbustest.NewBus(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + + if *doDebug { + eventbustest.LogAllEvents(t, bus) + } + tw := eventbustest.NewWatcher(t, bus) + + client := bus.Client("testClient") + updater := eventbus.Publish[EventFoo](client) - client := bus.Client("testClient") - defer client.Close() - updater := eventbus.Publish[EventFoo](client) + for _, i := range tt.events { + updater.Publish(EventFoo{i}) + } - for _, i := range tt.events { - updater.Publish(EventFoo{i}) - } + synctest.Wait() - if err := eventbustest.Expect(tw, tt.expectFunc); (err != nil) != tt.wantErr { - t.Errorf("ExpectFilter[EventFoo]: error = %v, wantErr %v", err, tt.wantErr) - } + if err := eventbustest.Expect(tw, tt.expectFunc); err != nil { + if tt.wantErr == "" { + t.Errorf("Expect[EventFoo]: unexpected error: %v", err) + } else if !strings.Contains(err.Error(), tt.wantErr) { + t.Errorf("Expect[EventFoo]: err = %v, want %q", err, tt.wantErr) + } else { + t.Logf("Got expected error: %v (OK)", err) + } + } else if tt.wantErr != "" { + t.Errorf("Expect[EventFoo]: unexpectedly succeeded, want error %q", tt.wantErr) + } + }) }) } } @@ -196,38 +240,37 @@ func TestExpectEvents(t *testing.T) { }, } - bus := eventbustest.NewBus(t) - for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) - client := bus.Client("testClient") - defer client.Close() - updaterFoo := eventbus.Publish[EventFoo](client) - updaterBar := eventbus.Publish[EventBar](client) - updaterBaz := eventbus.Publish[EventBaz](client) + tw := eventbustest.NewWatcher(t, bus) - for _, ev := range tt.events { - switch ev.(type) { - case EventFoo: - evCast := ev.(EventFoo) - updaterFoo.Publish(evCast) - case EventBar: - evCast := ev.(EventBar) - updaterBar.Publish(evCast) - case EventBaz: - evCast := ev.(EventBaz) - updaterBaz.Publish(evCast) + client := bus.Client("testClient") + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) + + for _, ev := range tt.events { + switch ev := ev.(type) { + case EventFoo: + evCast := ev + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev + updaterBaz.Publish(evCast) + } } - } - if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { - t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) - } + synctest.Wait() + if err := eventbustest.Expect(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) }) } } @@ -329,38 +372,37 @@ func TestExpectExactlyEventsFilter(t *testing.T) { }, } - bus := eventbustest.NewBus(t) - 
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tw := eventbustest.NewWatcher(t, bus) - // TODO(cmol): When synctest is out of experimental, use that instead: - // https://go.dev/blog/synctest - tw.TimeOut = 10 * time.Millisecond + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) - client := bus.Client("testClient") - defer client.Close() - updaterFoo := eventbus.Publish[EventFoo](client) - updaterBar := eventbus.Publish[EventBar](client) - updaterBaz := eventbus.Publish[EventBaz](client) + tw := eventbustest.NewWatcher(t, bus) - for _, ev := range tt.events { - switch ev.(type) { - case EventFoo: - evCast := ev.(EventFoo) - updaterFoo.Publish(evCast) - case EventBar: - evCast := ev.(EventBar) - updaterBar.Publish(evCast) - case EventBaz: - evCast := ev.(EventBaz) - updaterBaz.Publish(evCast) + client := bus.Client("testClient") + updaterFoo := eventbus.Publish[EventFoo](client) + updaterBar := eventbus.Publish[EventBar](client) + updaterBaz := eventbus.Publish[EventBaz](client) + + for _, ev := range tt.events { + switch ev := ev.(type) { + case EventFoo: + evCast := ev + updaterFoo.Publish(evCast) + case EventBar: + evCast := ev + updaterBar.Publish(evCast) + case EventBaz: + evCast := ev + updaterBaz.Publish(evCast) + } } - } - if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { - t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) - } + synctest.Wait() + if err := eventbustest.ExpectExactly(tw, tt.expectEvents...); (err != nil) != tt.wantErr { + t.Errorf("ExpectEvents: error = %v, wantErr %v", err, tt.wantErr) + } + }) }) } } diff --git a/util/eventbus/eventbustest/examples_test.go b/util/eventbus/eventbustest/examples_test.go index 914e29933b2a2..c848113173bc6 100644 --- a/util/eventbus/eventbustest/examples_test.go +++ b/util/eventbus/eventbustest/examples_test.go @@ -5,6 +5,8 @@ package eventbustest_test import ( "testing" + "testing/synctest" + "time" "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" @@ -157,7 +159,7 @@ func TestExample_Expect_WithMultipleFunctions(t *testing.T) { // OK } -func TestExample_ExpectExactly_WithMultipleFuncions(t *testing.T) { +func TestExample_ExpectExactly_WithMultipleFunctions(t *testing.T) { type eventOfInterest struct { value int } @@ -199,3 +201,60 @@ func TestExample_ExpectExactly_WithMultipleFuncions(t *testing.T) { // Output: // expected event type eventbustest.eventOfCuriosity, saw eventbustest.eventOfNoConcern, at index 1 } + +func TestExample_ExpectExactly_NoEvents(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + + go func() { + // Do some work that does not produce an event + time.Sleep(10 * time.Second) + t.Log("Not producing events") + }() + + // Wait for all other routines to be stale before continuing to ensure that + // there is nothing running that would produce an event at a later time. 
+ synctest.Wait() + + if err := eventbustest.ExpectExactly(tw); err != nil { + t.Error(err.Error()) + } else { + t.Log("OK") + } + // Output: + // OK + }) +} + +func TestExample_ExpectExactly_OneEventExpectingTwo(t *testing.T) { + synctest.Test(t, func(t *testing.T) { + type eventOfInterest struct{} + + bus := eventbustest.NewBus(t) + tw := eventbustest.NewWatcher(t, bus) + client := bus.Client("testClient") + updater := eventbus.Publish[eventOfInterest](client) + + go func() { + // Do some work that does not produce an event + time.Sleep(10 * time.Second) + updater.Publish(eventOfInterest{}) + }() + + // Wait for all other routines to be stale before continuing to ensure that + // there is nothing running that would produce an event at a later time. + synctest.Wait() + + if err := eventbustest.ExpectExactly(tw, + eventbustest.Type[eventOfInterest](), + eventbustest.Type[eventOfInterest](), + ); err != nil { + t.Log(err.Error()) + } else { + t.Log("OK") + } + // Output: + // timed out waiting for event, saw 1 events, 2 was expected + }) +} diff --git a/util/eventbus/monitor.go b/util/eventbus/monitor.go new file mode 100644 index 0000000000000..db6fe1be44737 --- /dev/null +++ b/util/eventbus/monitor.go @@ -0,0 +1,54 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import "tailscale.com/syncs" + +// A Monitor monitors the execution of a goroutine processing events from a +// [Client], allowing the caller to block until it is complete. The zero value +// of m is valid; its Close and Wait methods return immediately, and its Done +// method returns an already-closed channel. +type Monitor struct { + // These fields are immutable after initialization + cli *Client + done <-chan struct{} +} + +// Close closes the client associated with m and blocks until the processing +// goroutine is complete. +func (m Monitor) Close() { + if m.cli == nil { + return + } + m.cli.Close() + <-m.done +} + +// Wait blocks until the goroutine monitored by m has finished executing, but +// does not close the associated client. It is safe to call Wait repeatedly, +// and from multiple concurrent goroutines. +func (m Monitor) Wait() { + if m.done == nil { + return + } + <-m.done +} + +// Done returns a channel that is closed when the monitored goroutine has +// finished executing. +func (m Monitor) Done() <-chan struct{} { + if m.done == nil { + return syncs.ClosedChan() + } + return m.done +} + +// Monitor executes f in a new goroutine attended by a [Monitor]. The caller +// is responsible for waiting for the goroutine to complete, by calling either +// [Monitor.Close] or [Monitor.Wait]. +func (c *Client) Monitor(f func(*Client)) Monitor { + done := make(chan struct{}) + go func() { defer close(done); f(c) }() + return Monitor{cli: c, done: done} +} diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 4a4bdfb7eda11..348bb9dff950c 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -27,6 +27,10 @@ func newPublisher[T any](c *Client) *Publisher[T] { // Close closes the publisher. // // Calls to Publish after Close silently do nothing. +// +// If the Bus or Client from which the Publisher was created is closed, +// the Publisher is implicitly closed and does not need to be closed +// separately. func (p *Publisher[T]) Close() { // Just unblocks any active calls to Publish, no other // synchronization needed. 
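For reference, the intended use of the new [Client.Monitor] / [Monitor] API added in util/eventbus/monitor.go above is sketched below. The event type (routeEvent) and the worker wiring are illustrative placeholders, not part of this change; the shape follows the testMon helper in bus_test.go.

package example

import "tailscale.com/util/eventbus"

// routeEvent is a placeholder event type used only for this sketch.
type routeEvent struct{ Dst string }

// startWorker subscribes a worker goroutine to routeEvent events and attends
// it with a Monitor so the caller can later block on its shutdown.
func startWorker(bus *eventbus.Bus) eventbus.Monitor {
	c := bus.Client("Worker")
	sub := eventbus.Subscribe[routeEvent](c)
	return c.Monitor(func(c *eventbus.Client) {
		for {
			select {
			case <-c.Done(): // closed by Client.Close (or Monitor.Close)
				return
			case ev := <-sub.Events():
				_ = ev // handle the event
			}
		}
	})
}

// On shutdown, the Monitor returned by startWorker is released with Close,
// which closes the client and waits for the worker goroutine to exit;
// Wait only blocks for the goroutine without closing the client.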
diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ee534781a2cce..c35c7e7f05682 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -61,45 +61,45 @@ func newSubscribeState(c *Client) *subscribeState { return ret } -func (q *subscribeState) pump(ctx context.Context) { +func (s *subscribeState) pump(ctx context.Context) { var vals queue[DeliveredEvent] acceptCh := func() chan DeliveredEvent { if vals.Full() { return nil } - return q.write + return s.write } for { if !vals.Empty() { val := vals.Peek() - sub := q.subscriberFor(val.Event) + sub := s.subscriberFor(val.Event) if sub == nil { // Raced with unsubscribe. vals.Drop() continue } - if !sub.dispatch(ctx, &vals, acceptCh, q.snapshot) { + if !sub.dispatch(ctx, &vals, acceptCh, s.snapshot) { return } - if q.debug.active() { - q.debug.run(DeliveredEvent{ + if s.debug.active() { + s.debug.run(DeliveredEvent{ Event: val.Event, From: val.From, - To: q.client, + To: s.client, }) } } else { // Keep the cases in this select in sync with - // Subscriber.dispatch below. The only difference should be - // that this select doesn't deliver queued values to - // anyone, and unconditionally accepts new values. + // Subscriber.dispatch and SubscriberFunc.dispatch below. + // The only difference should be that this select doesn't deliver + // queued values to anyone, and unconditionally accepts new values. select { - case val := <-q.write: + case val := <-s.write: vals.Add(val) case <-ctx.Done(): return - case ch := <-q.snapshot: + case ch := <-s.snapshot: ch <- vals.Snapshot() } } @@ -152,13 +152,13 @@ func (s *subscribeState) deleteSubscriber(t reflect.Type) { s.client.deleteSubscriber(t, s) } -func (q *subscribeState) subscriberFor(val any) subscriber { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - return q.outputs[reflect.TypeOf(val)] +func (s *subscribeState) subscriberFor(val any) subscriber { + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + return s.outputs[reflect.TypeOf(val)] } -// Close closes the subscribeState. Implicitly closes all Subscribers +// Close closes the subscribeState. It implicitly closes all Subscribers // linked to this state, and any pending events are discarded. func (s *subscribeState) close() { s.dispatcher.StopAndWait() @@ -177,6 +177,7 @@ func (s *subscribeState) closed() <-chan struct{} { } // A Subscriber delivers one type of event from a [Client]. +// Events are sent to the [Subscriber.Events] channel. type Subscriber[T any] struct { stop stopFlag read chan T @@ -213,7 +214,7 @@ func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent t := vals.Peek().Event.(T) for { // Keep the cases in this select in sync with subscribeState.pump - // above. The only different should be that this select + // above. The only difference should be that this select // delivers a value on s.read. select { case s.read <- t: @@ -244,7 +245,67 @@ func (s *Subscriber[T]) Done() <-chan struct{} { // Close closes the Subscriber, indicating the caller no longer wishes // to receive this event type. After Close, receives on // [Subscriber.Events] block for ever. +// +// If the Bus from which the Subscriber was created is closed, +// the Subscriber is implicitly closed and does not need to be closed +// separately. func (s *Subscriber[T]) Close() { s.stop.Stop() // unblock receivers s.unregister() } + +// A SubscriberFunc delivers one type of event from a [Client]. +// Events are forwarded synchronously to a function provided at construction. 
+type SubscriberFunc[T any] struct { + stop stopFlag + read func(T) + unregister func() +} + +func newSubscriberFunc[T any](r *subscribeState, f func(T)) *SubscriberFunc[T] { + return &SubscriberFunc[T]{ + read: f, + unregister: func() { r.deleteSubscriber(reflect.TypeFor[T]()) }, + } +} + +// Close closes the SubscriberFunc, indicating the caller no longer wishes to +// receive this event type. After Close, no further events will be passed to +// the callback. +// +// If the [Bus] from which s was created is closed, s is implicitly closed and +// does not need to be closed separately. +func (s *SubscriberFunc[T]) Close() { s.stop.Stop(); s.unregister() } + +// subscribeType implements part of the subscriber interface. +func (s *SubscriberFunc[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } + +// dispatch implements part of the subscriber interface. +func (s *SubscriberFunc[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { + t := vals.Peek().Event.(T) + callDone := make(chan struct{}) + go s.runCallback(t, callDone) + // Keep the cases in this select in sync with subscribeState.pump + // above. The only difference should be that this select + // delivers a value by calling s.read. + for { + select { + case <-callDone: + vals.Drop() + return true + case val := <-acceptCh(): + vals.Add(val) + case <-ctx.Done(): + return false + case ch := <-snapshot: + ch <- vals.Snapshot() + } + } +} + +// runCallback invokes the callback on v and closes ch when it returns. +// This should be run in a goroutine. +func (s *SubscriberFunc[T]) runCallback(v T, ch chan struct{}) { + defer close(ch) + s.read(v) +} diff --git a/util/linuxfw/detector.go b/util/linuxfw/detector.go index fffa523afdcf4..644126131bbba 100644 --- a/util/linuxfw/detector.go +++ b/util/linuxfw/detector.go @@ -10,6 +10,8 @@ import ( "os/exec" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/hostinfo" "tailscale.com/types/logger" "tailscale.com/version/distro" @@ -42,10 +44,12 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { var det linuxFWDetector if mode == "" { // We have no preference, so check if `iptables` is even available. - _, err := det.iptDetect() - if err != nil && errors.Is(err, exec.ErrNotFound) { - logf("iptables not found: %v; falling back to nftables", err) - mode = "nftables" + if buildfeatures.HasIPTables { + _, err := det.iptDetect() + if err != nil && errors.Is(err, exec.ErrNotFound) { + logf("iptables not found: %v; falling back to nftables", err) + mode = "nftables" + } } } @@ -59,11 +63,16 @@ func detectFirewallMode(logf logger.Logf, prefHint string) FirewallMode { return FirewallModeNfTables case "iptables": hostinfo.SetFirewallMode("ipt-forced") - default: + return FirewallModeIPTables + } + if buildfeatures.HasIPTables { logf("default choosing iptables") hostinfo.SetFirewallMode("ipt-default") + return FirewallModeIPTables } - return FirewallModeIPTables + logf("default choosing nftables") + hostinfo.SetFirewallMode("nft-default") + return FirewallModeNfTables } // tableDetector abstracts helpers to detect the firewall mode. @@ -80,19 +89,33 @@ func (l linuxFWDetector) iptDetect() (int, error) { return detectIptables() } +var hookDetectNetfilter feature.Hook[func() (int, error)] + +// ErrUnsupported is the error returned from all functions on non-Linux +// platforms. 
+var ErrUnsupported = errors.New("linuxfw:unsupported") + // nftDetect returns the number of nftables rules in the current namespace. func (l linuxFWDetector) nftDetect() (int, error) { - return detectNetfilter() + if f, ok := hookDetectNetfilter.GetOk(); ok { + return f() + } + return 0, ErrUnsupported } // pickFirewallModeFromInstalledRules returns the firewall mode to use based on // the environment and the system's capabilities. func pickFirewallModeFromInstalledRules(logf logger.Logf, det tableDetector) FirewallMode { + if !buildfeatures.HasIPTables { + hostinfo.SetFirewallMode("nft-noipt") + return FirewallModeNfTables + } if distro.Get() == distro.Gokrazy { // Reduce startup logging on gokrazy. There's no way to do iptables on // gokrazy anyway. return FirewallModeNfTables } + iptAva, nftAva := true, true iptRuleCount, err := det.iptDetect() if err != nil { diff --git a/util/linuxfw/fake.go b/util/linuxfw/fake.go index 63a728d5566a5..d01849a2e5c9d 100644 --- a/util/linuxfw/fake.go +++ b/util/linuxfw/fake.go @@ -128,7 +128,7 @@ func (n *fakeIPTables) DeleteChain(table, chain string) error { } } -func NewFakeIPTablesRunner() *iptablesRunner { +func NewFakeIPTablesRunner() NetfilterRunner { ipt4 := newFakeIPTables() v6Available := false var ipt6 iptablesInterface diff --git a/util/linuxfw/iptables.go b/util/linuxfw/iptables.go index 234fa526ce17c..76c5400becff8 100644 --- a/util/linuxfw/iptables.go +++ b/util/linuxfw/iptables.go @@ -1,21 +1,31 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// TODO(#8502): add support for more architectures -//go:build linux && (arm64 || amd64) +//go:build linux && !ts_omit_iptables package linuxfw import ( + "bytes" + "errors" "fmt" + "os" "os/exec" "strings" "unicode" + "github.com/coreos/go-iptables/iptables" "tailscale.com/types/logger" - "tailscale.com/util/multierr" + "tailscale.com/version/distro" ) +func init() { + isNotExistError = func(err error) bool { + var e *iptables.Error + return errors.As(err, &e) && e.IsNotExist() + } +} + // DebugNetfilter prints debug information about iptables rules to the // provided log function. func DebugIptables(logf logger.Logf) error { @@ -54,7 +64,7 @@ func detectIptables() (int, error) { default: return 0, FWModeNotSupportedError{ Mode: FirewallModeIPTables, - Err: fmt.Errorf("iptables command run fail: %w", multierr.New(err, ip6err)), + Err: fmt.Errorf("iptables command run fail: %w", errors.Join(err, ip6err)), } } @@ -71,3 +81,153 @@ func detectIptables() (int, error) { // return the count of non-default rules return count, nil } + +// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules. +// If the underlying iptables library fails to initialize, that error is +// returned. The runner probes for IPv6 support once at initialization time and +// if not found, no IPv6 rules will be modified for the lifetime of the runner. 
+func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { + ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) + if err != nil { + return nil, err + } + + supportsV6, supportsV6NAT, supportsV6Filter := false, false, false + v6err := CheckIPv6(logf) + ip6terr := checkIP6TablesExists() + var ipt6 *iptables.IPTables + switch { + case v6err != nil: + logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err) + case ip6terr != nil: + logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr) + default: + supportsV6 = true + ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) + if err != nil { + return nil, err + } + supportsV6Filter = checkSupportsV6Filter(ipt6, logf) + supportsV6NAT = checkSupportsV6NAT(ipt6, logf) + logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) + } + return &iptablesRunner{ + ipt4: ipt4, + ipt6: ipt6, + v6Available: supportsV6, + v6NATAvailable: supportsV6NAT, + v6FilterAvailable: supportsV6Filter}, nil +} + +// checkSupportsV6Filter returns whether the system has a "filter" table in the +// IPv6 tables. Some container environments such as GitHub codespaces have +// limited local IPv6 support, and containers containing ip6tables, but do not +// have kernel support for IPv6 filtering. +// We will not set ip6tables rules in these instances. +func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool { + if ipt == nil { + return false + } + _, filterListErr := ipt.ListChains("filter") + if filterListErr == nil { + return true + } + logf("ip6tables filtering is not supported on this host: %v", filterListErr) + return false +} + +// checkSupportsV6NAT returns whether the system has a "nat" table in the +// IPv6 netfilter stack. +// +// The nat table was added after the initial release of ipv6 +// netfilter, so some older distros ship a kernel that can't NAT IPv6 +// traffic. +// ipt must be initialized for IPv6. +func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool { + if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 { + return false + } + _, natListErr := ipt.ListChains("nat") + if natListErr == nil { + return true + } + + // TODO (irbekrm): the following two checks were added before the check + // above that verifies that nat chains can be listed. It is a + // container-friendly check (see + // https://github.com/tailscale/tailscale/issues/11344), but also should + // be good enough on its own in other environments. If we never observe + // it falsely succeed, let's remove the other two checks. + + bs, err := os.ReadFile("/proc/net/ip6_tables_names") + if err != nil { + return false + } + if bytes.Contains(bs, []byte("nat\n")) { + logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing") + return true + } + if exec.Command("modprobe", "ip6table_nat").Run() == nil { + logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded") + return true + } + return false +} + +func init() { + hookIPTablesCleanup.Set(ipTablesCleanUp) +} + +// ipTablesCleanUp removes all Tailscale added iptables rules. +// Any errors that occur are logged to the provided logf. +func ipTablesCleanUp(logf logger.Logf) { + switch distro.Get() { + case distro.Gokrazy, distro.JetKVM: + // These use nftables and don't have the "iptables" command. + // Avoid log spam on cleanup. 
(#12277) + return + } + err := clearRules(iptables.ProtocolIPv4, logf) + if err != nil { + logf("linuxfw: clear iptables: %v", err) + } + + err = clearRules(iptables.ProtocolIPv6, logf) + if err != nil { + logf("linuxfw: clear ip6tables: %v", err) + } +} + +// clearRules clears all the iptables rules created by Tailscale +// for the given protocol. If error occurs, it's logged but not returned. +func clearRules(proto iptables.Protocol, logf logger.Logf) error { + ipt, err := iptables.NewWithProtocol(proto) + if err != nil { + return err + } + + var errs []error + + if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil { + errs = append(errs, err) + } + if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil { + errs = append(errs, err) + } + if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil { + errs = append(errs, err) + } + + if err := delChain(ipt, "filter", "ts-input"); err != nil { + errs = append(errs, err) + } + if err := delChain(ipt, "filter", "ts-forward"); err != nil { + errs = append(errs, err) + } + + if err := delChain(ipt, "nat", "ts-postrouting"); err != nil { + errs = append(errs, err) + } + + return errors.Join(errs...) +} diff --git a/util/linuxfw/iptables_disabled.go b/util/linuxfw/iptables_disabled.go new file mode 100644 index 0000000000000..538e33647381a --- /dev/null +++ b/util/linuxfw/iptables_disabled.go @@ -0,0 +1,20 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux && ts_omit_iptables + +package linuxfw + +import ( + "errors" + + "tailscale.com/types/logger" +) + +func detectIptables() (int, error) { + return 0, nil +} + +func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { + return nil, errors.New("iptables disabled in build") +} diff --git a/util/linuxfw/iptables_for_svcs_test.go b/util/linuxfw/iptables_for_svcs_test.go index c3c1b1f65d6fe..0e56d70ba7078 100644 --- a/util/linuxfw/iptables_for_svcs_test.go +++ b/util/linuxfw/iptables_for_svcs_test.go @@ -10,6 +10,10 @@ import ( "testing" ) +func newFakeIPTablesRunner() *iptablesRunner { + return NewFakeIPTablesRunner().(*iptablesRunner) +} + func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { v4Addr := netip.MustParseAddr("10.0.0.4") v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") @@ -45,7 +49,7 @@ func Test_iptablesRunner_EnsurePortMapRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreatePortMapRule(t, ruleset, table) @@ -103,7 +107,7 @@ func Test_iptablesRunner_DeletePortMapRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreatePortMapRule(t, ruleset, table) @@ -127,7 +131,7 @@ func Test_iptablesRunner_DeleteSvc(t *testing.T) { v4Addr := netip.MustParseAddr("10.0.0.4") v6Addr := netip.MustParseAddr("fd7a:115c:a1e0::701:b62a") testPM := PortMap{Protocol: "tcp", MatchPort: 4003, TargetPort: 80} - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // create two rules that will consitute svc1 s1R1 := argsForPortMapRule("svc1", "tailscale0", v4Addr, testPM) @@ -189,7 +193,7 @@ func Test_iptablesRunner_EnsureDNATRuleForSvc(t *testing.T) { } for _, tt := range tests { 
t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreateDNATRule(t, ruleset, table) @@ -248,7 +252,7 @@ func Test_iptablesRunner_DeleteDNATRuleForSvc(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() table := iptr.getIPTByAddr(tt.targetIP) for _, ruleset := range tt.precreateSvcRules { mustPrecreateDNATRule(t, ruleset, table) diff --git a/util/linuxfw/iptables_runner.go b/util/linuxfw/iptables_runner.go index ba1d7c1574cd1..41116f019bf9c 100644 --- a/util/linuxfw/iptables_runner.go +++ b/util/linuxfw/iptables_runner.go @@ -6,31 +6,22 @@ package linuxfw import ( - "bytes" - "errors" "fmt" "log" "net/netip" - "os" "os/exec" "slices" "strconv" "strings" - "github.com/coreos/go-iptables/iptables" "tailscale.com/net/tsaddr" "tailscale.com/types/logger" - "tailscale.com/util/multierr" - "tailscale.com/version/distro" ) // isNotExistError needs to be overridden in tests that rely on distinguishing // this error, because we don't have a good way how to create a new // iptables.Error of that type. -var isNotExistError = func(err error) bool { - var e *iptables.Error - return errors.As(err, &e) && e.IsNotExist() -} +var isNotExistError = func(err error) bool { return false } type iptablesInterface interface { // Adding this interface for testing purposes so we can mock out @@ -62,98 +53,6 @@ func checkIP6TablesExists() error { return nil } -// newIPTablesRunner constructs a NetfilterRunner that programs iptables rules. -// If the underlying iptables library fails to initialize, that error is -// returned. The runner probes for IPv6 support once at initialization time and -// if not found, no IPv6 rules will be modified for the lifetime of the runner. -func newIPTablesRunner(logf logger.Logf) (*iptablesRunner, error) { - ipt4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) - if err != nil { - return nil, err - } - - supportsV6, supportsV6NAT, supportsV6Filter := false, false, false - v6err := CheckIPv6(logf) - ip6terr := checkIP6TablesExists() - var ipt6 *iptables.IPTables - switch { - case v6err != nil: - logf("disabling tunneled IPv6 due to system IPv6 config: %v", v6err) - case ip6terr != nil: - logf("disabling tunneled IPv6 due to missing ip6tables: %v", ip6terr) - default: - supportsV6 = true - ipt6, err = iptables.NewWithProtocol(iptables.ProtocolIPv6) - if err != nil { - return nil, err - } - supportsV6Filter = checkSupportsV6Filter(ipt6, logf) - supportsV6NAT = checkSupportsV6NAT(ipt6, logf) - logf("netfilter running in iptables mode v6 = %v, v6filter = %v, v6nat = %v", supportsV6, supportsV6Filter, supportsV6NAT) - } - return &iptablesRunner{ - ipt4: ipt4, - ipt6: ipt6, - v6Available: supportsV6, - v6NATAvailable: supportsV6NAT, - v6FilterAvailable: supportsV6Filter}, nil -} - -// checkSupportsV6Filter returns whether the system has a "filter" table in the -// IPv6 tables. Some container environments such as GitHub codespaces have -// limited local IPv6 support, and containers containing ip6tables, but do not -// have kernel support for IPv6 filtering. -// We will not set ip6tables rules in these instances. 
-func checkSupportsV6Filter(ipt *iptables.IPTables, logf logger.Logf) bool { - if ipt == nil { - return false - } - _, filterListErr := ipt.ListChains("filter") - if filterListErr == nil { - return true - } - logf("ip6tables filtering is not supported on this host: %v", filterListErr) - return false -} - -// checkSupportsV6NAT returns whether the system has a "nat" table in the -// IPv6 netfilter stack. -// -// The nat table was added after the initial release of ipv6 -// netfilter, so some older distros ship a kernel that can't NAT IPv6 -// traffic. -// ipt must be initialized for IPv6. -func checkSupportsV6NAT(ipt *iptables.IPTables, logf logger.Logf) bool { - if ipt == nil || ipt.Proto() != iptables.ProtocolIPv6 { - return false - } - _, natListErr := ipt.ListChains("nat") - if natListErr == nil { - return true - } - - // TODO (irbekrm): the following two checks were added before the check - // above that verifies that nat chains can be listed. It is a - // container-friendly check (see - // https://github.com/tailscale/tailscale/issues/11344), but also should - // be good enough on its own in other environments. If we never observe - // it falsely succeed, let's remove the other two checks. - - bs, err := os.ReadFile("/proc/net/ip6_tables_names") - if err != nil { - return false - } - if bytes.Contains(bs, []byte("nat\n")) { - logf("[unexpected] listing nat chains failed, but /proc/net/ip6_tables_name reports a nat table existing") - return true - } - if exec.Command("modprobe", "ip6table_nat").Run() == nil { - logf("[unexpected] listing nat chains failed, but modprobe ip6table_nat succeeded") - return true - } - return false -} - // HasIPV6 reports true if the system supports IPv6. func (i *iptablesRunner) HasIPV6() bool { return i.v6Available @@ -359,11 +258,11 @@ func (i *iptablesRunner) addBase4(tunname string) error { // POSTROUTING. So instead, we match on the inbound interface in // filter/FORWARD, and set a packet mark that nat/POSTROUTING can // use to effectively run that same test again. 
- args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask} + args = []string{"-i", tunname, "-j", "MARK", "--set-mark", subnetRouteMark + "/" + fwmarkMask} if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err) } - args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"} + args = []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "ACCEPT"} if err := i.ipt4.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v4/filter/ts-forward: %w", args, err) } @@ -465,11 +364,11 @@ func (i *iptablesRunner) addBase6(tunname string) error { return fmt.Errorf("adding %v in v6/filter/ts-input: %w", args, err) } - args = []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask} + args = []string{"-i", tunname, "-j", "MARK", "--set-mark", subnetRouteMark + "/" + fwmarkMask} if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err) } - args = []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"} + args = []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "ACCEPT"} if err := i.ipt6.Append("filter", "ts-forward", args...); err != nil { return fmt.Errorf("adding %v in v6/filter/ts-forward: %w", args, err) } @@ -558,7 +457,7 @@ func (i *iptablesRunner) DelHooks(logf logger.Logf) error { // AddSNATRule adds a netfilter rule to SNAT traffic destined for // local subnets. func (i *iptablesRunner) AddSNATRule() error { - args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"} + args := []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "MASQUERADE"} for _, ipt := range i.getNATTables() { if err := ipt.Append("nat", "ts-postrouting", args...); err != nil { return fmt.Errorf("adding %v in nat/ts-postrouting: %w", args, err) @@ -570,7 +469,7 @@ func (i *iptablesRunner) AddSNATRule() error { // DelSNATRule removes the netfilter rule to SNAT traffic destined for // local subnets. An error is returned if the rule does not exist. func (i *iptablesRunner) DelSNATRule() error { - args := []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"} + args := []string{"-m", "mark", "--mark", subnetRouteMark + "/" + fwmarkMask, "-j", "MASQUERADE"} for _, ipt := range i.getNATTables() { if err := ipt.Delete("nat", "ts-postrouting", args...); err != nil { return fmt.Errorf("deleting %v in nat/ts-postrouting: %w", args, err) @@ -697,26 +596,6 @@ func (i *iptablesRunner) DelMagicsockPortRule(port uint16, network string) error return nil } -// IPTablesCleanUp removes all Tailscale added iptables rules. -// Any errors that occur are logged to the provided logf. -func IPTablesCleanUp(logf logger.Logf) { - switch distro.Get() { - case distro.Gokrazy, distro.JetKVM: - // These use nftables and don't have the "iptables" command. - // Avoid log spam on cleanup. 
(#12277) - return - } - err := clearRules(iptables.ProtocolIPv4, logf) - if err != nil { - logf("linuxfw: clear iptables: %v", err) - } - - err = clearRules(iptables.ProtocolIPv6, logf) - if err != nil { - logf("linuxfw: clear ip6tables: %v", err) - } -} - // delTSHook deletes hook in a chain that jumps to a ts-chain. If the hook does not // exist, it's a no-op since the desired state is already achieved but we log the // error because error code from the iptables module resists unwrapping. @@ -745,40 +624,6 @@ func delChain(ipt iptablesInterface, table, chain string) error { return nil } -// clearRules clears all the iptables rules created by Tailscale -// for the given protocol. If error occurs, it's logged but not returned. -func clearRules(proto iptables.Protocol, logf logger.Logf) error { - ipt, err := iptables.NewWithProtocol(proto) - if err != nil { - return err - } - - var errs []error - - if err := delTSHook(ipt, "filter", "INPUT", logf); err != nil { - errs = append(errs, err) - } - if err := delTSHook(ipt, "filter", "FORWARD", logf); err != nil { - errs = append(errs, err) - } - if err := delTSHook(ipt, "nat", "POSTROUTING", logf); err != nil { - errs = append(errs, err) - } - - if err := delChain(ipt, "filter", "ts-input"); err != nil { - errs = append(errs, err) - } - if err := delChain(ipt, "filter", "ts-forward"); err != nil { - errs = append(errs, err) - } - - if err := delChain(ipt, "nat", "ts-postrouting"); err != nil { - errs = append(errs, err) - } - - return multierr.New(errs...) -} - // argsFromPostRoutingRule accepts a rule as returned by iptables.List and, if it is a rule from POSTROUTING chain, // returns the args part, else returns the original rule. func argsFromPostRoutingRule(r string) string { diff --git a/util/linuxfw/iptables_runner_test.go b/util/linuxfw/iptables_runner_test.go index 56f13c78a8010..ce905aef3f75b 100644 --- a/util/linuxfw/iptables_runner_test.go +++ b/util/linuxfw/iptables_runner_test.go @@ -11,6 +11,7 @@ import ( "testing" "tailscale.com/net/tsaddr" + "tailscale.com/tsconst" ) var testIsNotExistErr = "exitcode:1" @@ -20,7 +21,7 @@ func init() { } func TestAddAndDeleteChains(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() err := iptr.AddChains() if err != nil { t.Fatal(err) @@ -59,7 +60,7 @@ func TestAddAndDeleteChains(t *testing.T) { } func TestAddAndDeleteHooks(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // don't need to test what happens if the chains don't exist, because // this is handled by fake iptables, in realife iptables would return error. 
if err := iptr.AddChains(); err != nil { @@ -113,7 +114,7 @@ func TestAddAndDeleteHooks(t *testing.T) { } func TestAddAndDeleteBase(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() tunname := "tun0" if err := iptr.AddChains(); err != nil { t.Fatal(err) @@ -132,8 +133,8 @@ func TestAddAndDeleteBase(t *testing.T) { tsRulesCommon := []fakeRule{ // table/chain/rule {"filter", "ts-input", []string{"-i", tunname, "-j", "ACCEPT"}}, - {"filter", "ts-forward", []string{"-i", tunname, "-j", "MARK", "--set-mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask}}, - {"filter", "ts-forward", []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "ACCEPT"}}, + {"filter", "ts-forward", []string{"-i", tunname, "-j", "MARK", "--set-mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask}}, + {"filter", "ts-forward", []string{"-m", "mark", "--mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask, "-j", "ACCEPT"}}, {"filter", "ts-forward", []string{"-o", tunname, "-j", "ACCEPT"}}, } @@ -176,7 +177,7 @@ func TestAddAndDeleteBase(t *testing.T) { } func TestAddAndDelLoopbackRule(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // We don't need to test for malformed addresses, AddLoopbackRule // takes in a netip.Addr, which is already valid. fakeAddrV4 := netip.MustParseAddr("192.168.0.2") @@ -247,14 +248,14 @@ func TestAddAndDelLoopbackRule(t *testing.T) { } func TestAddAndDelSNATRule(t *testing.T) { - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() if err := iptr.AddChains(); err != nil { t.Fatal(err) } rule := fakeRule{ // table/chain/rule - "nat", "ts-postrouting", []string{"-m", "mark", "--mark", TailscaleSubnetRouteMark + "/" + TailscaleFwmarkMask, "-j", "MASQUERADE"}, + "nat", "ts-postrouting", []string{"-m", "mark", "--mark", tsconst.LinuxSubnetRouteMark + "/" + tsconst.LinuxFwmarkMask, "-j", "MASQUERADE"}, } // Add SNAT rule @@ -292,7 +293,7 @@ func TestAddAndDelSNATRule(t *testing.T) { func TestEnsureSNATForDst_ipt(t *testing.T) { ip1, ip2, ip3 := netip.MustParseAddr("100.99.99.99"), netip.MustParseAddr("100.88.88.88"), netip.MustParseAddr("100.77.77.77") - iptr := NewFakeIPTablesRunner() + iptr := newFakeIPTablesRunner() // 1. A new rule gets added mustCreateSNATRule_ipt(t, iptr, ip1, ip2) diff --git a/util/linuxfw/linuxfw.go b/util/linuxfw/linuxfw.go index be520e7a4a074..ec73aaceea03a 100644 --- a/util/linuxfw/linuxfw.go +++ b/util/linuxfw/linuxfw.go @@ -14,6 +14,8 @@ import ( "strings" "github.com/tailscale/netlink" + "tailscale.com/feature" + "tailscale.com/tsconst" "tailscale.com/types/logger" ) @@ -69,23 +71,12 @@ const ( // matching and setting the bits, so they can be directly embedded in // commands. const ( - // The mask for reading/writing the 'firewall mask' bits on a packet. - // See the comment on the const block on why we only use the third byte. - // - // We claim bits 16:23 entirely. For now we only use the lower four - // bits, leaving the higher 4 bits for future use. - TailscaleFwmarkMask = "0xff0000" - TailscaleFwmarkMaskNum = 0xff0000 - - // Packet is from Tailscale and to a subnet route destination, so - // is allowed to be routed through this machine. - TailscaleSubnetRouteMark = "0x40000" - TailscaleSubnetRouteMarkNum = 0x40000 - - // Packet was originated by tailscaled itself, and must not be - // routed over the Tailscale network. 
- TailscaleBypassMark = "0x80000" - TailscaleBypassMarkNum = 0x80000 + fwmarkMask = tsconst.LinuxFwmarkMask + fwmarkMaskNum = tsconst.LinuxFwmarkMaskNum + subnetRouteMark = tsconst.LinuxSubnetRouteMark + subnetRouteMarkNum = tsconst.LinuxSubnetRouteMarkNum + bypassMark = tsconst.LinuxBypassMark + bypassMarkNum = tsconst.LinuxBypassMarkNum ) // getTailscaleFwmarkMaskNeg returns the negation of TailscaleFwmarkMask in bytes. @@ -169,7 +160,7 @@ func CheckIPRuleSupportsV6(logf logger.Logf) error { // Try to actually create & delete one as a test. rule := netlink.NewRule() rule.Priority = 1234 - rule.Mark = TailscaleBypassMarkNum + rule.Mark = bypassMarkNum rule.Table = 52 rule.Family = netlink.FAMILY_V6 // First delete the rule unconditionally, and don't check for @@ -180,3 +171,13 @@ func CheckIPRuleSupportsV6(logf logger.Logf) error { defer netlink.RuleDel(rule) return netlink.RuleAdd(rule) } + +var hookIPTablesCleanup feature.Hook[func(logger.Logf)] + +// IPTablesCleanUp removes all Tailscale added iptables rules. +// Any errors that occur are logged to the provided logf. +func IPTablesCleanUp(logf logger.Logf) { + if f, ok := hookIPTablesCleanup.GetOk(); ok { + f(logf) + } +} diff --git a/util/linuxfw/linuxfw_unsupported.go b/util/linuxfw/linuxfw_unsupported.go deleted file mode 100644 index 7bfb4fd010302..0000000000000 --- a/util/linuxfw/linuxfw_unsupported.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -// NOTE: linux_{arm64, amd64} are the only two currently supported archs due to missing -// support in upstream dependencies. - -// TODO(#8502): add support for more architectures -//go:build linux && !(arm64 || amd64) - -package linuxfw - -import ( - "errors" - - "tailscale.com/types/logger" -) - -// ErrUnsupported is the error returned from all functions on non-Linux -// platforms. -var ErrUnsupported = errors.New("linuxfw:unsupported") - -// DebugNetfilter is not supported on non-Linux platforms. -func DebugNetfilter(logf logger.Logf) error { - return ErrUnsupported -} - -// DetectNetfilter is not supported on non-Linux platforms. -func detectNetfilter() (int, error) { - return 0, ErrUnsupported -} - -// DebugIptables is not supported on non-Linux platforms. -func debugIptables(logf logger.Logf) error { - return ErrUnsupported -} - -// DetectIptables is not supported on non-Linux platforms. -func detectIptables() (int, error) { - return 0, ErrUnsupported -} diff --git a/util/linuxfw/nftables.go b/util/linuxfw/nftables.go index e8b267b5e42ae..94ce51a1405a4 100644 --- a/util/linuxfw/nftables.go +++ b/util/linuxfw/nftables.go @@ -103,6 +103,10 @@ func DebugNetfilter(logf logger.Logf) error { return nil } +func init() { + hookDetectNetfilter.Set(detectNetfilter) +} + // detectNetfilter returns the number of nftables rules present in the system. func detectNetfilter() (int, error) { // Frist try creating a dummy postrouting chain. 
Emperically, we have diff --git a/util/osshare/filesharingstatus_windows.go b/util/osshare/filesharingstatus_windows.go index 999fc1cf77372..c125de15990c3 100644 --- a/util/osshare/filesharingstatus_windows.go +++ b/util/osshare/filesharingstatus_windows.go @@ -9,30 +9,31 @@ import ( "fmt" "os" "path/filepath" - "sync" + "runtime" "golang.org/x/sys/windows/registry" + "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/util/winutil" ) const ( sendFileShellKey = `*\shell\tailscale` ) -var ipnExePath struct { - sync.Mutex - cache string // absolute path of tailscale-ipn.exe, populated lazily on first use -} +var ipnExePath lazy.SyncValue[string] // absolute path of the GUI executable func getIpnExePath(logf logger.Logf) string { - ipnExePath.Lock() - defer ipnExePath.Unlock() - - if ipnExePath.cache != "" { - return ipnExePath.cache + exe, err := winutil.GUIPathFromReg() + if err == nil { + return exe } - // Find the absolute path of tailscale-ipn.exe assuming that it's in the same + return findGUIInSameDirAsThisExe(logf) +} + +func findGUIInSameDirAsThisExe(logf logger.Logf) string { + // Find the absolute path of the GUI, assuming that it's in the same // directory as this executable (tailscaled.exe). p, err := os.Executable() if err != nil { @@ -43,14 +44,23 @@ func getIpnExePath(logf logger.Logf) string { logf("filepath.EvalSymlinks error: %v", err) return "" } - p = filepath.Join(filepath.Dir(p), "tailscale-ipn.exe") if p, err = filepath.Abs(p); err != nil { logf("filepath.Abs error: %v", err) return "" } - ipnExePath.cache = p - - return p + d := filepath.Dir(p) + candidates := []string{"tailscale-ipn.exe"} + if runtime.GOARCH == "arm64" { + // This name may be used on Windows 10 ARM64. + candidates = append(candidates, "tailscale-gui-386.exe") + } + for _, c := range candidates { + testPath := filepath.Join(d, c) + if _, err := os.Stat(testPath); err == nil { + return testPath + } + } + return "" } // SetFileSharingEnabled adds/removes "Send with Tailscale" from the Windows shell menu. @@ -64,7 +74,9 @@ func SetFileSharingEnabled(enabled bool, logf logger.Logf) { } func enableFileSharing(logf logger.Logf) { - path := getIpnExePath(logf) + path := ipnExePath.Get(func() string { + return getIpnExePath(logf) + }) if path == "" { return } @@ -79,7 +91,7 @@ func enableFileSharing(logf logger.Logf) { logf("k.SetStringValue error: %v", err) return } - if err := k.SetStringValue("Icon", path+",0"); err != nil { + if err := k.SetStringValue("Icon", path+",1"); err != nil { logf("k.SetStringValue error: %v", err) return } diff --git a/util/prompt/prompt.go b/util/prompt/prompt.go index 4e589ceb32b52..a6d86fb481769 100644 --- a/util/prompt/prompt.go +++ b/util/prompt/prompt.go @@ -6,19 +6,34 @@ package prompt import ( "fmt" + "os" "strings" + + "github.com/mattn/go-isatty" ) // YesNo takes a question and prompts the user to answer the // question with a yes or no. It appends a [y/n] to the message. -func YesNo(msg string) bool { - fmt.Print(msg + " [y/n] ") +// +// If there is no TTY on both Stdin and Stdout, assume that we're in a script +// and return the dflt result. 
+func YesNo(msg string, dflt bool) bool { + if !(isatty.IsTerminal(os.Stdin.Fd()) && isatty.IsTerminal(os.Stdout.Fd())) { + return dflt + } + if dflt { + fmt.Print(msg + " [Y/n] ") + } else { + fmt.Print(msg + " [y/N] ") + } var resp string fmt.Scanln(&resp) resp = strings.ToLower(resp) switch resp { case "y", "yes", "sure": return true + case "": + return dflt } return false } diff --git a/util/set/handle.go b/util/set/handle.go index 471ceeba2d523..9c6b6dab0549b 100644 --- a/util/set/handle.go +++ b/util/set/handle.go @@ -9,20 +9,28 @@ package set type HandleSet[T any] map[Handle]T // Handle is an opaque comparable value that's used as the map key in a -// HandleSet. The only way to get one is to call HandleSet.Add. +// HandleSet. type Handle struct { v *byte } +// NewHandle returns a new handle value. +func NewHandle() Handle { + return Handle{new(byte)} +} + // Add adds the element (map value) e to the set. // -// It returns the handle (map key) with which e can be removed, using a map -// delete. +// It returns a new handle (map key) with which e can be removed, using a map +// delete or the [HandleSet.Delete] method. func (s *HandleSet[T]) Add(e T) Handle { - h := Handle{new(byte)} + h := NewHandle() if *s == nil { *s = make(HandleSet[T]) } (*s)[h] = e return h } + +// Delete removes the element with handle h from the set. +func (s HandleSet[T]) Delete(h Handle) { delete(s, h) } diff --git a/util/syspolicy/pkey/pkey.go b/util/syspolicy/pkey/pkey.go index cfef9e17a333a..e450625cd1710 100644 --- a/util/syspolicy/pkey/pkey.go +++ b/util/syspolicy/pkey/pkey.go @@ -47,6 +47,13 @@ const ( // An empty string or a zero duration disables automatic reconnection. ReconnectAfter Key = "ReconnectAfter" + // AllowTailscaledRestart is a boolean key that controls whether users with write access + // to the LocalAPI are allowed to shutdown tailscaled with the intention of restarting it. + // On Windows, tailscaled will be restarted automatically by the service process + // (see babysitProc in cmd/tailscaled/tailscaled_windows.go). + // On other platforms, it is the client's responsibility to restart tailscaled. + AllowTailscaledRestart Key = "AllowTailscaledRestart" + // ExitNodeID is the exit node's node id. default ""; if blank, no exit node is forced. // Exit node ID takes precedence over exit node IP. // To find the node ID, go to /api.md#device. @@ -129,9 +136,15 @@ const ( FlushDNSOnSessionUnlock Key = "FlushDNSOnSessionUnlock" // EncryptState is a boolean setting that specifies whether to encrypt the - // tailscaled state file with a TPM device. + // tailscaled state file. + // Windows and Linux use a TPM device, Apple uses the Keychain. + // It's a noop on other platforms. EncryptState Key = "EncryptState" + // HardwareAttestation is a boolean key that controls whether to use a + // hardware-backed key to bind the node identity to this device. + HardwareAttestation Key = "HardwareAttestation" + // PostureChecking indicates if posture checking is enabled and the client shall gather // posture data. // Key is a string value that specifies an option: "always", "never", "user-decides". 
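// Sketch, not from this change: the reworked prompt.YesNo a few hunks above
// now takes an explicit default and returns it without prompting when stdin
// and stdout are not TTYs. A minimal, hypothetical call site:

package main

import (
	"fmt"

	"tailscale.com/util/prompt"
)

func main() {
	// Destructive action: default to "no" so a script or CI job (no TTY on
	// stdin/stdout) never proceeds without a human explicitly confirming.
	if !prompt.YesNo("Really clear local state?", false) {
		fmt.Println("aborted")
		return
	}
	fmt.Println("clearing state...")
}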
diff --git a/util/syspolicy/policy_keys.go b/util/syspolicy/policy_keys.go index ef2ac430dbccc..3a54f9dde5dd7 100644 --- a/util/syspolicy/policy_keys.go +++ b/util/syspolicy/policy_keys.go @@ -17,6 +17,7 @@ var implicitDefinitions = []*setting.Definition{ // Device policy settings (can only be configured on a per-device basis): setting.NewDefinition(pkey.AllowedSuggestedExitNodes, setting.DeviceSetting, setting.StringListValue), setting.NewDefinition(pkey.AllowExitNodeOverride, setting.DeviceSetting, setting.BooleanValue), + setting.NewDefinition(pkey.AllowTailscaledRestart, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.AlwaysOn, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.AlwaysOnOverrideWithReason, setting.DeviceSetting, setting.BooleanValue), setting.NewDefinition(pkey.ApplyUpdates, setting.DeviceSetting, setting.PreferenceOptionValue), @@ -42,6 +43,7 @@ var implicitDefinitions = []*setting.Definition{ setting.NewDefinition(pkey.PostureChecking, setting.DeviceSetting, setting.PreferenceOptionValue), setting.NewDefinition(pkey.ReconnectAfter, setting.DeviceSetting, setting.DurationValue), setting.NewDefinition(pkey.Tailnet, setting.DeviceSetting, setting.StringValue), + setting.NewDefinition(pkey.HardwareAttestation, setting.DeviceSetting, setting.BooleanValue), // User policy settings (can be configured on a user- or device-basis): setting.NewDefinition(pkey.AdminConsoleVisibility, setting.UserSetting, setting.VisibilityValue), diff --git a/util/systemd/systemd_nonlinux.go b/util/systemd/systemd_nonlinux.go deleted file mode 100644 index 5d7772bb3e61f..0000000000000 --- a/util/systemd/systemd_nonlinux.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || android - -package systemd - -func Ready() {} -func Status(string, ...any) {} diff --git a/util/usermetric/metrics.go b/util/usermetric/metrics.go index 044b4d65f7120..be425fb87fd6c 100644 --- a/util/usermetric/metrics.go +++ b/util/usermetric/metrics.go @@ -10,15 +10,15 @@ package usermetric import ( "sync" - "tailscale.com/metrics" + "tailscale.com/feature/buildfeatures" ) // Metrics contains user-facing metrics that are used by multiple packages. type Metrics struct { initOnce sync.Once - droppedPacketsInbound *metrics.MultiLabelMap[DropLabels] - droppedPacketsOutbound *metrics.MultiLabelMap[DropLabels] + droppedPacketsInbound *MultiLabelMap[DropLabels] + droppedPacketsOutbound *MultiLabelMap[DropLabels] } // DropReason is the reason why a packet was dropped. @@ -55,6 +55,9 @@ type DropLabels struct { // initOnce initializes the common metrics. func (r *Registry) initOnce() { + if !buildfeatures.HasUserMetrics { + return + } r.m.initOnce.Do(func() { r.m.droppedPacketsInbound = NewMultiLabelMapWithRegistry[DropLabels]( r, @@ -73,13 +76,13 @@ func (r *Registry) initOnce() { // DroppedPacketsOutbound returns the outbound dropped packet metric, creating it // if necessary. -func (r *Registry) DroppedPacketsOutbound() *metrics.MultiLabelMap[DropLabels] { +func (r *Registry) DroppedPacketsOutbound() *MultiLabelMap[DropLabels] { r.initOnce() return r.m.droppedPacketsOutbound } // DroppedPacketsInbound returns the inbound dropped packet metric. 
-func (r *Registry) DroppedPacketsInbound() *metrics.MultiLabelMap[DropLabels] { +func (r *Registry) DroppedPacketsInbound() *MultiLabelMap[DropLabels] { r.initOnce() return r.m.droppedPacketsInbound } diff --git a/util/usermetric/omit.go b/util/usermetric/omit.go new file mode 100644 index 0000000000000..0611990abe89e --- /dev/null +++ b/util/usermetric/omit.go @@ -0,0 +1,29 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_usermetrics + +package usermetric + +type Registry struct { + m Metrics +} + +func (*Registry) NewGauge(name, help string) *Gauge { return nil } + +type MultiLabelMap[T comparable] = noopMap[T] + +type noopMap[T comparable] struct{} + +type Gauge struct{} + +func (*Gauge) Set(float64) {} + +func NewMultiLabelMapWithRegistry[T comparable](m *Registry, name string, promType, helpText string) *MultiLabelMap[T] { + return nil +} + +func (*noopMap[T]) Add(T, int64) {} +func (*noopMap[T]) Set(T, any) {} + +func (r *Registry) Handler(any, any) {} // no-op HTTP handler diff --git a/util/usermetric/usermetric.go b/util/usermetric/usermetric.go index 74e9447a64bbb..1805a5dbee626 100644 --- a/util/usermetric/usermetric.go +++ b/util/usermetric/usermetric.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_usermetrics + // Package usermetric provides a container and handler // for user-facing metrics. package usermetric @@ -25,6 +27,10 @@ type Registry struct { m Metrics } +// MultiLabelMap is an alias for metrics.MultiLabelMap in the common case, +// or an alias to a lighter type when usermetrics are omitted from the build. +type MultiLabelMap[T comparable] = metrics.MultiLabelMap[T] + // NewMultiLabelMapWithRegistry creates and register a new // MultiLabelMap[T] variable with the given name and returns it. // The variable is registered with the userfacing metrics package. diff --git a/util/winutil/restartmgr_windows.go b/util/winutil/restartmgr_windows.go index a52e2fee9f933..6f549de557653 100644 --- a/util/winutil/restartmgr_windows.go +++ b/util/winutil/restartmgr_windows.go @@ -19,7 +19,6 @@ import ( "github.com/dblohm7/wingoes" "golang.org/x/sys/windows" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) var ( @@ -538,7 +537,7 @@ func (rps RestartableProcesses) Terminate(logf logger.Logf, exitCode uint32, tim } if len(errs) != 0 { - return multierr.New(errs...) + return errors.Join(errs...) } return nil } diff --git a/util/winutil/winutil_windows.go b/util/winutil/winutil_windows.go index 5dde9a347d7f7..c935b210e9e6a 100644 --- a/util/winutil/winutil_windows.go +++ b/util/winutil/winutil_windows.go @@ -8,8 +8,10 @@ import ( "fmt" "log" "math" + "os" "os/exec" "os/user" + "path/filepath" "reflect" "runtime" "strings" @@ -33,6 +35,10 @@ var ErrNoShell = errors.New("no Shell process is present") // ErrNoValue is returned when the value doesn't exist in the registry. var ErrNoValue = registry.ErrNotExist +// ErrBadRegValueFormat is returned when a string value does not match the +// expected format. +var ErrBadRegValueFormat = errors.New("registry value formatted incorrectly") + // GetDesktopPID searches the PID of the process that's running the // currently active desktop. Returns ErrNoShell if the shell is not present. // Usually the PID will be for explorer.exe. 
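// Sketch, not from this change: the MultiLabelMap alias and the
// ts_omit_usermetrics noop types above let metric call sites compile unchanged
// whether or not user metrics are built in. The label struct and metric name
// below are invented for illustration.

package example

import "tailscale.com/util/usermetric"

// routeLabels is a hypothetical label set for this sketch; MultiLabelMap
// label fields are strings.
type routeLabels struct {
	Family string // "ipv4" or "ipv6"
}

// newRouteCounter registers a counter with reg. Under ts_omit_usermetrics the
// returned map is the noop alias and Add below does nothing.
func newRouteCounter(reg *usermetric.Registry) *usermetric.MultiLabelMap[routeLabels] {
	return usermetric.NewMultiLabelMapWithRegistry[routeLabels](
		reg,
		"tailscaled_example_routes_total", // hypothetical metric name
		"counter",
		"Example counter of advertised routes.",
	)
}

func recordRoute(m *usermetric.MultiLabelMap[routeLabels]) {
	m.Add(routeLabels{Family: "ipv4"}, 1)
}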
@@ -947,3 +953,22 @@ func IsDomainName(name string) (bool, error) { return isDomainName(name16) } + +// GUIPathFromReg obtains the path to the client GUI executable from the +// registry value that was written during installation. +func GUIPathFromReg() (string, error) { + regPath, err := GetRegString("GUIPath") + if err != nil { + return "", err + } + + if !filepath.IsAbs(regPath) { + return "", ErrBadRegValueFormat + } + + if _, err := os.Stat(regPath); err != nil { + return "", err + } + + return regPath, nil +} diff --git a/version/cmdname.go b/version/cmdname.go index 51e065438e3a5..c38544ce1642c 100644 --- a/version/cmdname.go +++ b/version/cmdname.go @@ -12,7 +12,7 @@ import ( "io" "os" "path" - "path/filepath" + "runtime" "strings" ) @@ -30,7 +30,7 @@ func CmdName() string { func cmdName(exe string) string { // fallbackName, the lowercase basename of the executable, is what we return if // we can't find the Go module metadata embedded in the file. - fallbackName := filepath.Base(strings.TrimSuffix(strings.ToLower(exe), ".exe")) + fallbackName := prepExeNameForCmp(exe, runtime.GOARCH) var ret string info, err := findModuleInfo(exe) @@ -45,10 +45,10 @@ func cmdName(exe string) string { break } } - if strings.HasPrefix(ret, "wg") && fallbackName == "tailscale-ipn" { - // The tailscale-ipn.exe binary for internal build system packaging reasons - // has a path of "tailscale.io/win/wg64", "tailscale.io/win/wg32", etc. - // Ignore that name and use "tailscale-ipn" instead. + if runtime.GOOS == "windows" && strings.HasPrefix(ret, "gui") && checkPreppedExeNameForGUI(fallbackName) { + // The GUI binary for internal build system packaging reasons + // has a path of "tailscale.io/win/gui". + // Ignore that name and use fallbackName instead. return fallbackName } if ret == "" { diff --git a/version/exename.go b/version/exename.go new file mode 100644 index 0000000000000..d5047c2038ffe --- /dev/null +++ b/version/exename.go @@ -0,0 +1,25 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package version + +import ( + "path/filepath" + "strings" +) + +// prepExeNameForCmp strips any extension and arch suffix from exe, and +// lowercases it. +func prepExeNameForCmp(exe, arch string) string { + baseNoExt := strings.ToLower(strings.TrimSuffix(filepath.Base(exe), filepath.Ext(exe))) + archSuffix := "-" + arch + return strings.TrimSuffix(baseNoExt, archSuffix) +} + +func checkPreppedExeNameForGUI(preppedExeName string) bool { + return preppedExeName == "tailscale-ipn" || preppedExeName == "tailscale-gui" +} + +func isGUIExeName(exe, arch string) bool { + return checkPreppedExeNameForGUI(prepExeNameForCmp(exe, arch)) +} diff --git a/version/prop.go b/version/prop.go index 9327e6fe6d0f4..0d6a5c00df375 100644 --- a/version/prop.go +++ b/version/prop.go @@ -159,7 +159,9 @@ func IsWindowsGUI() bool { if err != nil { return false } - return strings.EqualFold(exe, "tailscale-ipn.exe") || strings.EqualFold(exe, "tailscale-ipn") + // It is okay to use GOARCH here because we're checking whether our + // _own_ process is the GUI. 
+ return isGUIExeName(exe, runtime.GOARCH) }) } diff --git a/version/version_internal_test.go b/version/version_internal_test.go index 19aeab44228bd..b3b848276e820 100644 --- a/version/version_internal_test.go +++ b/version/version_internal_test.go @@ -25,3 +25,38 @@ func TestIsValidLongWithTwoRepos(t *testing.T) { } } } + +func TestPrepExeNameForCmp(t *testing.T) { + cases := []struct { + exe string + want string + }{ + { + "tailscale-ipn.exe", + "tailscale-ipn", + }, + { + "tailscale-gui-amd64.exe", + "tailscale-gui", + }, + { + "tailscale-gui-amd64", + "tailscale-gui", + }, + { + "tailscale-ipn", + "tailscale-ipn", + }, + { + "TaIlScAlE-iPn.ExE", + "tailscale-ipn", + }, + } + + for _, c := range cases { + got := prepExeNameForCmp(c.exe, "amd64") + if got != c.want { + t.Errorf("prepExeNameForCmp(%q) = %q; want %q", c.exe, got, c.want) + } + } +} diff --git a/wgengine/bench/wg.go b/wgengine/bench/wg.go index 9b195bdb78fde..4de7677f26257 100644 --- a/wgengine/bench/wg.go +++ b/wgengine/bench/wg.go @@ -53,7 +53,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. ListenPort: 0, Tun: t1, SetSubsystem: s1.Set, - HealthTracker: s1.HealthTracker(), + HealthTracker: s1.HealthTracker.Get(), }) if err != nil { log.Fatalf("e1 init: %v", err) @@ -80,7 +80,7 @@ func setupWGTest(b *testing.B, logf logger.Logf, traf *TrafficGen, a1, a2 netip. ListenPort: 0, Tun: t2, SetSubsystem: s2.Set, - HealthTracker: s2.HealthTracker(), + HealthTracker: s2.HealthTracker.Get(), }) if err != nil { log.Fatalf("e2 init: %v", err) diff --git a/wgengine/magicsock/cloudinfo.go b/wgengine/magicsock/cloudinfo.go index 1de369631314c..0db56b3f6c514 100644 --- a/wgengine/magicsock/cloudinfo.go +++ b/wgengine/magicsock/cloudinfo.go @@ -17,6 +17,7 @@ import ( "strings" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/types/logger" "tailscale.com/util/cloudenv" ) @@ -34,6 +35,9 @@ type cloudInfo struct { } func newCloudInfo(logf logger.Logf) *cloudInfo { + if !buildfeatures.HasCloud { + return nil + } tr := &http.Transport{ DisableKeepAlives: true, Dial: (&net.Dialer{ @@ -53,6 +57,9 @@ func newCloudInfo(logf logger.Logf) *cloudInfo { // if the tailscaled process is running in a known cloud and there are any such // IPs present. func (ci *cloudInfo) GetPublicIPs(ctx context.Context) ([]netip.Addr, error) { + if !buildfeatures.HasCloud { + return nil, nil + } switch ci.cloud { case cloudenv.AWS: ret, err := ci.getAWS(ctx) diff --git a/wgengine/magicsock/debughttp.go b/wgengine/magicsock/debughttp.go index a0159d21e592f..9aecab74b4278 100644 --- a/wgengine/magicsock/debughttp.go +++ b/wgengine/magicsock/debughttp.go @@ -13,6 +13,8 @@ import ( "strings" "time" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/tailcfg" "tailscale.com/tstime/mono" "tailscale.com/types/key" @@ -24,6 +26,11 @@ import ( // /debug/magicsock) or via peerapi to a peer that's owned by the same // user (so they can e.g. inspect their phones). 
func (c *Conn) ServeHTTPDebug(w http.ResponseWriter, r *http.Request) { + if !buildfeatures.HasDebug { + http.Error(w, feature.ErrUnavailable.Error(), http.StatusNotImplemented) + return + } + c.mu.Lock() defer c.mu.Unlock() diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index b5fc36bb8aa9c..37a4f1a64ee02 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -19,7 +19,6 @@ import ( "tailscale.com/derp" "tailscale.com/derp/derphttp" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/dnscache" "tailscale.com/net/netcheck" "tailscale.com/net/tsaddr" @@ -28,6 +27,7 @@ import ( "tailscale.com/tstime/mono" "tailscale.com/types/key" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/mak" "tailscale.com/util/rands" "tailscale.com/util/testenv" @@ -717,8 +717,8 @@ func (c *Conn) processDERPReadResult(dm derpReadResult, b []byte) (n int, ep *en } ep.noteRecvActivity(srcAddr, mono.Now()) - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, srcAddr.ap, 1, dm.n) + if update := c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(ep.nodeAddr, 0), srcAddr.ap, 1, dm.n, true) } c.metrics.inboundPacketsDERPTotal.Add(1) @@ -836,7 +836,6 @@ func (c *Conn) maybeCloseDERPsOnRebind(okayLocalIPs []netip.Prefix) { c.closeOrReconnectDERPLocked(regionID, "rebind-default-route-change") continue } - regionID := regionID dc := ad.c go func() { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 1f36aabd3baf8..2010775a10d6e 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -879,14 +879,6 @@ func (de *endpoint) setHeartbeatDisabled(v bool) { // discoverUDPRelayPathsLocked starts UDP relay path discovery. func (de *endpoint) discoverUDPRelayPathsLocked(now mono.Time) { - if !de.c.hasPeerRelayServers.Load() { - // Changes in this value between its access and the logic following - // are fine, we will eventually do the "right" thing during future path - // discovery. The worst case is we suppress path discovery for the - // current cycle, or we unnecessarily call into [relayManager] and do - // some wasted work. - return - } de.lastUDPRelayPathDiscovery = now lastBest := de.bestAddr lastBestIsTrusted := mono.Now().Before(de.trustBestAddrUntil) @@ -899,6 +891,14 @@ func (de *endpoint) wantUDPRelayPathDiscoveryLocked(now mono.Time) bool { if runtime.GOOS == "js" { return false } + if !de.c.hasPeerRelayServers.Load() { + // Changes in this value between its access and a call to + // [endpoint.discoverUDPRelayPathsLocked] are fine, we will eventually + // do the "right" thing during future path discovery. The worst case is + // we suppress path discovery for the current cycle, or we unnecessarily + // call into [relayManager] and do some wasted work. + return false + } if !de.relayCapable { return false } @@ -1013,14 +1013,18 @@ func (de *endpoint) discoPing(res *ipnstate.PingResult, size int, cb func(*ipnst // order to also try all candidate direct paths. fallthrough default: - // Ping all candidate direct paths. This work overlaps with what - // [de.heartbeat] will periodically fire when it calls - // [de.sendDiscoPingsLocked], but a user-initiated [pingCLI] is a - // "do it now" operation that should not be subject to + // Ping all candidate direct paths and start peer relay path discovery, + // if appropriate. 
This work overlaps with what [de.heartbeat] will + // periodically fire when it calls [de.sendDiscoPingsLocked] and + // [de.discoveryUDPRelayPathsLocked], but a user-initiated [pingCLI] is + // a "do it now" operation that should not be subject to // [heartbeatInterval] tick or [discoPingInterval] rate-limiting. for ep := range de.endpointState { de.startDiscoPingLocked(epAddr{ap: ep}, now, pingCLI, size, resCB) } + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } } } @@ -1046,14 +1050,10 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } } else if !udpAddr.isDirect() || now.After(de.trustBestAddrUntil) { de.sendDiscoPingsLocked(now, true) + if de.wantUDPRelayPathDiscoveryLocked(now) { + de.discoverUDPRelayPathsLocked(now) + } } - // TODO(jwhited): consider triggering UDP relay path discovery here under - // certain conditions. We currently only trigger it in heartbeat(), which - // is both good and bad. It's good because the first heartbeat() tick is 3s - // after the first packet, which gives us time to discover a UDP direct - // path and potentially avoid what would be wasted UDP relay path discovery - // work. It's bad because we might not discover a UDP direct path, and we - // incur a 3s delay before we try to discover a UDP relay path. de.noteTxActivityExtTriggerLocked(now) de.lastSendAny = now de.mu.Unlock() @@ -1105,8 +1105,8 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } // TODO(raggi): needs updating for accuracy, as in error conditions we may have partial sends. - if stats := de.c.stats.Load(); err == nil && stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, udpAddr.ap, len(buffs), txBytes) + if update := de.c.connCounter.Load(); err == nil && update != nil { + update(0, netip.AddrPortFrom(de.nodeAddr, 0), udpAddr.ap, len(buffs), txBytes, false) } } if derpAddr.IsValid() { @@ -1123,8 +1123,8 @@ func (de *endpoint) send(buffs [][]byte, offset int) error { } } - if stats := de.c.stats.Load(); stats != nil { - stats.UpdateTxPhysical(de.nodeAddr, derpAddr, len(buffs), txBytes) + if update := de.c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(de.nodeAddr, 0), derpAddr, len(buffs), txBytes, false) } if allOk { return nil @@ -1768,11 +1768,6 @@ func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src epAdd // we don't clear direct UDP paths on disco ping timeout (see // discoPingTimeout). if betterAddr(thisPong, de.bestAddr) { - if src.vni.IsSet() { - // This would be unexpected. Switching to a Geneve-encapsulated - // path should only happen in de.relayEndpointReady(). 
- de.c.logf("[unexpected] switching to Geneve-encapsulated path %v from %v", thisPong, de.bestAddr) - } de.c.logf("magicsock: disco: node %v %v now using %v mtu=%v tx=%x", de.publicKey.ShortString(), de.discoShort(), sp.to, thisPong.wireMTU, m.TxID[:6]) de.debugUpdates.Add(EndpointChange{ When: time.Now(), diff --git a/wgengine/magicsock/endpoint_test.go b/wgengine/magicsock/endpoint_test.go index 666d862310c44..df1c9340657e4 100644 --- a/wgengine/magicsock/endpoint_test.go +++ b/wgengine/magicsock/endpoint_test.go @@ -173,130 +173,110 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { wantMaybe bool }{ { - "nil probeUDPLifetime", - higher, - &lower, - func() *probeUDPLifetime { + name: "nil probeUDPLifetime", + localDisco: higher, + remoteDisco: &lower, + probeUDPLifetimeFn: func() *probeUDPLifetime { return nil }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + bestAddr: addr, }, { - "local higher disco key", - higher, - &lower, - newProbeUDPLifetime, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "local higher disco key", + localDisco: higher, + remoteDisco: &lower, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addr, }, { - "remote no disco key", - higher, - nil, - newProbeUDPLifetime, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "remote no disco key", + localDisco: higher, + remoteDisco: nil, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addr, }, { - "invalid bestAddr", - lower, - &higher, - newProbeUDPLifetime, - addrQuality{}, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 - }, - false, + name: "invalid bestAddr", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: newProbeUDPLifetime, + bestAddr: addrQuality{}, }, { - "cycle started too recently", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = false - l.cycleStartedAt = time.Now() - return l - }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { - return 0 + name: "cycle started too recently", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = false + lt.cycleStartedAt = time.Now() + return lt }, - false, + bestAddr: addr, }, { - "maybe cliff 0 cycle not active", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = false - l.cycleStartedAt = time.Now().Add(-l.config.CycleCanStartEvery).Add(-time.Second) - return l + name: "maybe cliff 0 cycle not active", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = false + lt.cycleStartedAt = time.Now().Add(-lt.config.CycleCanStartEvery).Add(-time.Second) + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[0] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 0", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 0 - return l + name: "maybe cliff 0", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 0 + return lt }, - addr, - func(lifetime *probeUDPLifetime) 
time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[0] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 1", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 1 - return l + name: "maybe cliff 1", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 1 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[1] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, { - "maybe cliff 2", - lower, - &higher, - func() *probeUDPLifetime { - l := newProbeUDPLifetime() - l.cycleActive = true - l.currentCliff = 2 - return l + name: "maybe cliff 2", + localDisco: lower, + remoteDisco: &higher, + probeUDPLifetimeFn: func() *probeUDPLifetime { + lt := newProbeUDPLifetime() + lt.cycleActive = true + lt.currentCliff = 2 + return lt }, - addr, - func(lifetime *probeUDPLifetime) time.Duration { + bestAddr: addr, + wantAfterInactivityForFn: func(lifetime *probeUDPLifetime) time.Duration { return lifetime.config.Cliffs[2] - udpLifetimeProbeCliffSlack }, - true, + wantMaybe: true, }, } for _, tt := range tests { @@ -316,7 +296,10 @@ func Test_endpoint_maybeProbeUDPLifetimeLocked(t *testing.T) { p := tt.probeUDPLifetimeFn() de.probeUDPLifetime = p gotAfterInactivityFor, gotMaybe := de.maybeProbeUDPLifetimeLocked() - wantAfterInactivityFor := tt.wantAfterInactivityForFn(p) + var wantAfterInactivityFor time.Duration + if tt.wantAfterInactivityForFn != nil { + wantAfterInactivityFor = tt.wantAfterInactivityForFn(p) + } if gotAfterInactivityFor != wantAfterInactivityFor { t.Errorf("maybeProbeUDPLifetimeLocked() gotAfterInactivityFor = %v, want %v", gotAfterInactivityFor, wantAfterInactivityFor) } diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index 8ab7957ca2bb6..e3c2d478e9882 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -11,7 +11,6 @@ import ( "context" "encoding/binary" "errors" - "expvar" "fmt" "io" "net" @@ -29,22 +28,22 @@ import ( "github.com/tailscale/wireguard-go/device" "go4.org/mem" "golang.org/x/net/ipv6" - "tailscale.com/control/controlknobs" "tailscale.com/disco" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" + "tailscale.com/feature/condlite/expvar" "tailscale.com/health" "tailscale.com/hostinfo" "tailscale.com/ipn/ipnstate" "tailscale.com/net/batching" - "tailscale.com/net/connstats" "tailscale.com/net/netcheck" "tailscale.com/net/neterror" "tailscale.com/net/netmon" "tailscale.com/net/netns" "tailscale.com/net/packet" "tailscale.com/net/ping" - "tailscale.com/net/portmapper" + "tailscale.com/net/portmapper/portmappertype" "tailscale.com/net/sockopts" "tailscale.com/net/sockstats" "tailscale.com/net/stun" @@ -56,6 +55,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/lazy" "tailscale.com/types/logger" + "tailscale.com/types/netlogfunc" "tailscale.com/types/netmap" "tailscale.com/types/nettype" "tailscale.com/types/views" @@ -67,6 +67,7 @@ import ( "tailscale.com/util/testenv" "tailscale.com/util/usermetric" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgint" ) @@ -175,17 +176,11 @@ type Conn struct { 
connCtxCancel func() // closes connCtx donec <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call - // These [eventbus.Subscriber] fields are solely accessed by - // consumeEventbusTopics once initialized. - pmSub *eventbus.Subscriber[portmapper.Mapping] - filterSub *eventbus.Subscriber[FilterUpdate] - nodeViewsSub *eventbus.Subscriber[NodeViewsUpdate] - nodeMutsSub *eventbus.Subscriber[NodeMutationsUpdate] - syncSub *eventbus.Subscriber[syncPoint] + // A publisher for synchronization points to ensure correct ordering of + // config changes between magicsock and wireguard. syncPub *eventbus.Publisher[syncPoint] allocRelayEndpointPub *eventbus.Publisher[UDPRelayAllocReq] - allocRelayEndpointSub *eventbus.Subscriber[UDPRelayAllocResp] - subsDoneCh chan struct{} // closed when consumeEventbusTopics returns + portUpdatePub *eventbus.Publisher[router.PortUpdate] // pconn4 and pconn6 are the underlying UDP sockets used to // send/receive packets for wireguard and other magicsock @@ -207,11 +202,8 @@ type Conn struct { // portMapper is the NAT-PMP/PCP/UPnP prober/client, for requesting // port mappings from NAT devices. - portMapper *portmapper.Client - - // portMapperLogfUnregister is the function to call to unregister - // the portmapper log limiter. - portMapperLogfUnregister func() + // If nil, the portmapper is disabled. + portMapper portmappertype.Client // derpRecvCh is used by receiveDERP to read DERP messages. // It must have buffer size > 0; see issue 3736. @@ -269,8 +261,8 @@ type Conn struct { //lint:ignore U1000 used on Linux/Darwin only peerMTUEnabled atomic.Bool - // stats maintains per-connection counters. - stats atomic.Pointer[connstats.Statistics] + // connCounter maintains per-connection counters. + connCounter syncs.AtomicValue[netlogfunc.ConnectionCounter] // captureHook, if non-nil, is the pcap logging callback when capturing. captureHook syncs.AtomicValue[packet.CaptureCallback] @@ -403,10 +395,6 @@ type Conn struct { // wgPinger is the WireGuard only pinger used for latency measurements. wgPinger lazy.SyncValue[*ping.Pinger] - // onPortUpdate is called with the new port when magicsock rebinds to - // a new port. - onPortUpdate func(port uint16, network string) - // getPeerByKey optionally specifies a function to look up a peer's // wireguard state by its public key. If nil, it's not used. getPeerByKey func(key.NodePublic) (_ wgint.Peer, ok bool) @@ -477,7 +465,8 @@ type Options struct { // NoteRecvActivity, if provided, is a func for magicsock to call // whenever it receives a packet from a a peer if it's been more // than ~10 seconds since the last one. (10 seconds is somewhat - // arbitrary; the sole user just doesn't need or want it called on + // arbitrary; the sole user, lazy WireGuard configuration, + // just doesn't need or want it called on // every packet, just every minute or two for WireGuard timeouts, // and 10 seconds seems like a good trade-off between often enough // and not too often.) @@ -501,10 +490,6 @@ type Options struct { // If nil, they're ignored and not updated. ControlKnobs *controlknobs.Knobs - // OnPortUpdate is called with the new port when magicsock rebinds to - // a new port. - OnPortUpdate func(port uint16, network string) - // PeerByKeyFunc optionally specifies a function to look up a peer's // WireGuard state by its public key. If nil, it's not used. // In regular use, this will be wgengine.(*userspaceEngine).PeerByKey. 
@@ -640,37 +625,6 @@ func newConn(logf logger.Logf) *Conn { return c } -// consumeEventbusTopics consumes events from all [Conn]-relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [portmapper.Mapping] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). -func (c *Conn) consumeEventbusTopics() { - defer close(c.subsDoneCh) - - for { - select { - case <-c.pmSub.Done(): - return - case <-c.pmSub.Events(): - c.onPortMapChanged() - case filterUpdate := <-c.filterSub.Events(): - c.onFilterUpdate(filterUpdate) - case nodeViews := <-c.nodeViewsSub.Events(): - c.onNodeViewsUpdate(nodeViews) - case nodeMuts := <-c.nodeMutsSub.Events(): - c.onNodeMutationsUpdate(nodeMuts) - case syncPoint := <-c.syncSub.Events(): - c.dlogf("magicsock: received sync point after reconfig") - syncPoint.Signal() - case allocResp := <-c.allocRelayEndpointSub.Events(): - c.onUDPRelayAllocResp(allocResp) - } - } -} - func (c *Conn) onUDPRelayAllocResp(allocResp UDPRelayAllocResp) { c.mu.Lock() defer c.mu.Unlock() @@ -733,47 +687,51 @@ func NewConn(opts Options) (*Conn, error) { c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity - c.eventClient = c.eventBus.Client("magicsock.Conn") - - // Subscribe calls must return before NewConn otherwise published - // events can be missed. - c.pmSub = eventbus.Subscribe[portmapper.Mapping](c.eventClient) - c.filterSub = eventbus.Subscribe[FilterUpdate](c.eventClient) - c.nodeViewsSub = eventbus.Subscribe[NodeViewsUpdate](c.eventClient) - c.nodeMutsSub = eventbus.Subscribe[NodeMutationsUpdate](c.eventClient) - c.syncSub = eventbus.Subscribe[syncPoint](c.eventClient) - c.syncPub = eventbus.Publish[syncPoint](c.eventClient) - c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](c.eventClient) - c.allocRelayEndpointSub = eventbus.Subscribe[UDPRelayAllocResp](c.eventClient) - c.subsDoneCh = make(chan struct{}) - go c.consumeEventbusTopics() + // Set up publishers and subscribers. Subscribe calls must return before + // NewConn otherwise published events can be missed. + ec := c.eventBus.Client("magicsock.Conn") + c.eventClient = ec + c.syncPub = eventbus.Publish[syncPoint](ec) + c.allocRelayEndpointPub = eventbus.Publish[UDPRelayAllocReq](ec) + c.portUpdatePub = eventbus.Publish[router.PortUpdate](ec) + eventbus.SubscribeFunc(ec, c.onPortMapChanged) + eventbus.SubscribeFunc(ec, c.onFilterUpdate) + eventbus.SubscribeFunc(ec, c.onNodeViewsUpdate) + eventbus.SubscribeFunc(ec, c.onNodeMutationsUpdate) + eventbus.SubscribeFunc(ec, func(sp syncPoint) { + c.dlogf("magicsock: received sync point after reconfig") + sp.Signal() + }) + eventbus.SubscribeFunc(ec, c.onUDPRelayAllocResp) + + c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) + c.donec = c.connCtx.Done() // Don't log the same log messages possibly every few seconds in our // portmapper. 
- portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") - portmapperLogf, c.portMapperLogfUnregister = netmon.LinkChangeLogLimiter(portmapperLogf, opts.NetMon) - portMapOpts := &portmapper.DebugKnobs{ - DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, - } - c.portMapper = portmapper.NewClient(portmapper.Config{ - EventBus: c.eventBus, - Logf: portmapperLogf, - NetMon: opts.NetMon, - DebugKnobs: portMapOpts, - ControlKnobs: opts.ControlKnobs, - }) - c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) + if buildfeatures.HasPortMapper && !opts.DisablePortMapper { + portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") + portmapperLogf = netmon.LinkChangeLogLimiter(c.connCtx, portmapperLogf, opts.NetMon) + var disableUPnP func() bool + if c.controlKnobs != nil { + disableUPnP = c.controlKnobs.DisableUPnP.Load + } + newPortMapper, ok := portmappertype.HookNewPortMapper.GetOk() + if ok { + c.portMapper = newPortMapper(portmapperLogf, opts.EventBus, opts.NetMon, disableUPnP, c.onlyTCP443.Load) + } else if !testenv.InTest() { + panic("unexpected: HookNewPortMapper not set") + } + } + c.netMon = opts.NetMon c.health = opts.HealthTracker - c.onPortUpdate = opts.OnPortUpdate c.getPeerByKey = opts.PeerByKeyFunc if err := c.rebind(keepCurrentPort); err != nil { return nil, err } - c.connCtx, c.connCtxCancel = context.WithCancel(context.Background()) - c.donec = c.connCtx.Done() c.netChecker = &netcheck.Client{ Logf: logger.WithPrefix(c.logf, "netcheck: "), NetMon: c.netMon, @@ -845,6 +803,21 @@ func registerMetrics(reg *usermetric.Registry) *metrics { metricRecvDataPacketsDERP.Register(&m.inboundPacketsDERPTotal) metricRecvDataPacketsPeerRelayIPv4.Register(&m.inboundPacketsPeerRelayIPv4Total) metricRecvDataPacketsPeerRelayIPv6.Register(&m.inboundPacketsPeerRelayIPv6Total) + metricRecvDataBytesIPv4.Register(&m.inboundBytesIPv4Total) + metricRecvDataBytesIPv6.Register(&m.inboundBytesIPv6Total) + metricRecvDataBytesDERP.Register(&m.inboundBytesDERPTotal) + metricRecvDataBytesPeerRelayIPv4.Register(&m.inboundBytesPeerRelayIPv4Total) + metricRecvDataBytesPeerRelayIPv6.Register(&m.inboundBytesPeerRelayIPv6Total) + metricSendDataPacketsIPv4.Register(&m.outboundPacketsIPv4Total) + metricSendDataPacketsIPv6.Register(&m.outboundPacketsIPv6Total) + metricSendDataPacketsDERP.Register(&m.outboundPacketsDERPTotal) + metricSendDataPacketsPeerRelayIPv4.Register(&m.outboundPacketsPeerRelayIPv4Total) + metricSendDataPacketsPeerRelayIPv6.Register(&m.outboundPacketsPeerRelayIPv6Total) + metricSendDataBytesIPv4.Register(&m.outboundBytesIPv4Total) + metricSendDataBytesIPv6.Register(&m.outboundBytesIPv6Total) + metricSendDataBytesDERP.Register(&m.outboundBytesDERPTotal) + metricSendDataBytesPeerRelayIPv4.Register(&m.outboundBytesPeerRelayIPv4Total) + metricSendDataBytesPeerRelayIPv6.Register(&m.outboundBytesPeerRelayIPv6Total) metricSendUDP.Register(&m.outboundPacketsIPv4Total) metricSendUDP.Register(&m.outboundPacketsIPv6Total) metricSendDERP.Register(&m.outboundPacketsDERPTotal) @@ -882,12 +855,27 @@ func registerMetrics(reg *usermetric.Registry) *metrics { // deregisterMetrics unregisters the underlying usermetrics expvar counters // from clientmetrics. 
-func deregisterMetrics(m *metrics) { +func deregisterMetrics() { metricRecvDataPacketsIPv4.UnregisterAll() metricRecvDataPacketsIPv6.UnregisterAll() metricRecvDataPacketsDERP.UnregisterAll() metricRecvDataPacketsPeerRelayIPv4.UnregisterAll() metricRecvDataPacketsPeerRelayIPv6.UnregisterAll() + metricRecvDataBytesIPv4.UnregisterAll() + metricRecvDataBytesIPv6.UnregisterAll() + metricRecvDataBytesDERP.UnregisterAll() + metricRecvDataBytesPeerRelayIPv4.UnregisterAll() + metricRecvDataBytesPeerRelayIPv6.UnregisterAll() + metricSendDataPacketsIPv4.UnregisterAll() + metricSendDataPacketsIPv6.UnregisterAll() + metricSendDataPacketsDERP.UnregisterAll() + metricSendDataPacketsPeerRelayIPv4.UnregisterAll() + metricSendDataPacketsPeerRelayIPv6.UnregisterAll() + metricSendDataBytesIPv4.UnregisterAll() + metricSendDataBytesIPv6.UnregisterAll() + metricSendDataBytesDERP.UnregisterAll() + metricSendDataBytesPeerRelayIPv4.UnregisterAll() + metricSendDataBytesPeerRelayIPv6.UnregisterAll() metricSendUDP.UnregisterAll() metricSendDERP.UnregisterAll() metricSendPeerRelay.UnregisterAll() @@ -898,6 +886,9 @@ func deregisterMetrics(m *metrics) { // can be called with a nil argument to uninstall the capture // hook. func (c *Conn) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } c.captureHook.Store(cb) } @@ -1023,6 +1014,7 @@ func (c *Conn) setEndpoints(endpoints []tailcfg.Endpoint) (changed bool) { func (c *Conn) SetStaticEndpoints(ep views.Slice[netip.AddrPort]) { c.mu.Lock() if reflect.DeepEqual(c.staticEndpoints.AsSlice(), ep.AsSlice()) { + c.mu.Unlock() return } c.staticEndpoints = ep @@ -1086,7 +1078,9 @@ func (c *Conn) updateNetInfo(ctx context.Context) (*netcheck.Report, error) { UPnP: report.UPnP, PMP: report.PMP, PCP: report.PCP, - HavePortMap: c.portMapper.HaveMapping(), + } + if c.portMapper != nil { + ni.HavePortMap = c.portMapper.HaveMapping() } for rid, d := range report.RegionV4Latency { ni.DERPLatency[fmt.Sprintf("%d-v4", rid)] = d.Seconds() @@ -1253,7 +1247,7 @@ func (c *Conn) DiscoPublicKey() key.DiscoPublic { func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, error) { var havePortmap bool var portmapExt netip.AddrPort - if runtime.GOOS != "js" { + if runtime.GOOS != "js" && c.portMapper != nil { portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne() } @@ -1293,7 +1287,7 @@ func (c *Conn) determineEndpoints(ctx context.Context) ([]tailcfg.Endpoint, erro } // If we didn't have a portmap earlier, maybe it's done by now. 
- if !havePortmap { + if !havePortmap && c.portMapper != nil { portmapExt, havePortmap = c.portMapper.GetCachedMappingOrStartCreatingOne() } if havePortmap { @@ -1565,6 +1559,7 @@ func (c *Conn) maybeRebindOnError(err error) { if c.lastErrRebind.Load().Before(time.Now().Add(-5 * time.Second)) { c.logf("magicsock: performing rebind due to %q", reason) + c.lastErrRebind.Store(time.Now()) c.Rebind() go c.ReSTUN(reason) } else { @@ -1711,7 +1706,7 @@ func (c *Conn) mkReceiveFunc(ruc *RebindingUDPConn, healthItem *health.ReceiveFu var epCache epAddrEndpointCache return func(buffs [][]byte, sizes []int, eps []conn.Endpoint) (_ int, retErr error) { - if healthItem != nil { + if buildfeatures.HasHealth && healthItem != nil { healthItem.Enter() defer healthItem.Exit() defer func() { @@ -1866,8 +1861,10 @@ func (c *Conn) receiveIP(b []byte, ipp netip.AddrPort, cache *epAddrEndpointCach now := mono.Now() ep.lastRecvUDPAny.StoreAtomic(now) connNoted := ep.noteRecvActivity(src, now) - if stats := c.stats.Load(); stats != nil { - stats.UpdateRxPhysical(ep.nodeAddr, ipp, 1, geneveInclusivePacketLen) + if buildfeatures.HasNetLog { + if update := c.connCounter.Load(); update != nil { + update(0, netip.AddrPortFrom(ep.nodeAddr, 0), ipp, 1, geneveInclusivePacketLen, true) + } } if src.vni.IsSet() && (connNoted || looksLikeInitiationMsg(b)) { // connNoted is periodic, but we also want to verify if the peer is who @@ -2539,10 +2536,7 @@ func (c *Conn) handlePingLocked(dm *disco.Ping, src epAddr, di *discoInfo, derpN // Remember this route if not present. var dup bool if isDerp { - if ep, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { - if ep.addCandidateEndpoint(src.ap, dm.TxID) { - return - } + if _, ok := c.peerMap.endpointForNodeKey(derpNodeSrc); ok { numNodes = 1 } } else { @@ -2670,7 +2664,9 @@ func (c *Conn) SetNetworkUp(up bool) { if up { c.startDerpHomeConnectLocked() } else { - c.portMapper.NoteNetworkDown() + if c.portMapper != nil { + c.portMapper.NoteNetworkDown() + } c.closeAllDerpLocked("network-down") } } @@ -2982,8 +2978,13 @@ func (c *Conn) onNodeViewsUpdate(update NodeViewsUpdate) { filt := c.filt self := c.self peers := c.peers + isClosed := c.closed c.mu.Unlock() // release c.mu before potentially calling c.updateRelayServersSet which is O(m * n) + if isClosed { + return // nothing to do here, the conn is closed and the update is no longer relevant + } + if peersChanged || relayClientChanged { if !relayClientEnabled { c.relayManager.handleRelayServersSet(nil) @@ -3313,14 +3314,12 @@ func (c *connBind) isClosed() bool { // // Only the first close does anything. Any later closes return nil. func (c *Conn) Close() error { - // Close the [eventbus.Client] and wait for Conn.consumeEventbusTopics to - // return. Do this before acquiring c.mu: - // 1. Conn.consumeEventbusTopics event handlers also acquire c.mu, they can - // deadlock with c.Close(). - // 2. Conn.consumeEventbusTopics event handlers may not guard against - // undesirable post/in-progress Conn.Close() behaviors. + // Close the [eventbus.Client] to wait for subscribers to + // return before acquiring c.mu: + // 1. Event handlers also acquire c.mu, they can deadlock with c.Close(). + // 2. Event handlers may not guard against undesirable post/in-progress + // Conn.Close() behaviors. 
c.eventClient.Close() - <-c.subsDoneCh c.mu.Lock() defer c.mu.Unlock() @@ -3332,8 +3331,9 @@ func (c *Conn) Close() error { c.derpCleanupTimer.Stop() } c.stopPeriodicReSTUNTimerLocked() - c.portMapper.Close() - c.portMapperLogfUnregister() + if c.portMapper != nil { + c.portMapper.Close() + } c.peerMap.forEachEndpoint(func(ep *endpoint) { ep.stopAndReset() @@ -3364,7 +3364,7 @@ func (c *Conn) Close() error { pinger.Close() } - deregisterMetrics(c.metrics) + deregisterMetrics() return nil } @@ -3416,7 +3416,7 @@ func (c *Conn) shouldDoPeriodicReSTUNLocked() bool { return true } -func (c *Conn) onPortMapChanged() { c.ReSTUN("portmap-changed") } +func (c *Conn) onPortMapChanged(portmappertype.Mapping) { c.ReSTUN("portmap-changed") } // ReSTUN triggers an address discovery. // The provided why string is for debug logging only. @@ -3533,7 +3533,7 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur c.logf("magicsock: unable to bind %v port %d: %v", network, port, err) continue } - if c.onPortUpdate != nil { + if c.portUpdatePub.ShouldPublish() { _, gotPortStr, err := net.SplitHostPort(pconn.LocalAddr().String()) if err != nil { c.logf("could not parse port from %s: %w", pconn.LocalAddr().String(), err) @@ -3542,7 +3542,10 @@ func (c *Conn) bindSocket(ruc *RebindingUDPConn, network string, curPortFate cur if err != nil { c.logf("could not parse port from %s: %w", gotPort, err) } else { - c.onPortUpdate(uint16(gotPort), network) + c.portUpdatePub.Publish(router.PortUpdate{ + UDPPort: uint16(gotPort), + EndpointNetwork: network, + }) } } } @@ -3586,7 +3589,9 @@ func (c *Conn) rebind(curPortFate currentPortFate) error { if err := c.bindSocket(&c.pconn4, "udp4", curPortFate); err != nil { return fmt.Errorf("magicsock: Rebind IPv4 failed: %w", err) } - c.portMapper.SetLocalPort(c.LocalPort()) + if c.portMapper != nil { + c.portMapper.SetLocalPort(c.LocalPort()) + } c.UpdatePMTUD() return nil } @@ -3740,10 +3745,12 @@ func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) { }) } -// SetStatistics specifies a per-connection statistics aggregator. +// SetConnectionCounter specifies a per-connection statistics aggregator. // Nil may be specified to disable statistics gathering. -func (c *Conn) SetStatistics(stats *connstats.Statistics) { - c.stats.Store(stats) +func (c *Conn) SetConnectionCounter(fn netlogfunc.ConnectionCounter) { + if buildfeatures.HasNetLog { + c.connCounter.Store(fn) + } } // SetHomeless sets whether magicsock should idle harder and not have a DERP @@ -3947,13 +3954,20 @@ var ( metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed") metricSendDERPErrorQueue = clientmetric.NewCounter("magicsock_send_derp_error_queue") metricSendDERPDropped = clientmetric.NewCounter("magicsock_send_derp_dropped") - metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") metricSendUDPError = clientmetric.NewCounter("magicsock_send_udp_error") - metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") metricSendPeerRelayError = clientmetric.NewCounter("magicsock_send_peer_relay_error") - metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") metricSendDERPError = clientmetric.NewCounter("magicsock_send_derp_error") + // Sends (data) + // + // Note: Prior to v1.78 metricSendUDP & metricSendDERP counted sends of data + // AND disco packets. They were updated in v1.78 to only count data packets. + // metricSendPeerRelay was added in v1.86 and has always counted only data + // packets. 
+ metricSendUDP = clientmetric.NewAggregateCounter("magicsock_send_udp") + metricSendPeerRelay = clientmetric.NewAggregateCounter("magicsock_send_peer_relay") + metricSendDERP = clientmetric.NewAggregateCounter("magicsock_send_derp") + // Data packets (non-disco) metricSendData = clientmetric.NewCounter("magicsock_send_data") metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down") @@ -3962,6 +3976,23 @@ var ( metricRecvDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_ipv6") metricRecvDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv4") metricRecvDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_peer_relay_ipv6") + metricSendDataPacketsDERP = clientmetric.NewAggregateCounter("magicsock_send_data_derp") + metricSendDataPacketsIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_ipv4") + metricSendDataPacketsIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_ipv6") + metricSendDataPacketsPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_peer_relay_ipv4") + metricSendDataPacketsPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_peer_relay_ipv6") + + // Data bytes (non-disco) + metricRecvDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_derp") + metricRecvDataBytesIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv4") + metricRecvDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_ipv6") + metricRecvDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv4") + metricRecvDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_recv_data_bytes_peer_relay_ipv6") + metricSendDataBytesDERP = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_derp") + metricSendDataBytesIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_ipv4") + metricSendDataBytesIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_ipv6") + metricSendDataBytesPeerRelayIPv4 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_peer_relay_ipv4") + metricSendDataBytesPeerRelayIPv6 = clientmetric.NewAggregateCounter("magicsock_send_data_bytes_peer_relay_ipv6") // Disco packets metricSendDiscoUDP = clientmetric.NewCounter("magicsock_disco_send_udp") diff --git a/wgengine/magicsock/magicsock_default.go b/wgengine/magicsock/magicsock_default.go index 1c315034a6f75..88759d3acc2e3 100644 --- a/wgengine/magicsock/magicsock_default.go +++ b/wgengine/magicsock/magicsock_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !linux +//go:build !linux || ts_omit_listenrawdisco package magicsock diff --git a/wgengine/magicsock/magicsock_linux.go b/wgengine/magicsock/magicsock_linux.go index cad0e9b5e3134..f37e19165141f 100644 --- a/wgengine/magicsock/magicsock_linux.go +++ b/wgengine/magicsock/magicsock_linux.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build linux && !ts_omit_listenrawdisco + package magicsock import ( diff --git a/wgengine/magicsock/magicsock_test.go b/wgengine/magicsock/magicsock_test.go index 5774432d5a0b9..60620b14100f1 100644 --- a/wgengine/magicsock/magicsock_test.go +++ b/wgengine/magicsock/magicsock_test.go @@ -27,10 +27,12 @@ import ( "sync/atomic" "syscall" "testing" + "testing/synctest" "time" "unsafe" qt "github.com/frankban/quicktest" + 
"github.com/google/go-cmp/cmp" wgconn "github.com/tailscale/wireguard-go/conn" "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun/tuntest" @@ -39,13 +41,11 @@ import ( "golang.org/x/net/ipv4" "tailscale.com/cmd/testwrapper/flakytest" "tailscale.com/control/controlknobs" - "tailscale.com/derp" - "tailscale.com/derp/derphttp" + "tailscale.com/derp/derpserver" "tailscale.com/disco" "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/ipn/ipnstate" - "tailscale.com/net/connstats" "tailscale.com/net/netaddr" "tailscale.com/net/netcheck" "tailscale.com/net/netmon" @@ -67,6 +67,7 @@ import ( "tailscale.com/util/cibuild" "tailscale.com/util/clientmetric" "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/must" "tailscale.com/util/racebuild" "tailscale.com/util/set" @@ -111,9 +112,9 @@ func (c *Conn) WaitReady(t testing.TB) { } func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netip.Addr) (derpMap *tailcfg.DERPMap, cleanup func()) { - d := derp.NewServer(key.NewNode(), logf) + d := derpserver.New(key.NewNode(), logf) - httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d)) + httpsrv := httptest.NewUnstartedServer(derpserver.Handler(d)) httpsrv.Config.ErrorLog = logger.StdLogger(logf) httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) httpsrv.StartTLS() @@ -157,14 +158,14 @@ func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, st // happiness. type magicStack struct { privateKey key.NodePrivate - epCh chan []tailcfg.Endpoint // endpoint updates produced by this peer - stats *connstats.Statistics // per-connection statistics - conn *Conn // the magicsock itself - tun *tuntest.ChannelTUN // TUN device to send/receive packets - tsTun *tstun.Wrapper // wrapped tun that implements filtering and wgengine hooks - dev *device.Device // the wireguard-go Device that connects the previous things - wgLogger *wglog.Logger // wireguard-go log wrapper - netMon *netmon.Monitor // always non-nil + epCh chan []tailcfg.Endpoint // endpoint updates produced by this peer + counts netlogtype.CountsByConnection // per-connection statistics + conn *Conn // the magicsock itself + tun *tuntest.ChannelTUN // TUN device to send/receive packets + tsTun *tstun.Wrapper // wrapped tun that implements filtering and wgengine hooks + dev *device.Device // the wireguard-go Device that connects the previous things + wgLogger *wglog.Logger // wireguard-go log wrapper + netMon *netmon.Monitor // always non-nil metrics *usermetric.Registry } @@ -179,14 +180,13 @@ func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, der func newMagicStackWithKey(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap, privateKey key.NodePrivate) *magicStack { t.Helper() - bus := eventbus.New() - t.Cleanup(bus.Close) + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logf) if err != nil { t.Fatalf("netmon.New: %v", err) } - ht := new(health.Tracker) + ht := health.NewTracker(bus) var reg usermetric.Registry epCh := make(chan []tailcfg.Endpoint, 100) // arbitrary @@ -1143,22 +1143,19 @@ func testTwoDevicePing(t *testing.T, d *devices) { } } - m1.stats = connstats.NewStatistics(0, 0, nil) - defer m1.stats.Shutdown(context.Background()) - m1.conn.SetStatistics(m1.stats) - m2.stats = connstats.NewStatistics(0, 0, nil) - defer m2.stats.Shutdown(context.Background()) - m2.conn.SetStatistics(m2.stats) + 
m1.conn.SetConnectionCounter(m1.counts.Add) + m2.conn.SetConnectionCounter(m2.counts.Add) checkStats := func(t *testing.T, m *magicStack, wantConns []netlogtype.Connection) { - _, stats := m.stats.TestExtract() + defer m.counts.Reset() + counts := m.counts.Clone() for _, conn := range wantConns { - if _, ok := stats[conn]; ok { + if _, ok := counts[conn]; ok { return } } t.Helper() - t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(stats)) + t.Errorf("missing any connection to %s from %s", wantConns, slicesx.MapKeys(counts)) } addrPort := netip.MustParseAddrPort @@ -1221,9 +1218,9 @@ func testTwoDevicePing(t *testing.T, d *devices) { setT(t) defer setT(outerT) m1.conn.resetMetricsForTest() - m1.stats.TestExtract() + m1.counts.Reset() m2.conn.resetMetricsForTest() - m2.stats.TestExtract() + m2.counts.Reset() t.Logf("Metrics before: %s\n", m1.metrics.String()) ping1(t) ping2(t) @@ -1249,8 +1246,6 @@ func (c *Conn) resetMetricsForTest() { } func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { - _, phys := ms.stats.TestExtract() - physIPv4RxBytes := int64(0) physIPv4TxBytes := int64(0) physDERPRxBytes := int64(0) @@ -1259,7 +1254,7 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { physIPv4TxPackets := int64(0) physDERPRxPackets := int64(0) physDERPTxPackets := int64(0) - for conn, count := range phys { + for conn, count := range ms.counts.Clone() { t.Logf("physconn src: %s, dst: %s", conn.Src.String(), conn.Dst.String()) if conn.Dst.String() == "127.3.3.40:1" { physDERPRxBytes += int64(count.RxBytes) @@ -1273,6 +1268,7 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { physIPv4TxPackets += int64(count.TxPackets) } } + ms.counts.Reset() metricIPv4RxBytes := ms.conn.metrics.inboundBytesIPv4Total.Value() metricIPv4RxPackets := ms.conn.metrics.inboundPacketsIPv4Total.Value() @@ -1300,8 +1296,14 @@ func assertConnStatsAndUserMetricsEqual(t *testing.T, ms *magicStack) { // the metrics by 2 to get the expected value. // TODO(kradalby): https://github.com/tailscale/tailscale/issues/13420 c.Assert(metricSendUDP.Value(), qt.Equals, metricIPv4TxPackets*2) + c.Assert(metricSendDataPacketsIPv4.Value(), qt.Equals, metricIPv4TxPackets*2) + c.Assert(metricSendDataPacketsDERP.Value(), qt.Equals, metricDERPTxPackets*2) + c.Assert(metricSendDataBytesIPv4.Value(), qt.Equals, metricIPv4TxBytes*2) + c.Assert(metricSendDataBytesDERP.Value(), qt.Equals, metricDERPTxBytes*2) c.Assert(metricRecvDataPacketsIPv4.Value(), qt.Equals, metricIPv4RxPackets*2) c.Assert(metricRecvDataPacketsDERP.Value(), qt.Equals, metricDERPRxPackets*2) + c.Assert(metricRecvDataBytesIPv4.Value(), qt.Equals, metricIPv4RxBytes*2) + c.Assert(metricRecvDataBytesDERP.Value(), qt.Equals, metricDERPRxBytes*2) } // tests that having a endpoint.String prevents wireguard-go's @@ -1352,8 +1354,7 @@ func newTestConn(t testing.TB) *Conn { t.Helper() port := pickPort(t) - bus := eventbus.New() - t.Cleanup(bus.Close) + bus := eventbustest.NewBus(t) netMon, err := netmon.New(bus, logger.WithPrefix(t.Logf, "... 
netmon: ")) if err != nil { @@ -1364,7 +1365,7 @@ func newTestConn(t testing.TB) *Conn { conn, err := NewConn(Options{ NetMon: netMon, EventBus: bus, - HealthTracker: new(health.Tracker), + HealthTracker: health.NewTracker(bus), Metrics: new(usermetric.Registry), DisablePortMapper: true, Logf: t.Logf, @@ -3038,7 +3039,7 @@ func TestMaybeSetNearestDERP(t *testing.T) { } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { - ht := new(health.Tracker) + ht := health.NewTracker(eventbustest.NewBus(t)) c := newConn(t.Logf) c.myDerp = tt.old c.derpMap = derpMap @@ -3116,49 +3117,121 @@ func TestMaybeRebindOnError(t *testing.T) { } t.Run("no-frequent-rebind", func(t *testing.T) { - if runtime.GOOS != "plan9" { - err := fmt.Errorf("outer err: %w", syscall.EPERM) - conn := newTestConn(t) - defer conn.Close() - conn.lastErrRebind.Store(time.Now().Add(-1 * time.Second)) - before := metricRebindCalls.Value() - conn.maybeRebindOnError(err) - after := metricRebindCalls.Value() - if before != after { - t.Errorf("should not rebind within 5 seconds of last") + synctest.Test(t, func(t *testing.T) { + if runtime.GOOS != "plan9" { + err := fmt.Errorf("outer err: %w", syscall.EPERM) + conn := newTestConn(t) + defer conn.Close() + lastRebindTime := time.Now().Add(-1 * time.Second) + conn.lastErrRebind.Store(lastRebindTime) + before := metricRebindCalls.Value() + conn.maybeRebindOnError(err) + after := metricRebindCalls.Value() + if before != after { + t.Errorf("should not rebind within 5 seconds of last") + } + + // ensure that rebinds are performed and store an updated last + // rebind time. + time.Sleep(6 * time.Second) + + conn.maybeRebindOnError(err) + newTime := conn.lastErrRebind.Load() + if newTime == lastRebindTime { + t.Errorf("expected a rebind to occur") + } + if newTime.Sub(lastRebindTime) < 5*time.Second { + t.Errorf("expected at least 5 seconds between %s and %s", lastRebindTime, newTime) + } } - } + + }) }) } -func TestNetworkDownSendErrors(t *testing.T) { +func newTestConnAndRegistry(t *testing.T) (*Conn, *usermetric.Registry, func()) { + t.Helper() bus := eventbus.New() - defer bus.Close() - netMon := must.Get(netmon.New(bus, t.Logf)) - defer netMon.Close() reg := new(usermetric.Registry) + conn := must.Get(NewConn(Options{ DisablePortMapper: true, Logf: t.Logf, NetMon: netMon, - Metrics: reg, EventBus: bus, + Metrics: reg, })) - defer conn.Close() - conn.SetNetworkUp(false) - if err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0); err == nil { - t.Error("expected error, got nil") - } - resp := httptest.NewRecorder() - reg.Handler(resp, new(http.Request)) - if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { - t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + return conn, reg, func() { + bus.Close() + netMon.Close() + conn.Close() } } +func TestNetworkSendErrors(t *testing.T) { + t.Run("network-down", func(t *testing.T) { + // TODO(alexc): This test case fails on Windows because it never + // successfully sends the first packet: + // + // expected successful Send, got err: "write udp4 0.0.0.0:57516->127.0.0.1:9999: + // wsasendto: The requested address is not valid in its context." + // + // It would be nice to run this test on Windows, but I was already + // on a side quest and it was unclear if this test has ever worked + // correctly on Windows. 
+ if runtime.GOOS == "windows" { + t.Skipf("skipping on %s", runtime.GOOS) + } + + conn, reg, close := newTestConnAndRegistry(t) + defer close() + + buffs := [][]byte{{00, 00, 00, 00, 00, 00, 00, 00}} + ep := &lazyEndpoint{ + src: epAddr{ap: netip.MustParseAddrPort("127.0.0.1:9999")}, + } + offset := 8 + + // Check this is a valid payload to send when the network is up + conn.SetNetworkUp(true) + if err := conn.Send(buffs, ep, offset); err != nil { + t.Errorf("expected successful Send, got err: %q", err) + } + + // Now we know the payload would be sent if the network is up, + // send it again when the network is down + conn.SetNetworkUp(false) + err := conn.Send(buffs, ep, offset) + if err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected NetworkDown to increment packet dropped metric; got %q", resp.Body.String()) + } + }) + + t.Run("invalid-payload", func(t *testing.T) { + conn, reg, close := newTestConnAndRegistry(t) + defer close() + + conn.SetNetworkUp(false) + err := conn.Send([][]byte{{00}}, &lazyEndpoint{}, 0) + if err == nil { + t.Error("expected error, got nil") + } + resp := httptest.NewRecorder() + reg.Handler(resp, new(http.Request)) + if !strings.Contains(resp.Body.String(), `tailscaled_outbound_dropped_packets_total{reason="error"} 1`) { + t.Errorf("expected invalid payload to increment packet dropped metric; got %q", resp.Body.String()) + } + }) +} + func Test_packetLooksLike(t *testing.T) { discoPub := key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 30: 30, 31: 31})) nakedDisco := make([]byte, 0, 512) @@ -3909,7 +3982,8 @@ func TestConn_receiveIP(t *testing.T) { c.noteRecvActivity = func(public key.NodePublic) { noteRecvActivityCalled = true } - c.SetStatistics(connstats.NewStatistics(0, 0, nil)) + var counts netlogtype.CountsByConnection + c.SetConnectionCounter(counts.Add) if tt.insertWantEndpointTypeInPeerMap { var insertEPIntoPeerMap *endpoint @@ -3982,9 +4056,8 @@ func TestConn_receiveIP(t *testing.T) { } // Verify physical rx stats - stats := c.stats.Load() - _, gotPhy := stats.TestExtract() wantNonzeroRxStats := false + gotPhy := counts.Clone() switch ep := tt.wantEndpointType.(type) { case *lazyEndpoint: if ep.maybeEP != nil { @@ -4004,8 +4077,8 @@ func TestConn_receiveIP(t *testing.T) { RxBytes: wantRxBytes, }, } - if !reflect.DeepEqual(gotPhy, wantPhy) { - t.Errorf("receiveIP() got physical conn stats = %v, want %v", gotPhy, wantPhy) + if d := cmp.Diff(gotPhy, wantPhy); d != "" { + t.Errorf("receiveIP() stats mismatch (-got +want):\n%s", d) } } else { if len(gotPhy) != 0 { diff --git a/wgengine/magicsock/relaymanager.go b/wgengine/magicsock/relaymanager.go index 4680832d96bb8..a9dca70ae2228 100644 --- a/wgengine/magicsock/relaymanager.go +++ b/wgengine/magicsock/relaymanager.go @@ -758,7 +758,10 @@ func (r *relayManager) handleNewServerEndpointRunLoop(newServerEndpoint newRelay ctx: ctx, cancel: cancel, } - if byServerDisco == nil { + // We must look up byServerDisco again. The previous value may have been + // deleted from the outer map when cleaning up duplicate work. 
+ byServerDisco, ok = r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] + if !ok { byServerDisco = make(map[key.DiscoPublic]*relayHandshakeWork) r.handshakeWorkByServerDiscoByEndpoint[newServerEndpoint.wlb.ep] = byServerDisco } diff --git a/wgengine/magicsock/relaymanager_test.go b/wgengine/magicsock/relaymanager_test.go index e4891f5678a24..d400818394c47 100644 --- a/wgengine/magicsock/relaymanager_test.go +++ b/wgengine/magicsock/relaymanager_test.go @@ -7,6 +7,7 @@ import ( "testing" "tailscale.com/disco" + udprelay "tailscale.com/net/udprelay/endpoint" "tailscale.com/types/key" "tailscale.com/util/set" ) @@ -78,3 +79,178 @@ func TestRelayManagerGetServers(t *testing.T) { t.Errorf("got %v != want %v", got, servers) } } + +func TestRelayManager_handleNewServerEndpointRunLoop(t *testing.T) { + wantHandshakeWorkCount := func(t *testing.T, rm *relayManager, n int) { + t.Helper() + byServerDiscoByEndpoint := 0 + for _, v := range rm.handshakeWorkByServerDiscoByEndpoint { + byServerDiscoByEndpoint += len(v) + } + byServerDiscoVNI := len(rm.handshakeWorkByServerDiscoVNI) + if byServerDiscoByEndpoint != n || + byServerDiscoVNI != n || + byServerDiscoByEndpoint != byServerDiscoVNI { + t.Fatalf("want handshake work count %d byServerDiscoByEndpoint=%d byServerDiscoVNI=%d", + n, + byServerDiscoByEndpoint, + byServerDiscoVNI, + ) + } + } + + conn := newConn(t.Logf) + epA := &endpoint{c: conn} + epB := &endpoint{c: conn} + serverDiscoA := key.NewDisco().Public() + serverDiscoB := key.NewDisco().Public() + + serverAendpointALamport1VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 1}, + } + serverAendpointALamport1VNI1LastBestMatching := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA, lastBestIsTrusted: true, lastBest: addrQuality{relayServerDisco: serverDiscoA}}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 1}, + } + serverAendpointALamport2VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 2, VNI: 1}, + } + serverAendpointALamport2VNI2 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 2, VNI: 2}, + } + serverAendpointBLamport1VNI2 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epB}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoA, LamportID: 1, VNI: 2}, + } + serverBendpointALamport1VNI1 := newRelayServerEndpointEvent{ + wlb: endpointWithLastBest{ep: epA}, + se: udprelay.ServerEndpoint{ServerDisco: serverDiscoB, LamportID: 1, VNI: 1}, + } + + tests := []struct { + name string + events []newRelayServerEndpointEvent + want []newRelayServerEndpointEvent + }{ + { + // Test for http://go/corp/32978 + name: "eq server+ep neq VNI higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverAendpointALamport2VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+ep neq VNI lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + serverAendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+vni neq ep lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + serverAendpointBLamport1VNI2, + }, + want: 
[]newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+vni neq ep higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointBLamport1VNI2, + serverAendpointALamport2VNI2, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI2, + }, + }, + { + name: "eq server+endpoint+vni higher lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverAendpointALamport2VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + }, + }, + { + name: "eq server+endpoint+vni lower lamport", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + serverAendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport2VNI1, + }, + }, + { + name: "eq endpoint+vni+lamport neq server", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverBendpointALamport1VNI1, + }, + want: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1, + serverBendpointALamport1VNI1, + }, + }, + { + name: "trusted last best with matching server", + events: []newRelayServerEndpointEvent{ + serverAendpointALamport1VNI1LastBestMatching, + }, + want: []newRelayServerEndpointEvent{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rm := &relayManager{} + rm.init() + <-rm.runLoopStoppedCh // prevent runLoop() from starting + + // feed events + for _, event := range tt.events { + rm.handleNewServerEndpointRunLoop(event) + } + + // validate state + wantHandshakeWorkCount(t, rm, len(tt.want)) + for _, want := range tt.want { + byServerDisco, ok := rm.handshakeWorkByServerDiscoByEndpoint[want.wlb.ep] + if !ok { + t.Fatal("work not found by endpoint") + } + workByServerDiscoByEndpoint, ok := byServerDisco[want.se.ServerDisco] + if !ok { + t.Fatal("work not found by server disco by endpoint") + } + workByServerDiscoVNI, ok := rm.handshakeWorkByServerDiscoVNI[serverDiscoVNI{want.se.ServerDisco, want.se.VNI}] + if !ok { + t.Fatal("work not found by server disco + VNI") + } + if workByServerDiscoByEndpoint != workByServerDiscoVNI { + t.Fatal("workByServerDiscoByEndpoint != workByServerDiscoVNI") + } + } + + // cleanup + for _, event := range tt.events { + rm.stopWorkRunLoop(event.wlb.ep) + } + wantHandshakeWorkCount(t, rm, 0) + }) + } +} diff --git a/wgengine/netlog/logger.go b/wgengine/netlog/netlog.go similarity index 90% rename from wgengine/netlog/logger.go rename to wgengine/netlog/netlog.go index 3a696b246df54..2984df99471b6 100644 --- a/wgengine/netlog/logger.go +++ b/wgengine/netlog/netlog.go @@ -1,13 +1,17 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_netlog && !ts_omit_logtail + // Package netlog provides a logger that monitors a TUN device and // periodically records any traffic into a log stream. package netlog import ( + "cmp" "context" "encoding/json" + "errors" "fmt" "io" "log" @@ -19,14 +23,14 @@ import ( "tailscale.com/health" "tailscale.com/logpolicy" "tailscale.com/logtail" - "tailscale.com/net/connstats" "tailscale.com/net/netmon" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" "tailscale.com/tailcfg" "tailscale.com/types/logid" + "tailscale.com/types/netlogfunc" "tailscale.com/types/netlogtype" - "tailscale.com/util/multierr" + "tailscale.com/util/eventbus" "tailscale.com/wgengine/router" ) @@ -36,12 +40,12 @@ const pollPeriod = 5 * time.Second // Device is an abstraction over a tunnel device or a magic socket. 
// Both *tstun.Wrapper and *magicsock.Conn implement this interface. type Device interface { - SetStatistics(*connstats.Statistics) + SetConnectionCounter(netlogfunc.ConnectionCounter) } type noopDevice struct{} -func (noopDevice) SetStatistics(*connstats.Statistics) {} +func (noopDevice) SetConnectionCounter(netlogfunc.ConnectionCounter) {} // Logger logs statistics about every connection. // At present, it only logs connections within a tailscale network. @@ -51,7 +55,7 @@ type Logger struct { mu sync.Mutex // protects all fields below logger *logtail.Logger - stats *connstats.Statistics + stats *statistics tun Device sock Device @@ -93,7 +97,7 @@ var testClient *http.Client // The IP protocol and source port are always zero. // The sock is used to populated the PhysicalTraffic field in Message. // The netMon parameter is optional; if non-nil it's used to do faster interface lookups. -func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, logExitFlowEnabledEnabled bool) error { +func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, logExitFlowEnabledEnabled bool) error { nl.mu.Lock() defer nl.mu.Unlock() if nl.logger != nil { @@ -110,6 +114,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo Collection: "tailtraffic.log.tailscale.io", PrivateID: nodeLogID, CopyPrivateID: domainLogID, + Bus: bus, Stderr: io.Discard, CompressLogs: true, HTTPC: httpc, @@ -126,7 +131,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo // can upload to the Tailscale log service, so stay below this limit. const maxLogSize = 256 << 10 const maxConns = (maxLogSize - netlogtype.MaxMessageJSONSize) / netlogtype.MaxConnectionCountsJSONSize - nl.stats = connstats.NewStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + nl.stats = newStatistics(pollPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { nl.mu.Lock() addrs := nl.addrs prefixes := nl.prefixes @@ -135,23 +140,17 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo }) // Register the connection tracker into the TUN device. - if tun == nil { - tun = noopDevice{} - } - nl.tun = tun - nl.tun.SetStatistics(nl.stats) + nl.tun = cmp.Or[Device](tun, noopDevice{}) + nl.tun.SetConnectionCounter(nl.stats.UpdateVirtual) // Register the connection tracker into magicsock. 
- if sock == nil { - sock = noopDevice{} - } - nl.sock = sock - nl.sock.SetStatistics(nl.stats) + nl.sock = cmp.Or[Device](sock, noopDevice{}) + nl.sock.SetConnectionCounter(nl.stats.UpdatePhysical) return nil } -func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connstats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { +func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connStats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) { m := netlogtype.Message{NodeID: nodeID, Start: start.UTC(), End: end.UTC()} classifyAddr := func(a netip.Addr) (isTailscale, withinRoute bool) { @@ -170,7 +169,7 @@ func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start } exitTraffic := make(map[netlogtype.Connection]netlogtype.Counts) - for conn, cnts := range connstats { + for conn, cnts := range connStats { srcIsTailscaleIP, srcWithinSubnet := classifyAddr(conn.Src.Addr()) dstIsTailscaleIP, dstWithinSubnet := classifyAddr(conn.Dst.Addr()) switch { @@ -256,8 +255,8 @@ func (nl *Logger) Shutdown(ctx context.Context) error { // Shutdown in reverse order of Startup. // Do not hold lock while shutting down since this may flush one last time. nl.mu.Unlock() - nl.sock.SetStatistics(nil) - nl.tun.SetStatistics(nil) + nl.sock.SetConnectionCounter(nil) + nl.tun.SetConnectionCounter(nil) err1 := nl.stats.Shutdown(ctx) err2 := nl.logger.Shutdown(ctx) nl.mu.Lock() @@ -270,5 +269,5 @@ func (nl *Logger) Shutdown(ctx context.Context) error { nl.addrs = nil nl.prefixes = nil - return multierr.New(err1, err2) + return errors.Join(err1, err2) } diff --git a/wgengine/netlog/netlog_omit.go b/wgengine/netlog/netlog_omit.go new file mode 100644 index 0000000000000..43209df919ace --- /dev/null +++ b/wgengine/netlog/netlog_omit.go @@ -0,0 +1,13 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netlog || ts_omit_logtail + +package netlog + +type Logger struct{} + +func (*Logger) Startup(...any) error { return nil } +func (*Logger) Running() bool { return false } +func (*Logger) Shutdown(any) error { return nil } +func (*Logger) ReconfigRoutes(any) {} diff --git a/net/connstats/stats.go b/wgengine/netlog/stats.go similarity index 76% rename from net/connstats/stats.go rename to wgengine/netlog/stats.go index 4e6d8e109aaad..c06068803f125 100644 --- a/net/connstats/stats.go +++ b/wgengine/netlog/stats.go @@ -1,9 +1,9 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -// Package connstats maintains statistics about connections -// flowing through a TUN device (which operate at the IP layer). -package connstats +//go:build !ts_omit_netlog && !ts_omit_logtail + +package netlog import ( "context" @@ -14,13 +14,14 @@ import ( "golang.org/x/sync/errgroup" "tailscale.com/net/packet" "tailscale.com/net/tsaddr" + "tailscale.com/types/ipproto" "tailscale.com/types/netlogtype" ) -// Statistics maintains counters for every connection. +// statistics maintains counters for every connection. // All methods are safe for concurrent use. // The zero value is ready for use. 
-type Statistics struct { +type statistics struct { maxConns int // immutable once set mu sync.Mutex @@ -39,13 +40,13 @@ type connCnts struct { physical map[netlogtype.Connection]netlogtype.Counts } -// NewStatistics creates a data structure for tracking connection statistics +// newStatistics creates a data structure for tracking connection statistics // that periodically dumps the virtual and physical connection counts // depending on whether the maxPeriod or maxConns is exceeded. // The dump function is called from a single goroutine. // Shutdown must be called to cleanup resources. -func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *Statistics { - s := &Statistics{maxConns: maxConns} +func newStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts)) *statistics { + s := &statistics{maxConns: maxConns} s.connCntsCh = make(chan connCnts, 256) s.shutdownCtx, s.shutdown = context.WithCancel(context.Background()) s.group.Go(func() error { @@ -82,15 +83,19 @@ func NewStatistics(maxPeriod time.Duration, maxConns int, dump func(start, end t // UpdateTxVirtual updates the counters for a transmitted IP packet // The source and destination of the packet directly correspond with // the source and destination in netlogtype.Connection. -func (s *Statistics) UpdateTxVirtual(b []byte) { - s.updateVirtual(b, false) +func (s *statistics) UpdateTxVirtual(b []byte) { + var p packet.Parsed + p.Decode(b) + s.UpdateVirtual(p.IPProto, p.Src, p.Dst, 1, len(b), false) } // UpdateRxVirtual updates the counters for a received IP packet. // The source and destination of the packet are inverted with respect to // the source and destination in netlogtype.Connection. -func (s *Statistics) UpdateRxVirtual(b []byte) { - s.updateVirtual(b, true) +func (s *statistics) UpdateRxVirtual(b []byte) { + var p packet.Parsed + p.Decode(b) + s.UpdateVirtual(p.IPProto, p.Dst, p.Src, 1, len(b), true) } var ( @@ -98,23 +103,18 @@ var ( tailscaleServiceIPv6 = tsaddr.TailscaleServiceIPv6() ) -func (s *Statistics) updateVirtual(b []byte, receive bool) { - var p packet.Parsed - p.Decode(b) - conn := netlogtype.Connection{Proto: p.IPProto, Src: p.Src, Dst: p.Dst} - if receive { - conn.Src, conn.Dst = conn.Dst, conn.Src - } - +func (s *statistics) UpdateVirtual(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { // Network logging is defined as traffic between two Tailscale nodes. // Traffic with the internal Tailscale service is not with another node // and should not be logged. It also happens to be a high volume // amount of discrete traffic flows (e.g., DNS lookups). 
- switch conn.Dst.Addr() { + switch dst.Addr() { case tailscaleServiceIPv4, tailscaleServiceIPv6: return } + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} + s.mu.Lock() defer s.mu.Unlock() cnts, found := s.virtual[conn] @@ -122,11 +122,11 @@ func (s *Statistics) updateVirtual(b []byte, receive bool) { return } if receive { - cnts.RxPackets++ - cnts.RxBytes += uint64(len(b)) + cnts.RxPackets += uint64(packets) + cnts.RxBytes += uint64(bytes) } else { - cnts.TxPackets++ - cnts.TxBytes += uint64(len(b)) + cnts.TxPackets += uint64(packets) + cnts.TxBytes += uint64(bytes) } s.virtual[conn] = cnts } @@ -135,20 +135,20 @@ func (s *Statistics) updateVirtual(b []byte, receive bool) { // The src is always a Tailscale IP address, representing some remote peer. // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.updatePhysical(src, dst, packets, bytes, false) +func (s *statistics) UpdateTxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { + s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, false) } // UpdateRxPhysical updates the counters for zero or more received wireguard packets. // The src is always a Tailscale IP address, representing some remote peer. // The dst is a remote IP address and port that corresponds // with some physical peer backing the Tailscale IP address. -func (s *Statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { - s.updatePhysical(src, dst, packets, bytes, true) +func (s *statistics) UpdateRxPhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int) { + s.UpdatePhysical(0, netip.AddrPortFrom(src, 0), dst, packets, bytes, true) } -func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, packets, bytes int, receive bool) { - conn := netlogtype.Connection{Src: netip.AddrPortFrom(src, 0), Dst: dst} +func (s *statistics) UpdatePhysical(proto ipproto.Proto, src, dst netip.AddrPort, packets, bytes int, receive bool) { + conn := netlogtype.Connection{Proto: proto, Src: src, Dst: dst} s.mu.Lock() defer s.mu.Unlock() @@ -168,7 +168,7 @@ func (s *Statistics) updatePhysical(src netip.Addr, dst netip.AddrPort, packets, // preInsertConn updates the maps to handle insertion of a new connection. // It reports false if insertion is not allowed (i.e., after shutdown). -func (s *Statistics) preInsertConn() bool { +func (s *statistics) preInsertConn() bool { // Check whether insertion of a new connection will exceed maxConns. if len(s.virtual)+len(s.physical) == s.maxConns && s.maxConns > 0 { // Extract the current statistics and send it to the serializer. @@ -190,13 +190,13 @@ func (s *Statistics) preInsertConn() bool { return s.shutdownCtx.Err() == nil } -func (s *Statistics) extract() connCnts { +func (s *statistics) extract() connCnts { s.mu.Lock() defer s.mu.Unlock() return s.extractLocked() } -func (s *Statistics) extractLocked() connCnts { +func (s *statistics) extractLocked() connCnts { if len(s.virtual)+len(s.physical) == 0 { return connCnts{} } @@ -208,7 +208,7 @@ func (s *Statistics) extractLocked() connCnts { // TestExtract synchronously extracts the current network statistics map // and resets the counters. This should only be used for testing purposes. 
-func (s *Statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { +func (s *statistics) TestExtract() (virtual, physical map[netlogtype.Connection]netlogtype.Counts) { cc := s.extract() return cc.virtual, cc.physical } @@ -216,7 +216,7 @@ func (s *Statistics) TestExtract() (virtual, physical map[netlogtype.Connection] // Shutdown performs a final flush of statistics. // Statistics for any subsequent calls to Update will be dropped. // It is safe to call Shutdown concurrently and repeatedly. -func (s *Statistics) Shutdown(context.Context) error { +func (s *statistics) Shutdown(context.Context) error { s.shutdown() return s.group.Wait() } diff --git a/net/connstats/stats_test.go b/wgengine/netlog/stats_test.go similarity index 95% rename from net/connstats/stats_test.go rename to wgengine/netlog/stats_test.go index ae0bca8a5f008..6cf7eb9983817 100644 --- a/net/connstats/stats_test.go +++ b/wgengine/netlog/stats_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package connstats +package netlog import ( "context" @@ -54,7 +54,7 @@ func TestInterval(t *testing.T) { const maxConns = 2048 gotDump := make(chan struct{}, 1) - stats := NewStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { + stats := newStatistics(maxPeriod, maxConns, func(_, _ time.Time, _, _ map[netlogtype.Connection]netlogtype.Counts) { select { case gotDump <- struct{}{}: default: @@ -86,7 +86,7 @@ func TestConcurrent(t *testing.T) { const maxPeriod = 10 * time.Millisecond const maxConns = 10 virtualAggregate := make(map[netlogtype.Connection]netlogtype.Counts) - stats := NewStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { + stats := newStatistics(maxPeriod, maxConns, func(start, end time.Time, virtual, physical map[netlogtype.Connection]netlogtype.Counts) { c.Assert(start.IsZero(), qt.IsFalse) c.Assert(end.IsZero(), qt.IsFalse) c.Assert(end.Before(start), qt.IsFalse) @@ -170,7 +170,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) for j := 0; j < 1e3; j++ { s.UpdateTxVirtual(p) } @@ -181,7 +181,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) for j := 0; j < 1e3; j++ { binary.BigEndian.PutUint32(p[20:], uint32(j)) // unique port combination s.UpdateTxVirtual(p) @@ -193,7 +193,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) var group sync.WaitGroup for j := 0; j < runtime.NumCPU(); j++ { group.Add(1) @@ -215,7 +215,7 @@ func Benchmark(b *testing.B) { b.ResetTimer() b.ReportAllocs() for range b.N { - s := NewStatistics(0, 0, nil) + s := newStatistics(0, 0, nil) var group sync.WaitGroup for j := 0; j < runtime.NumCPU(); j++ { group.Add(1) diff --git a/wgengine/netstack/gro/gro.go b/wgengine/netstack/gro/gro.go index 654d170566f0d..c8e5e56e1acb5 100644 --- a/wgengine/netstack/gro/gro.go +++ b/wgengine/netstack/gro/gro.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_netstack + // Package gro implements GRO for the receive (write) path into gVisor. 
package gro diff --git a/wgengine/netstack/gro/gro_default.go b/wgengine/netstack/gro/gro_default.go index f92ee15ecac15..c70e19f7c5861 100644 --- a/wgengine/netstack/gro/gro_default.go +++ b/wgengine/netstack/gro/gro_default.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !ios +//go:build !ios && !ts_omit_gro package gro diff --git a/wgengine/netstack/gro/gro_ios.go b/wgengine/netstack/gro/gro_disabled.go similarity index 59% rename from wgengine/netstack/gro/gro_ios.go rename to wgengine/netstack/gro/gro_disabled.go index 627b42d7e5cfd..d7ffbd9139d99 100644 --- a/wgengine/netstack/gro/gro_ios.go +++ b/wgengine/netstack/gro/gro_disabled.go @@ -1,22 +1,27 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build ios +//go:build ios || ts_omit_gro package gro import ( - "gvisor.dev/gvisor/pkg/tcpip/stack" + "runtime" + "tailscale.com/net/packet" ) type GRO struct{} func NewGRO() *GRO { - panic("unsupported on iOS") + if runtime.GOOS == "ios" { + panic("unsupported on iOS") + } + panic("GRO disabled in build") + } -func (g *GRO) SetDispatcher(_ stack.NetworkDispatcher) {} +func (g *GRO) SetDispatcher(any) {} func (g *GRO) Enqueue(_ *packet.Parsed) {} diff --git a/wgengine/netstack/gro/netstack_disabled.go b/wgengine/netstack/gro/netstack_disabled.go new file mode 100644 index 0000000000000..a0f56fa4499cf --- /dev/null +++ b/wgengine/netstack/gro/netstack_disabled.go @@ -0,0 +1,10 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_netstack + +package gro + +func RXChecksumOffload(any) any { + panic("unreachable") +} diff --git a/wgengine/netstack/link_endpoint.go b/wgengine/netstack/link_endpoint.go index 39da64b5503cc..260b3196ab2fc 100644 --- a/wgengine/netstack/link_endpoint.go +++ b/wgengine/netstack/link_endpoint.go @@ -10,6 +10,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip" "gvisor.dev/gvisor/pkg/tcpip/header" "gvisor.dev/gvisor/pkg/tcpip/stack" + "tailscale.com/feature/buildfeatures" "tailscale.com/net/packet" "tailscale.com/types/ipproto" "tailscale.com/wgengine/netstack/gro" @@ -133,7 +134,7 @@ func newLinkEndpoint(size int, mtu uint32, linkAddr tcpip.LinkAddress, supported // If gro allocates a *gro.GRO it will have l's stack.NetworkDispatcher set via // SetDispatcher(). func (l *linkEndpoint) gro(p *packet.Parsed, g *gro.GRO) *gro.GRO { - if l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { + if !buildfeatures.HasGRO || l.supportedGRO == groNotSupported || p.IPProto != ipproto.TCP { // IPv6 may have extension headers preceding a TCP header, but we trade // for a fast path and assume p cannot be coalesced in such a case. 
l.injectInbound(p) @@ -186,7 +187,7 @@ func (l *linkEndpoint) injectInbound(p *packet.Parsed) { l.mu.RLock() d := l.dispatcher l.mu.RUnlock() - if d == nil { + if d == nil || !buildfeatures.HasNetstack { return } pkt := gro.RXChecksumOffload(p) diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index d97c669463d78..c2b5d8a3266c7 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -33,6 +33,7 @@ import ( "gvisor.dev/gvisor/pkg/tcpip/transport/udp" "gvisor.dev/gvisor/pkg/waiter" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnlocal" "tailscale.com/metrics" "tailscale.com/net/dns" @@ -343,7 +344,7 @@ func Create(logf logger.Logf, tundev *tstun.Wrapper, e wgengine.Engine, mc *magi } supportedGSOKind := stack.GSONotSupported supportedGROKind := groNotSupported - if runtime.GOOS == "linux" { + if runtime.GOOS == "linux" && buildfeatures.HasGRO { // TODO(jwhited): add Windows support https://github.com/tailscale/corp/issues/21874 supportedGROKind = tcpGROSupported supportedGSOKind = stack.HostGSOSupported @@ -577,9 +578,16 @@ func (ns *Impl) decrementInFlightTCPForward(tei stack.TransportEndpointID, remot } } +// LocalBackend is a fake name for *ipnlocal.LocalBackend to avoid an import cycle. +type LocalBackend = any + // Start sets up all the handlers so netstack can start working. Implements // wgengine.FakeImpl. -func (ns *Impl) Start(lb *ipnlocal.LocalBackend) error { +func (ns *Impl) Start(b LocalBackend) error { + if b == nil { + panic("nil LocalBackend interface") + } + lb := b.(*ipnlocal.LocalBackend) if lb == nil { panic("nil LocalBackend") } @@ -643,13 +651,15 @@ func (ns *Impl) UpdateNetstackIPs(nm *netmap.NetworkMap) { var selfNode tailcfg.NodeView var serviceAddrSet set.Set[netip.Addr] if nm != nil { - vipServiceIPMap := nm.GetVIPServiceIPMap() - serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) - for _, addrs := range vipServiceIPMap { - serviceAddrSet.AddSlice(addrs) - } ns.atomicIsLocalIPFunc.Store(ipset.NewContainsIPFunc(nm.GetAddresses())) - ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) + if buildfeatures.HasServe { + vipServiceIPMap := nm.GetVIPServiceIPMap() + serviceAddrSet = make(set.Set[netip.Addr], len(vipServiceIPMap)*2) + for _, addrs := range vipServiceIPMap { + serviceAddrSet.AddSlice(addrs) + } + ns.atomicIsVIPServiceIPFunc.Store(serviceAddrSet.Contains) + } selfNode = nm.SelfNode } else { ns.atomicIsLocalIPFunc.Store(ipset.FalseContainsIPFunc()) @@ -1032,6 +1042,9 @@ func (ns *Impl) isLocalIP(ip netip.Addr) bool { // isVIPServiceIP reports whether ip is an IP address that's // assigned to a VIP service. 
func (ns *Impl) isVIPServiceIP(ip netip.Addr) bool { + if !buildfeatures.HasServe { + return false + } return ns.atomicIsVIPServiceIPFunc.Load()(ip) } @@ -1074,7 +1087,7 @@ func (ns *Impl) shouldProcessInbound(p *packet.Parsed, t *tstun.Wrapper) bool { return true } } - if isService { + if buildfeatures.HasServe && isService { if p.IsEchoRequest() { return true } @@ -1892,7 +1905,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"option_unknown_received", ipStats.OptionUnknownReceived}, } for _, metric := range ipMetrics { - metric := metric m.Set("counter_ip_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1919,7 +1931,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"errors", fwdStats.Errors}, } for _, metric := range fwdMetrics { - metric := metric m.Set("counter_ip_forward_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1963,7 +1974,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"forward_max_in_flight_drop", tcpStats.ForwardMaxInFlightDrop}, } for _, metric := range tcpMetrics { - metric := metric m.Set("counter_tcp_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) @@ -1990,7 +2000,6 @@ func (ns *Impl) ExpVar() expvar.Var { {"checksum_errors", udpStats.ChecksumErrors}, } for _, metric := range udpMetrics { - metric := metric m.Set("counter_udp_"+metric.name, expvar.Func(func() any { return readStatCounter(metric.field) })) diff --git a/wgengine/netstack/netstack_test.go b/wgengine/netstack/netstack_test.go index 584b3babc6004..93022811ce409 100644 --- a/wgengine/netstack/netstack_test.go +++ b/wgengine/netstack/netstack_test.go @@ -50,7 +50,7 @@ func TestInjectInboundLeak(t *testing.T) { Tun: tunDev, Dialer: dialer, SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) @@ -110,7 +110,7 @@ func makeNetstack(tb testing.TB, config func(*Impl)) *Impl { Tun: tunDev, Dialer: dialer, SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }) diff --git a/wgengine/netstack/netstack_userping.go b/wgengine/netstack/netstack_userping.go index ee635bd877dca..b35a6eca9e11b 100644 --- a/wgengine/netstack/netstack_userping.go +++ b/wgengine/netstack/netstack_userping.go @@ -13,6 +13,7 @@ import ( "runtime" "time" + "tailscale.com/feature/buildfeatures" "tailscale.com/version/distro" ) @@ -20,7 +21,7 @@ import ( // CAP_NET_RAW from tailscaled's binary. var setAmbientCapsRaw func(*exec.Cmd) -var isSynology = runtime.GOOS == "linux" && distro.Get() == distro.Synology +var isSynology = runtime.GOOS == "linux" && buildfeatures.HasSynology && distro.Get() == distro.Synology // sendOutboundUserPing sends a non-privileged ICMP (or ICMPv6) ping to dstIP with the given timeout. func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) error { @@ -61,7 +62,7 @@ func (ns *Impl) sendOutboundUserPing(dstIP netip.Addr, timeout time.Duration) er ping = "/bin/ping" } cmd := exec.Command(ping, "-c", "1", "-W", "3", dstIP.String()) - if isSynology && os.Getuid() != 0 { + if buildfeatures.HasSynology && isSynology && os.Getuid() != 0 { // On DSM7 we run as non-root and need to pass // CAP_NET_RAW if our binary has it. 
setAmbientCapsRaw(cmd) diff --git a/wgengine/pendopen.go b/wgengine/pendopen.go index 28d1f4f9d59e4..7eaf43e52a816 100644 --- a/wgengine/pendopen.go +++ b/wgengine/pendopen.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ts_omit_debug + package wgengine import ( @@ -20,6 +22,8 @@ import ( "tailscale.com/wgengine/filter" ) +type flowtrackTuple = flowtrack.Tuple + const tcpTimeoutBeforeDebug = 5 * time.Second type pendingOpenFlow struct { @@ -56,6 +60,10 @@ func (e *userspaceEngine) noteFlowProblemFromPeer(f flowtrack.Tuple, problem pac of.problem = problem } +func tsRejectFlow(rh packet.TailscaleRejectedHeader) flowtrack.Tuple { + return flowtrack.MakeTuple(rh.Proto, rh.Src, rh.Dst) +} + func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { res = filter.Accept // always @@ -66,8 +74,8 @@ func (e *userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapp return } if rh.MaybeBroken { - e.noteFlowProblemFromPeer(rh.Flow(), rh.Reason) - } else if f := rh.Flow(); e.removeFlow(f) { + e.noteFlowProblemFromPeer(tsRejectFlow(rh), rh.Reason) + } else if f := tsRejectFlow(rh); e.removeFlow(f) { e.logf("open-conn-track: flow %v %v > %v rejected due to %v", rh.Proto, rh.Src, rh.Dst, rh.Reason) } return diff --git a/wgengine/pendopen_omit.go b/wgengine/pendopen_omit.go new file mode 100644 index 0000000000000..013425d357f26 --- /dev/null +++ b/wgengine/pendopen_omit.go @@ -0,0 +1,24 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ts_omit_debug + +package wgengine + +import ( + "tailscale.com/net/packet" + "tailscale.com/net/tstun" + "tailscale.com/wgengine/filter" +) + +type flowtrackTuple = struct{} + +type pendingOpenFlow struct{} + +func (*userspaceEngine) trackOpenPreFilterIn(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { + panic("unreachable") +} + +func (*userspaceEngine) trackOpenPostFilterOut(pp *packet.Parsed, t *tstun.Wrapper) (res filter.Response) { + panic("unreachable") +} diff --git a/wgengine/router/callback.go b/wgengine/router/callback.go index 1d90912778226..c1838539ba2a3 100644 --- a/wgengine/router/callback.go +++ b/wgengine/router/callback.go @@ -56,13 +56,6 @@ func (r *CallbackRouter) Set(rcfg *Config) error { return r.SetBoth(r.rcfg, r.dcfg) } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *CallbackRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - // SetDNS implements dns.OSConfigurator. 
func (r *CallbackRouter) SetDNS(dcfg dns.OSConfig) error { r.mu.Lock() diff --git a/wgengine/router/consolidating_router_test.go b/wgengine/router/consolidating_router_test.go index 871682d1346bc..ba2e4d07a746a 100644 --- a/wgengine/router/consolidating_router_test.go +++ b/wgengine/router/consolidating_router_test.go @@ -4,7 +4,6 @@ package router import ( - "log" "net/netip" "testing" @@ -56,7 +55,7 @@ func TestConsolidateRoutes(t *testing.T) { }, } - cr := &consolidatingRouter{logf: log.Printf} + cr := &consolidatingRouter{logf: t.Logf} for _, test := range tests { t.Run(test.name, func(t *testing.T) { got := cr.consolidateRoutes(test.cfg) diff --git a/wgengine/router/ifconfig_windows.go b/wgengine/router/osrouter/ifconfig_windows.go similarity index 99% rename from wgengine/router/ifconfig_windows.go rename to wgengine/router/osrouter/ifconfig_windows.go index 40e9dc6e0cdfd..cb87ad5f24114 100644 --- a/wgengine/router/ifconfig_windows.go +++ b/wgengine/router/osrouter/ifconfig_windows.go @@ -3,7 +3,7 @@ * Copyright (C) 2019 WireGuard LLC. All Rights Reserved. */ -package router +package osrouter import ( "errors" @@ -18,7 +18,7 @@ import ( "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" "tailscale.com/net/tstun" - "tailscale.com/util/multierr" + "tailscale.com/wgengine/router" "tailscale.com/wgengine/winnet" ole "github.com/go-ole/go-ole" @@ -246,7 +246,7 @@ var networkCategoryWarnable = health.Register(&health.Warnable{ MapDebugFlag: "warn-network-category-unhealthy", }) -func configureInterface(cfg *Config, tun *tun.NativeTun, ht *health.Tracker) (retErr error) { +func configureInterface(cfg *router.Config, tun *tun.NativeTun, ht *health.Tracker) (retErr error) { var mtu = tstun.DefaultTUNMTU() luid := winipcfg.LUID(tun.LUID()) iface, err := interfaceFromLUID(luid, @@ -830,5 +830,5 @@ func syncRoutes(ifc *winipcfg.IPAdapterAddresses, want []*routeData, dontDelete } } - return multierr.New(errs...) + return errors.Join(errs...) } diff --git a/wgengine/router/ifconfig_windows_test.go b/wgengine/router/osrouter/ifconfig_windows_test.go similarity index 99% rename from wgengine/router/ifconfig_windows_test.go rename to wgengine/router/osrouter/ifconfig_windows_test.go index 11b98d1d77d98..b858ef4f60d19 100644 --- a/wgengine/router/ifconfig_windows_test.go +++ b/wgengine/router/osrouter/ifconfig_windows_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "fmt" diff --git a/wgengine/router/osrouter/osrouter.go b/wgengine/router/osrouter/osrouter.go new file mode 100644 index 0000000000000..281454b069984 --- /dev/null +++ b/wgengine/router/osrouter/osrouter.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package osrouter contains OS-specific router implementations. +// This package has no API; it exists purely to import +// for the side effect of it registering itself with the wgengine/router +// package. +package osrouter + +import "tailscale.com/wgengine/router" + +// shutdownConfig is a routing configuration that removes all router +// state from the OS. It's the config used when callers pass in a nil +// Config. 
+var shutdownConfig router.Config diff --git a/wgengine/router/osrouter/osrouter_test.go b/wgengine/router/osrouter/osrouter_test.go new file mode 100644 index 0000000000000..d0cb3db6968c1 --- /dev/null +++ b/wgengine/router/osrouter/osrouter_test.go @@ -0,0 +1,15 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package osrouter + +import "net/netip" + +//lint:ignore U1000 used in Windows/Linux tests only +func mustCIDRs(ss ...string) []netip.Prefix { + var ret []netip.Prefix + for _, s := range ss { + ret = append(ret, netip.MustParsePrefix(s)) + } + return ret +} diff --git a/wgengine/router/router_freebsd.go b/wgengine/router/osrouter/router_freebsd.go similarity index 54% rename from wgengine/router/router_freebsd.go rename to wgengine/router/osrouter/router_freebsd.go index ce4753d7dc611..a142e7a84e14a 100644 --- a/wgengine/router/router_freebsd.go +++ b/wgengine/router/osrouter/router_freebsd.go @@ -1,23 +1,18 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" - "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) -// For now this router only supports the userspace WireGuard implementations. -// -// Work is currently underway for an in-kernel FreeBSD implementation of wireguard -// https://svnweb.freebsd.org/base?view=revision&revision=357986 - -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { - return newUserspaceBSDRouter(logf, tundev, netMon, health) +func init() { + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) } func cleanUp(logf logger.Logf, interfaceName string) { diff --git a/wgengine/router/router_linux.go b/wgengine/router/osrouter/router_linux.go similarity index 93% rename from wgengine/router/router_linux.go rename to wgengine/router/osrouter/router_linux.go index 2382e87cd5185..58bd0513ab768 100644 --- a/wgengine/router/router_linux.go +++ b/wgengine/router/osrouter/router_linux.go @@ -3,7 +3,7 @@ //go:build !android -package router +package osrouter import ( "errors" @@ -14,6 +14,7 @@ import ( "os/exec" "strconv" "strings" + "sync" "sync/atomic" "syscall" "time" @@ -26,15 +27,25 @@ import ( "tailscale.com/envknob" "tailscale.com/health" "tailscale.com/net/netmon" + "tailscale.com/tsconst" "tailscale.com/types/logger" "tailscale.com/types/opt" "tailscale.com/types/preftype" "tailscale.com/util/eventbus" "tailscale.com/util/linuxfw" - "tailscale.com/util/multierr" "tailscale.com/version/distro" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) +} + var getDistroFunc = distro.Get const ( @@ -44,22 +55,14 @@ const ( ) type linuxRouter struct { - closed atomic.Bool - logf func(fmt string, args ...any) - tunname string - netMon *netmon.Monitor - health *health.Tracker - eventClient *eventbus.Client - ruleDeletedSub *eventbus.Subscriber[netmon.RuleDeleted] - rulesAddedPub *eventbus.Publisher[AddIPRules] - unregNetMon func() - addrs map[netip.Prefix]bool - routes 
map[netip.Prefix]bool - localRoutes map[netip.Prefix]bool - snatSubnetRoutes bool - statefulFiltering bool - netfilterMode preftype.NetfilterMode - netfilterKind string + closed atomic.Bool + logf func(fmt string, args ...any) + tunname string + netMon *netmon.Monitor + health *health.Tracker + eventClient *eventbus.Client + rulesAddedPub *eventbus.Publisher[AddIPRules] + unregNetMon func() // ruleRestorePending is whether a timer has been started to // restore deleted ip rules. @@ -77,11 +80,19 @@ type linuxRouter struct { cmd commandRunner nfr linuxfw.NetfilterRunner - magicsockPortV4 uint16 - magicsockPortV6 uint16 + mu sync.Mutex + addrs map[netip.Prefix]bool + routes map[netip.Prefix]bool + localRoutes map[netip.Prefix]bool + snatSubnetRoutes bool + statefulFiltering bool + netfilterMode preftype.NetfilterMode + netfilterKind string + magicsockPortV4 uint16 + magicsockPortV6 uint16 } -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { tunname, err := tunDev.Name() if err != nil { return nil, err @@ -94,13 +105,12 @@ func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Moni return newUserspaceRouterAdvanced(logf, tunname, netMon, cmd, health, bus) } -func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon.Monitor, cmd commandRunner, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { r := &linuxRouter{ logf: logf, tunname: tunname, netfilterMode: netfilterOff, netMon: netMon, - eventClient: bus.Client("router-linux"), health: health, cmd: cmd, @@ -108,9 +118,18 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon ipRuleFixLimiter: rate.NewLimiter(rate.Every(5*time.Second), 10), ipPolicyPrefBase: 5200, } - r.ruleDeletedSub = eventbus.Subscribe[netmon.RuleDeleted](r.eventClient) - r.rulesAddedPub = eventbus.Publish[AddIPRules](r.eventClient) - go r.consumeEventbusTopics() + ec := bus.Client("router-linux") + r.rulesAddedPub = eventbus.Publish[AddIPRules](ec) + eventbus.SubscribeFunc(ec, func(rs netmon.RuleDeleted) { + r.onIPRuleDeleted(rs.Table, rs.Priority) + }) + eventbus.SubscribeFunc(ec, func(pu router.PortUpdate) { + r.logf("portUpdate(port=%v, network=%s)", pu.UDPPort, pu.EndpointNetwork) + if err := r.updateMagicsockPort(pu.UDPPort, pu.EndpointNetwork); err != nil { + r.logf("updateMagicsockPort(port=%v, network=%s) failed: %v", pu.UDPPort, pu.EndpointNetwork, err) + } + }) + r.eventClient = ec if r.useIPCommand() { r.ipRuleAvailable = (cmd.run("ip", "rule") == nil) @@ -154,24 +173,6 @@ func newUserspaceRouterAdvanced(logf logger.Logf, tunname string, netMon *netmon return r, nil } -// consumeEventbusTopics consumes events from all [Conn]-relevant -// [eventbus.Subscriber]'s and passes them to their related handler. Events are -// always handled in the order they are received, i.e. the next event is not -// read until the previous event's handler has returned. It returns when the -// [portmapper.Mapping] subscriber is closed, which is interpreted to be the -// same as the [eventbus.Client] closing ([eventbus.Subscribers] are either -// all open or all closed). 
-func (r *linuxRouter) consumeEventbusTopics() { - for { - select { - case <-r.ruleDeletedSub.Done(): - return - case rulesDeleted := <-r.ruleDeletedSub.Events(): - r.onIPRuleDeleted(rulesDeleted.Table, rulesDeleted.Priority) - } - } -} - // ipCmdSupportsFwmask returns true if the system 'ip' binary supports using a // fwmark stanza with a mask specified. To our knowledge, everything except busybox // pre-1.33 supports this. @@ -346,7 +347,9 @@ func (r *linuxRouter) onIPRuleDeleted(table uint8, priority uint32) { } func (r *linuxRouter) Up() error { - if err := r.setNetfilterMode(netfilterOff); err != nil { + r.mu.Lock() + defer r.mu.Unlock() + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { return fmt.Errorf("setting netfilter mode: %w", err) } if err := r.addIPRules(); err != nil { @@ -360,6 +363,8 @@ func (r *linuxRouter) Up() error { } func (r *linuxRouter) Close() error { + r.mu.Lock() + defer r.mu.Unlock() r.closed.Store(true) if r.unregNetMon != nil { r.unregNetMon() @@ -371,7 +376,7 @@ func (r *linuxRouter) Close() error { if err := r.delIPRules(); err != nil { return err } - if err := r.setNetfilterMode(netfilterOff); err != nil { + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { return err } if err := r.delRoutes(); err != nil { @@ -385,10 +390,10 @@ func (r *linuxRouter) Close() error { return nil } -// setupNetfilter initializes the NetfilterRunner in r.nfr. It expects r.nfr +// setupNetfilterLocked initializes the NetfilterRunner in r.nfr. It expects r.nfr // to be nil, or the current netfilter to be set to netfilterOff. // kind should be either a linuxfw.FirewallMode, or the empty string for auto. -func (r *linuxRouter) setupNetfilter(kind string) error { +func (r *linuxRouter) setupNetfilterLocked(kind string) error { r.netfilterKind = kind var err error @@ -401,25 +406,27 @@ func (r *linuxRouter) setupNetfilter(kind string) error { } // Set implements the Router interface. -func (r *linuxRouter) Set(cfg *Config) error { +func (r *linuxRouter) Set(cfg *router.Config) error { + r.mu.Lock() + defer r.mu.Unlock() var errs []error if cfg == nil { cfg = &shutdownConfig } if cfg.NetfilterKind != r.netfilterKind { - if err := r.setNetfilterMode(netfilterOff); err != nil { + if err := r.setNetfilterModeLocked(netfilterOff); err != nil { err = fmt.Errorf("could not disable existing netfilter: %w", err) errs = append(errs, err) } else { r.nfr = nil - if err := r.setupNetfilter(cfg.NetfilterKind); err != nil { + if err := r.setupNetfilterLocked(cfg.NetfilterKind); err != nil { errs = append(errs, err) } } } - if err := r.setNetfilterMode(cfg.NetfilterMode); err != nil { + if err := r.setNetfilterModeLocked(cfg.NetfilterMode); err != nil { errs = append(errs, err) } @@ -461,11 +468,11 @@ func (r *linuxRouter) Set(cfg *Config) error { case cfg.StatefulFiltering == r.statefulFiltering: // state already correct, nothing to do. case cfg.StatefulFiltering: - if err := r.addStatefulRule(); err != nil { + if err := r.addStatefulRuleLocked(); err != nil { errs = append(errs, err) } default: - if err := r.delStatefulRule(); err != nil { + if err := r.delStatefulRuleLocked(); err != nil { errs = append(errs, err) } } @@ -478,7 +485,7 @@ func (r *linuxRouter) Set(cfg *Config) error { r.enableIPForwarding() } - return multierr.New(errs...) + return errors.Join(errs...) 
} var dockerStatefulFilteringWarnable = health.Register(&health.Warnable{ @@ -488,7 +495,7 @@ var dockerStatefulFilteringWarnable = health.Register(&health.Warnable{ Text: health.StaticMessage("Stateful filtering is enabled and Docker was detected; this may prevent Docker containers on this host from resolving DNS and connecting to Tailscale nodes. See https://tailscale.com/s/stateful-docker"), }) -func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *Config) { +func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *router.Config) { // If stateful filtering is disabled, clear the warning. if !r.statefulFiltering { r.health.SetHealthy(dockerStatefulFilteringWarnable) @@ -529,10 +536,12 @@ func (r *linuxRouter) updateStatefulFilteringWithDockerWarning(cfg *Config) { r.health.SetHealthy(dockerStatefulFilteringWarnable) } -// UpdateMagicsockPort implements the Router interface. -func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { +// updateMagicsockPort implements the Router interface. +func (r *linuxRouter) updateMagicsockPort(port uint16, network string) error { + r.mu.Lock() + defer r.mu.Unlock() if r.nfr == nil { - if err := r.setupNetfilter(r.netfilterKind); err != nil { + if err := r.setupNetfilterLocked(r.netfilterKind); err != nil { return fmt.Errorf("could not setup netfilter: %w", err) } } @@ -581,19 +590,17 @@ func (r *linuxRouter) UpdateMagicsockPort(port uint16, network string) error { return nil } -// setNetfilterMode switches the router to the given netfilter +// setNetfilterModeLocked switches the router to the given netfilter // mode. Netfilter state is created or deleted appropriately to // reflect the new mode, and r.snatSubnetRoutes is updated to reflect // the current state of subnet SNATing. -func (r *linuxRouter) setNetfilterMode(mode preftype.NetfilterMode) error { +func (r *linuxRouter) setNetfilterModeLocked(mode preftype.NetfilterMode) error { if !platformCanNetfilter() { mode = netfilterOff } if r.nfr == nil { - var err error - r.nfr, err = linuxfw.New(r.logf, r.netfilterKind) - if err != nil { + if err := r.setupNetfilterLocked(r.netfilterKind); err != nil { return err } } @@ -1239,14 +1246,14 @@ var baseIPRules = []netlink.Rule{ // main routing table. { Priority: 10, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: mainRouteTable.Num, }, // ...and then we try the 'default' table, for correctness, // even though it's been empty on every Linux system I've ever seen. { Priority: 30, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: defaultRouteTable.Num, }, // If neither of those matched (no default route on this system?) @@ -1254,7 +1261,7 @@ var baseIPRules = []netlink.Rule{ // to the tailscale routes, because that would create routing loops. { Priority: 50, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Type: unix.RTN_UNREACHABLE, }, // If we get to this point, capture all packets and send them @@ -1284,7 +1291,7 @@ var ubntIPRules = []netlink.Rule{ { Priority: 70, Invert: true, - Mark: linuxfw.TailscaleBypassMarkNum, + Mark: tsconst.LinuxBypassMarkNum, Table: tailscaleRouteTable.Num, }, } @@ -1312,7 +1319,7 @@ func (r *linuxRouter) justAddIPRules() error { // Note: r is a value type here; safe to mutate it. 
ru.Family = family.netlinkInt() if ru.Mark != 0 { - ru.Mask = linuxfw.TailscaleFwmarkMaskNum + ru.Mask = tsconst.LinuxFwmarkMaskNum } ru.Goto = -1 ru.SuppressIfgroup = -1 @@ -1345,7 +1352,7 @@ func (r *linuxRouter) addIPRulesWithIPCommand() error { } if rule.Mark != 0 { if r.fwmaskWorks() { - args = append(args, "fwmark", fmt.Sprintf("0x%x/%s", rule.Mark, linuxfw.TailscaleFwmarkMask)) + args = append(args, "fwmark", fmt.Sprintf("0x%x/%s", rule.Mark, tsconst.LinuxFwmarkMask)) } else { args = append(args, "fwmark", fmt.Sprintf("0x%x", rule.Mark)) } @@ -1472,9 +1479,9 @@ func (r *linuxRouter) delSNATRule() error { return nil } -// addStatefulRule adds a netfilter rule to perform stateful filtering from +// addStatefulRuleLocked adds a netfilter rule to perform stateful filtering from // subnets onto the tailnet. -func (r *linuxRouter) addStatefulRule() error { +func (r *linuxRouter) addStatefulRuleLocked() error { if r.netfilterMode == netfilterOff { return nil } @@ -1482,9 +1489,9 @@ func (r *linuxRouter) addStatefulRule() error { return r.nfr.AddStatefulRule(r.tunname) } -// delStatefulRule removes the netfilter rule to perform stateful filtering +// delStatefulRuleLocked removes the netfilter rule to perform stateful filtering // from subnets onto the tailnet. -func (r *linuxRouter) delStatefulRule() error { +func (r *linuxRouter) delStatefulRuleLocked() error { if r.netfilterMode == netfilterOff { return nil } diff --git a/wgengine/router/router_linux_test.go b/wgengine/router/osrouter/router_linux_test.go similarity index 98% rename from wgengine/router/router_linux_test.go rename to wgengine/router/osrouter/router_linux_test.go index b6a5a1ac04753..39210ddef14a2 100644 --- a/wgengine/router/router_linux_test.go +++ b/wgengine/router/osrouter/router_linux_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "errors" @@ -25,14 +25,18 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/net/tsaddr" + "tailscale.com/tsconst" "tailscale.com/tstest" "tailscale.com/types/logger" "tailscale.com/util/eventbus" "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/linuxfw" "tailscale.com/version/distro" + "tailscale.com/wgengine/router" ) +type Config = router.Config + func TestRouterStates(t *testing.T) { basic := ` ip rule add -4 pref 5210 fwmark 0x80000/0xff0000 table main @@ -375,7 +379,7 @@ ip route add throw 192.168.0.0/24 table 52` + basic, defer mon.Close() fake := NewFakeOS(t) - ht := new(health.Tracker) + ht := health.NewTracker(bus) router, err := newUserspaceRouterAdvanced(t.Logf, "tailscale0", mon, fake, ht, bus) router.(*linuxRouter).nfr = fake.nfr if err != nil { @@ -572,8 +576,8 @@ func (n *fakeIPTablesRunner) addBase4(tunname string) error { newRules := []struct{ chain, rule string }{ {"filter/ts-input", fmt.Sprintf("! -i %s -s %s -j RETURN", tunname, tsaddr.ChromeOSVMRange().String())}, {"filter/ts-input", fmt.Sprintf("! 
-i %s -s %s -j DROP", tunname, tsaddr.CGNATRange().String())}, - {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, - {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, {"filter/ts-forward", fmt.Sprintf("-o %s -s %s -j DROP", tunname, tsaddr.CGNATRange().String())}, {"filter/ts-forward", fmt.Sprintf("-o %s -j ACCEPT", tunname)}, } @@ -588,8 +592,8 @@ func (n *fakeIPTablesRunner) addBase4(tunname string) error { func (n *fakeIPTablesRunner) addBase6(tunname string) error { curIPT := n.ipt6 newRules := []struct{ chain, rule string }{ - {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, - {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-i %s -j MARK --set-mark %s/%s", tunname, tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, + {"filter/ts-forward", fmt.Sprintf("-m mark --mark %s/%s -j ACCEPT", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask)}, {"filter/ts-forward", fmt.Sprintf("-o %s -j ACCEPT", tunname)}, } for _, rule := range newRules { @@ -673,7 +677,7 @@ func (n *fakeIPTablesRunner) DelBase() error { } func (n *fakeIPTablesRunner) AddSNATRule() error { - newRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask) + newRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask) for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { if err := appendRule(n, ipt, "nat/ts-postrouting", newRule); err != nil { return err @@ -683,7 +687,7 @@ func (n *fakeIPTablesRunner) AddSNATRule() error { } func (n *fakeIPTablesRunner) DelSNATRule() error { - delRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", linuxfw.TailscaleSubnetRouteMark, linuxfw.TailscaleFwmarkMask) + delRule := fmt.Sprintf("-m mark --mark %s/%s -j MASQUERADE", tsconst.LinuxSubnetRouteMark, tsconst.LinuxFwmarkMask) for _, ipt := range []map[string][]string{n.ipt4, n.ipt6} { if err := deleteRule(n, ipt, "nat/ts-postrouting", delRule); err != nil { return err diff --git a/wgengine/router/router_openbsd.go b/wgengine/router/osrouter/router_openbsd.go similarity index 91% rename from wgengine/router/router_openbsd.go rename to wgengine/router/osrouter/router_openbsd.go index f91878b4c993d..55b485f0e7a9e 100644 --- a/wgengine/router/router_openbsd.go +++ b/wgengine/router/osrouter/router_openbsd.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "errors" @@ -17,10 +17,18 @@ import ( "tailscale.com/types/logger" "tailscale.com/util/eventbus" "tailscale.com/util/set" + "tailscale.com/wgengine/router" ) -// For now this router only supports the WireGuard userspace implementation. 
-// There is an experimental kernel version in the works for OpenBSD: +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanUp(logf, ifName) + }) +} + // https://git.zx2c4.com/wireguard-openbsd. type openbsdRouter struct { @@ -32,7 +40,7 @@ type openbsdRouter struct { routes set.Set[netip.Prefix] } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err @@ -68,7 +76,7 @@ func inet(p netip.Prefix) string { return "inet" } -func (r *openbsdRouter) Set(cfg *Config) error { +func (r *openbsdRouter) Set(cfg *router.Config) error { if cfg == nil { cfg = &shutdownConfig } @@ -230,13 +238,6 @@ func (r *openbsdRouter) Set(cfg *Config) error { return errq } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *openbsdRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *openbsdRouter) Close() error { cleanUp(r.logf, r.tunname) return nil diff --git a/wgengine/router/router_plan9.go b/wgengine/router/osrouter/router_plan9.go similarity index 86% rename from wgengine/router/router_plan9.go rename to wgengine/router/osrouter/router_plan9.go index fd6850ade3762..a5b461a6fff67 100644 --- a/wgengine/router/router_plan9.go +++ b/wgengine/router/osrouter/router_plan9.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "bufio" @@ -15,10 +15,19 @@ import ( "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" - "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func init() { + router.HookCleanUp.Set(func(logf logger.Logf, netMon *netmon.Monitor, ifName string) { + cleanAllTailscaleRoutes(logf) + }) + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon) + }) +} + +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor) (router.Router, error) { r := &plan9Router{ logf: logf, tundev: tundev, @@ -39,7 +48,7 @@ func (r *plan9Router) Up() error { return nil } -func (r *plan9Router) Set(cfg *Config) error { +func (r *plan9Router) Set(cfg *router.Config) error { if cfg == nil { cleanAllTailscaleRoutes(r.logf) return nil @@ -106,22 +115,11 @@ func (r *plan9Router) Set(cfg *Config) error { return nil } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. 
-func (r *plan9Router) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *plan9Router) Close() error { // TODO(bradfitz): unbind return nil } -func cleanUp(logf logger.Logf, _ string) { - cleanAllTailscaleRoutes(logf) -} - func cleanAllTailscaleRoutes(logf logger.Logf) { routes, err := os.OpenFile("/net/iproute", os.O_RDWR, 0) if err != nil { diff --git a/wgengine/router/router_userspace_bsd.go b/wgengine/router/osrouter/router_userspace_bsd.go similarity index 92% rename from wgengine/router/router_userspace_bsd.go rename to wgengine/router/osrouter/router_userspace_bsd.go index 0b7e4f36aa6e5..70ef2b6bf3ca9 100644 --- a/wgengine/router/router_userspace_bsd.go +++ b/wgengine/router/osrouter/router_userspace_bsd.go @@ -3,7 +3,7 @@ //go:build darwin || freebsd -package router +package osrouter import ( "fmt" @@ -19,8 +19,15 @@ import ( "tailscale.com/net/tsaddr" "tailscale.com/types/logger" "tailscale.com/version" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceBSDRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health) + }) +} + type userspaceBSDRouter struct { logf logger.Logf netMon *netmon.Monitor @@ -30,7 +37,7 @@ type userspaceBSDRouter struct { routes map[netip.Prefix]bool } -func newUserspaceBSDRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (Router, error) { +func newUserspaceBSDRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker) (router.Router, error) { tunname, err := tundev.Name() if err != nil { return nil, err @@ -99,7 +106,7 @@ func inet(p netip.Prefix) string { return "inet" } -func (r *userspaceBSDRouter) Set(cfg *Config) (reterr error) { +func (r *userspaceBSDRouter) Set(cfg *router.Config) (reterr error) { if cfg == nil { cfg = &shutdownConfig } @@ -199,13 +206,6 @@ func (r *userspaceBSDRouter) Set(cfg *Config) (reterr error) { return reterr } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. 
-func (r *userspaceBSDRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *userspaceBSDRouter) Close() error { return nil } diff --git a/wgengine/router/router_windows.go b/wgengine/router/osrouter/router_windows.go similarity index 95% rename from wgengine/router/router_windows.go rename to wgengine/router/osrouter/router_windows.go index 32d05110dca45..a1acbe3b67287 100644 --- a/wgengine/router/router_windows.go +++ b/wgengine/router/osrouter/router_windows.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "bufio" @@ -23,13 +23,20 @@ import ( "golang.org/x/sys/windows" "golang.zx2c4.com/wireguard/windows/tunnel/winipcfg" "tailscale.com/health" - "tailscale.com/logtail/backoff" "tailscale.com/net/dns" "tailscale.com/net/netmon" "tailscale.com/types/logger" + "tailscale.com/util/backoff" "tailscale.com/util/eventbus" + "tailscale.com/wgengine/router" ) +func init() { + router.HookNewUserspaceRouter.Set(func(opts router.NewOpts) (router.Router, error) { + return newUserspaceRouter(opts.Logf, opts.Tun, opts.NetMon, opts.Health, opts.Bus) + }) +} + type winRouter struct { logf func(fmt string, args ...any) netMon *netmon.Monitor // may be nil @@ -39,7 +46,7 @@ type winRouter struct { firewall *firewallTweaker } -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { +func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (router.Router, error) { nativeTun := tundev.(*tun.NativeTun) luid := winipcfg.LUID(nativeTun.LUID()) guid, err := luid.GUID() @@ -73,7 +80,7 @@ func (r *winRouter) Up() error { return nil } -func (r *winRouter) Set(cfg *Config) error { +func (r *winRouter) Set(cfg *router.Config) error { if cfg == nil { cfg = &shutdownConfig } @@ -107,13 +114,6 @@ func hasDefaultRoute(routes []netip.Prefix) bool { return false } -// UpdateMagicsockPort implements the Router interface. This implementation -// does nothing and returns nil because this router does not currently need -// to know what the magicsock UDP port is. -func (r *winRouter) UpdateMagicsockPort(_ uint16, _ string) error { - return nil -} - func (r *winRouter) Close() error { r.firewall.clear() @@ -124,10 +124,6 @@ func (r *winRouter) Close() error { return nil } -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. -} - // firewallTweaker changes the Windows firewall. Normally this wouldn't be so complicated, // but it can be REALLY SLOW to change the Windows firewall for reasons not understood. // Like 4 minutes slow. But usually it's tens of milliseconds. 
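
Note on the osrouter split above: each platform router now registers itself with wgengine/router through feature hooks in its init(), and router.New only succeeds if some package has performed that registration. A minimal sketch of how a build is expected to link the OS routers in (the package and import site below are assumptions for illustration; they are not part of this diff):

	// Package routerimport is a hypothetical wiring package; importing
	// osrouter is done purely for its side effect: its init() functions
	// call router.HookNewUserspaceRouter.Set and router.HookCleanUp.Set
	// before router.New / router.CleanUp are ever invoked.
	package routerimport

	import (
		_ "tailscale.com/wgengine/router/osrouter"
	)

Builds that leave this out (or omit the feature) fall through to the "built without OSRouter support" / "unsupported OS" errors added to router.New below.
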
diff --git a/wgengine/router/router_windows_test.go b/wgengine/router/osrouter/router_windows_test.go similarity index 95% rename from wgengine/router/router_windows_test.go rename to wgengine/router/osrouter/router_windows_test.go index 9989ddbc735a6..119b6a77867f9 100644 --- a/wgengine/router/router_windows_test.go +++ b/wgengine/router/osrouter/router_windows_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -package router +package osrouter import ( "path/filepath" diff --git a/wgengine/router/runner.go b/wgengine/router/osrouter/runner.go similarity index 99% rename from wgengine/router/runner.go rename to wgengine/router/osrouter/runner.go index 8fa068e335e66..7afb7fdc2088f 100644 --- a/wgengine/router/runner.go +++ b/wgengine/router/osrouter/runner.go @@ -3,7 +3,7 @@ //go:build linux -package router +package osrouter import ( "errors" diff --git a/wgengine/router/router.go b/wgengine/router/router.go index 25d1c08a29f4d..04cc898876557 100644 --- a/wgengine/router/router.go +++ b/wgengine/router/router.go @@ -6,10 +6,16 @@ package router import ( + "errors" + "fmt" "net/netip" "reflect" + "runtime" + "slices" "github.com/tailscale/wireguard-go/tun" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/net/netmon" "tailscale.com/types/logger" @@ -29,18 +35,34 @@ type Router interface { // implementation should handle gracefully. Set(*Config) error - // UpdateMagicsockPort tells the OS network stack what port magicsock - // is currently listening on, so it can be threaded through firewalls - // and such. This is distinct from Set() since magicsock may rebind - // ports independently from the Config changing. - // - // network should be either "udp4" or "udp6". - UpdateMagicsockPort(port uint16, network string) error - // Close closes the router. Close() error } +// NewOpts are the options passed to the NewUserspaceRouter hook. +type NewOpts struct { + Logf logger.Logf // required + Tun tun.Device // required + NetMon *netmon.Monitor // optional + Health *health.Tracker // required (but TODO: support optional later) + Bus *eventbus.Bus // required +} + +// PortUpdate is an eventbus value, reporting the port and address family +// magicsock is currently listening on, so it can be threaded through firewalls +// and such. +type PortUpdate struct { + UDPPort uint16 + EndpointNetwork string // either "udp4" or "udp6". +} + +// HookNewUserspaceRouter is the registration point for router implementations +// to register a constructor for userspace routers. It's meant for implementations +// in wgengine/router/osrouter. +// +// If no implementation is registered, [New] will return an error. +var HookNewUserspaceRouter feature.Hook[func(NewOpts) (Router, error)] + // New returns a new Router for the current platform, using the // provided tun device. 
// @@ -50,14 +72,33 @@ func New(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus, ) (Router, error) { logf = logger.WithPrefix(logf, "router: ") - return newUserspaceRouter(logf, tundev, netMon, health, bus) + if f, ok := HookNewUserspaceRouter.GetOk(); ok { + return f(NewOpts{ + Logf: logf, + Tun: tundev, + NetMon: netMon, + Health: health, + Bus: bus, + }) + } + if !buildfeatures.HasOSRouter { + return nil, errors.New("router: tailscaled was built without OSRouter support") + } + return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) } +// HookCleanUp is the optional registration point for router implementations +// to register a cleanup function for [CleanUp] to use. It's meant for +// implementations in wgengine/router/osrouter. +var HookCleanUp feature.Hook[func(_ logger.Logf, _ *netmon.Monitor, ifName string)] + // CleanUp restores the system network configuration to its original state // in case the Tailscale daemon terminated without closing the router. // No other state needs to be instantiated before this runs. func CleanUp(logf logger.Logf, netMon *netmon.Monitor, interfaceName string) { - cleanUp(logf, interfaceName) + if f, ok := HookCleanUp.GetOk(); ok { + f(logf, netMon, interfaceName) + } } // Config is the subset of Tailscale configuration that is relevant to @@ -94,7 +135,7 @@ type Config struct { SNATSubnetRoutes bool // SNAT traffic to local subnets StatefulFiltering bool // Apply stateful filtering to inbound connections NetfilterMode preftype.NetfilterMode // how much to manage netfilter rules - NetfilterKind string // what kind of netfilter to use (nftables, iptables) + NetfilterKind string // what kind of netfilter to use ("nftables", "iptables", or "" to auto-detect) } func (a *Config) Equal(b *Config) bool { @@ -107,7 +148,14 @@ func (a *Config) Equal(b *Config) bool { return reflect.DeepEqual(a, b) } -// shutdownConfig is a routing configuration that removes all router -// state from the OS. It's the config used when callers pass in a nil -// Config. -var shutdownConfig = Config{} +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + c2 := *c + c2.LocalAddrs = slices.Clone(c.LocalAddrs) + c2.Routes = slices.Clone(c.Routes) + c2.LocalRoutes = slices.Clone(c.LocalRoutes) + c2.SubnetRoutes = slices.Clone(c.SubnetRoutes) + return &c2 +} diff --git a/wgengine/router/router_android.go b/wgengine/router/router_android.go deleted file mode 100644 index de680606f19cf..0000000000000 --- a/wgengine/router/router_android.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build android - -package router - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { - // Note, this codepath is _not_ used when building the android app - // from github.com/tailscale/tailscale-android. The android app - // constructs its own wgengine with a custom router implementation - // that plugs into Android networking APIs. - // - // In practice, the only place this fake router gets used is when - // you build a tsnet app for android, in which case we don't want - // to touch the OS network stack and a no-op router is correct. 
- return NewFake(logf), nil -} - -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. -} diff --git a/wgengine/router/router_darwin.go b/wgengine/router/router_darwin.go deleted file mode 100644 index ebb2615a0ed1f..0000000000000 --- a/wgengine/router/router_darwin.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -package router - -import ( - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tundev tun.Device, netMon *netmon.Monitor, health *health.Tracker, bus *eventbus.Bus) (Router, error) { - return newUserspaceBSDRouter(logf, tundev, netMon, health) -} - -func cleanUp(logger.Logf, string) { - // Nothing to do. -} diff --git a/wgengine/router/router_default.go b/wgengine/router/router_default.go deleted file mode 100644 index 190575973a4ee..0000000000000 --- a/wgengine/router/router_default.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !windows && !linux && !darwin && !openbsd && !freebsd && !plan9 - -package router - -import ( - "fmt" - "runtime" - - "github.com/tailscale/wireguard-go/tun" - "tailscale.com/health" - "tailscale.com/net/netmon" - "tailscale.com/types/logger" - "tailscale.com/util/eventbus" -) - -func newUserspaceRouter(logf logger.Logf, tunDev tun.Device, netMon *netmon.Monitor, health *health.Tracker, _ *eventbus.Bus) (Router, error) { - return nil, fmt.Errorf("unsupported OS %q", runtime.GOOS) -} - -func cleanUp(logf logger.Logf, interfaceName string) { - // Nothing to do here. -} diff --git a/wgengine/router/router_fake.go b/wgengine/router/router_fake.go index 549867ecaa342..db35fc9eebe15 100644 --- a/wgengine/router/router_fake.go +++ b/wgengine/router/router_fake.go @@ -27,11 +27,6 @@ func (r fakeRouter) Set(cfg *Config) error { return nil } -func (r fakeRouter) UpdateMagicsockPort(_ uint16, _ string) error { - r.logf("[v1] warning: fakeRouter.UpdateMagicsockPort: not implemented.") - return nil -} - func (r fakeRouter) Close() error { r.logf("[v1] warning: fakeRouter.Close: not implemented.") return nil diff --git a/wgengine/router/router_test.go b/wgengine/router/router_test.go index 8842173d7e4b4..fd17b8c5d5297 100644 --- a/wgengine/router/router_test.go +++ b/wgengine/router/router_test.go @@ -11,15 +11,6 @@ import ( "tailscale.com/types/preftype" ) -//lint:ignore U1000 used in Windows/Linux tests only -func mustCIDRs(ss ...string) []netip.Prefix { - var ret []netip.Prefix - for _, s := range ss { - ret = append(ret, netip.MustParsePrefix(s)) - } - return ret -} - func TestConfigEqual(t *testing.T) { testedFields := []string{ "LocalAddrs", "Routes", "LocalRoutes", "NewMTU", diff --git a/wgengine/userspace.go b/wgengine/userspace.go index 4a9f321430c12..8856a3eaf4d11 100644 --- a/wgengine/userspace.go +++ b/wgengine/userspace.go @@ -10,8 +10,10 @@ import ( "errors" "fmt" "io" + "maps" "math" "net/netip" + "reflect" "runtime" "slices" "strings" @@ -23,18 +25,18 @@ import ( "tailscale.com/control/controlknobs" "tailscale.com/drive" "tailscale.com/envknob" + "tailscale.com/feature" + "tailscale.com/feature/buildfeatures" "tailscale.com/health" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" "tailscale.com/net/dns/resolver" - "tailscale.com/net/flowtrack" "tailscale.com/net/ipset" "tailscale.com/net/netmon" 
"tailscale.com/net/packet" "tailscale.com/net/sockstats" "tailscale.com/net/tsaddr" "tailscale.com/net/tsdial" - "tailscale.com/net/tshttpproxy" "tailscale.com/net/tstun" "tailscale.com/syncs" "tailscale.com/tailcfg" @@ -45,8 +47,8 @@ import ( "tailscale.com/types/logger" "tailscale.com/types/netmap" "tailscale.com/types/views" + "tailscale.com/util/checkchange" "tailscale.com/util/clientmetric" - "tailscale.com/util/deephash" "tailscale.com/util/eventbus" "tailscale.com/util/mak" "tailscale.com/util/set" @@ -92,27 +94,26 @@ const networkLoggerUploadTimeout = 5 * time.Second type userspaceEngine struct { // eventBus will eventually become required, but for now may be nil. - // TODO(creachadair): Enforce that this is non-nil at construction. - eventBus *eventbus.Bus - - logf logger.Logf - wgLogger *wglog.Logger // a wireguard-go logging wrapper - reqCh chan struct{} - waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool - timeNow func() mono.Time - tundev *tstun.Wrapper - wgdev *device.Device - router router.Router - dialer *tsdial.Dialer - confListenPort uint16 // original conf.ListenPort - dns *dns.Manager - magicConn *magicsock.Conn - netMon *netmon.Monitor - health *health.Tracker - netMonOwned bool // whether we created netMon (and thus need to close it) - netMonUnregister func() // unsubscribes from changes; used regardless of netMonOwned - birdClient BIRDClient // or nil - controlKnobs *controlknobs.Knobs // or nil + eventBus *eventbus.Bus + eventClient *eventbus.Client + + logf logger.Logf + wgLogger *wglog.Logger // a wireguard-go logging wrapper + reqCh chan struct{} + waitCh chan struct{} // chan is closed when first Close call completes; contrast with closing bool + timeNow func() mono.Time + tundev *tstun.Wrapper + wgdev *device.Device + router router.Router + dialer *tsdial.Dialer + confListenPort uint16 // original conf.ListenPort + dns *dns.Manager + magicConn *magicsock.Conn + netMon *netmon.Monitor + health *health.Tracker + netMonOwned bool // whether we created netMon (and thus need to close it) + birdClient BIRDClient // or nil + controlKnobs *controlknobs.Knobs // or nil testMaybeReconfigHook func() // for tests; if non-nil, fires if maybeReconfigWireguardLocked called @@ -128,11 +129,11 @@ type userspaceEngine struct { wgLock sync.Mutex // serializes all wgdev operations; see lock order comment below lastCfgFull wgcfg.Config lastNMinPeers int - lastRouterSig deephash.Sum // of router.Config - lastEngineSigFull deephash.Sum // of full wireguard config - lastEngineSigTrim deephash.Sum // of trimmed wireguard config - lastDNSConfig *dns.Config - lastIsSubnetRouter bool // was the node a primary subnet router in the last run. + lastRouter *router.Config + lastEngineFull *wgcfg.Config // of full wireguard config, not trimmed + lastEngineInputs *maybeReconfigInputs + lastDNSConfig dns.ConfigView // or invalid if none + lastIsSubnetRouter bool // was the node a primary subnet router in the last run. 
recvActivityAt map[key.NodePublic]mono.Time trimmedNodes map[key.NodePublic]bool // set of node keys of peers currently excluded from wireguard config sentActivityAt map[netip.Addr]*mono.Time // value is accessed atomically @@ -146,7 +147,7 @@ type userspaceEngine struct { statusCallback StatusCallback peerSequence []key.NodePublic endpoints []tailcfg.Endpoint - pendOpen map[flowtrack.Tuple]*pendingOpenFlow // see pendopen.go + pendOpen map[flowtrackTuple]*pendingOpenFlow // see pendopen.go // pongCallback is the map of response handlers waiting for disco or TSMP // pong callbacks. The map key is a random slice of bytes. @@ -311,6 +312,9 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } if conf.Dialer == nil { conf.Dialer = &tsdial.Dialer{Logf: logf} + if conf.EventBus != nil { + conf.Dialer.SetBus(conf.EventBus) + } } var tsTUNDev *tstun.Wrapper @@ -378,6 +382,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) tunName, _ := conf.Tun.Name() conf.Dialer.SetTUNName(tunName) conf.Dialer.SetNetMon(e.netMon) + conf.Dialer.SetBus(e.eventBus) e.dns = dns.NewManager(logf, conf.DNS, e.health, conf.Dialer, fwdDNSLinkSelector{e, tunName}, conf.ControlKnobs, runtime.GOOS) // TODO: there's probably a better place for this @@ -385,13 +390,6 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) logf("link state: %+v", e.netMon.InterfaceState()) - unregisterMonWatch := e.netMon.RegisterChangeCallback(func(delta *netmon.ChangeDelta) { - tshttpproxy.InvalidateCache() - e.linkChange(delta) - }) - closePool.addFunc(unregisterMonWatch) - e.netMonUnregister = unregisterMonWatch - endpointsFn := func(endpoints []tailcfg.Endpoint) { e.mu.Lock() e.endpoints = append(e.endpoints[:0], endpoints...) 
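
The hunk below drops the engine's onPortUpdate callback (and, just above, its direct netmon change-callback registration); both flows now travel over the eventbus instead. A small sketch of the publishing side for the port change, assuming the Publisher returned by eventbus.Publish has a Publish method as its use elsewhere in this diff suggests; magicsock's real wiring is not shown here:

	// Hypothetical publisher: whoever rebinds the magicsock UDP socket
	// announces the new port and address family on the shared bus...
	pub := eventbus.Publish[router.PortUpdate](bus.Client("magicsock"))
	pub.Publish(router.PortUpdate{UDPPort: 41641, EndpointNetwork: "udp4"})

	// ...and the eventbus.SubscribeFunc(ec, func(pu router.PortUpdate) {...})
	// handler added in router_linux.go above feeds it to updateMagicsockPort.
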
@@ -399,27 +397,21 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) e.RequestStatus() } - onPortUpdate := func(port uint16, network string) { - e.logf("onPortUpdate(port=%v, network=%s)", port, network) - - if err := e.router.UpdateMagicsockPort(port, network); err != nil { - e.logf("UpdateMagicsockPort(port=%v, network=%s) failed: %v", port, network, err) - } - } magicsockOpts := magicsock.Options{ - EventBus: e.eventBus, - Logf: logf, - Port: conf.ListenPort, - EndpointsFunc: endpointsFn, - DERPActiveFunc: e.RequestStatus, - IdleFunc: e.tundev.IdleDuration, - NoteRecvActivity: e.noteRecvActivity, - NetMon: e.netMon, - HealthTracker: e.health, - Metrics: conf.Metrics, - ControlKnobs: conf.ControlKnobs, - OnPortUpdate: onPortUpdate, - PeerByKeyFunc: e.PeerByKey, + EventBus: e.eventBus, + Logf: logf, + Port: conf.ListenPort, + EndpointsFunc: endpointsFn, + DERPActiveFunc: e.RequestStatus, + IdleFunc: e.tundev.IdleDuration, + NetMon: e.netMon, + HealthTracker: e.health, + Metrics: conf.Metrics, + ControlKnobs: conf.ControlKnobs, + PeerByKeyFunc: e.PeerByKey, + } + if buildfeatures.HasLazyWG { + magicsockOpts.NoteRecvActivity = e.noteRecvActivity } var err error @@ -437,7 +429,7 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } e.tundev.PreFilterPacketOutboundToWireGuardEngineIntercept = e.handleLocalPackets - if envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { + if buildfeatures.HasDebug && envknob.BoolDefaultTrue("TS_DEBUG_CONNECT_FAILURES") { if e.tundev.PreFilterPacketInboundFromWireGuard != nil { return nil, errors.New("unexpected PreFilterIn already set") } @@ -546,6 +538,14 @@ func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) } } + ec := e.eventBus.Client("userspaceEngine") + eventbus.SubscribeFunc(ec, func(cd netmon.ChangeDelta) { + if f, ok := feature.HookProxyInvalidateCache.GetOk(); ok { + f() + } + e.linkChange(&cd) + }) + e.eventClient = ec e.logf("Engine created.") return e, nil } @@ -702,6 +702,29 @@ func (e *userspaceEngine) isActiveSinceLocked(nk key.NodePublic, ip netip.Addr, return timePtr.LoadAtomic().After(t) } +// maybeReconfigInputs holds the inputs to the maybeReconfigWireguardLocked +// function. If these things don't change between calls, there's nothing to do. +type maybeReconfigInputs struct { + WGConfig *wgcfg.Config + TrimmedNodes map[key.NodePublic]bool + TrackNodes views.Slice[key.NodePublic] + TrackIPs views.Slice[netip.Addr] +} + +func (i *maybeReconfigInputs) Equal(o *maybeReconfigInputs) bool { + return reflect.DeepEqual(i, o) +} + +func (i *maybeReconfigInputs) Clone() *maybeReconfigInputs { + if i == nil { + return nil + } + v := *i + v.WGConfig = i.WGConfig.Clone() + v.TrimmedNodes = maps.Clone(i.TrimmedNodes) + return &v +} + // discoChanged are the set of peers whose disco keys have changed, implying they've restarted. // If a peer is in this set and was previously in the live wireguard config, // it needs to be first removed and then re-added to flush out its wireguard session key. @@ -727,15 +750,22 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node // the past 5 minutes. That's more than WireGuard's key // rotation time anyway so it's no harm if we remove it // later if it's been inactive. 
- activeCutoff := e.timeNow().Add(-lazyPeerIdleThreshold) + var activeCutoff mono.Time + if buildfeatures.HasLazyWG { + activeCutoff = e.timeNow().Add(-lazyPeerIdleThreshold) + } // Not all peers can be trimmed from the network map (see // isTrimmablePeer). For those that are trimmable, keep track of // their NodeKey and Tailscale IPs. These are the ones we'll need // to install tracking hooks for to watch their send/receive // activity. - trackNodes := make([]key.NodePublic, 0, len(full.Peers)) - trackIPs := make([]netip.Addr, 0, len(full.Peers)) + var trackNodes []key.NodePublic + var trackIPs []netip.Addr + if buildfeatures.HasLazyWG { + trackNodes = make([]key.NodePublic, 0, len(full.Peers)) + trackIPs = make([]netip.Addr, 0, len(full.Peers)) + } // Don't re-alloc the map; the Go compiler optimizes map clears as of // Go 1.11, so we can re-use the existing + allocated map. @@ -749,7 +779,7 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node for i := range full.Peers { p := &full.Peers[i] nk := p.PublicKey - if !e.isTrimmablePeer(p, len(full.Peers)) { + if !buildfeatures.HasLazyWG || !e.isTrimmablePeer(p, len(full.Peers)) { min.Peers = append(min.Peers, *p) if discoChanged[nk] { needRemoveStep = true @@ -773,16 +803,18 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node } e.lastNMinPeers = len(min.Peers) - if changed := deephash.Update(&e.lastEngineSigTrim, &struct { - WGConfig *wgcfg.Config - TrimmedNodes map[key.NodePublic]bool - TrackNodes []key.NodePublic - TrackIPs []netip.Addr - }{&min, e.trimmedNodes, trackNodes, trackIPs}); !changed { + if changed := checkchange.Update(&e.lastEngineInputs, &maybeReconfigInputs{ + WGConfig: &min, + TrimmedNodes: e.trimmedNodes, + TrackNodes: views.SliceOf(trackNodes), + TrackIPs: views.SliceOf(trackIPs), + }); !changed { return nil } - e.updateActivityMapsLocked(trackNodes, trackIPs) + if buildfeatures.HasLazyWG { + e.updateActivityMapsLocked(trackNodes, trackIPs) + } if needRemoveStep { minner := min @@ -818,6 +850,9 @@ func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Node // // e.wgLock must be held. func (e *userspaceEngine) updateActivityMapsLocked(trackNodes []key.NodePublic, trackIPs []netip.Addr) { + if !buildfeatures.HasLazyWG { + return + } // Generate the new map of which nodekeys we want to track // receive times for. 
mr := map[key.NodePublic]mono.Time{} // TODO: only recreate this if set of keys changed @@ -902,7 +937,6 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, e.wgLock.Lock() defer e.wgLock.Unlock() e.tundev.SetWGConfig(cfg) - e.lastDNSConfig = dnsCfg peerSet := make(set.Set[key.NodePublic], len(cfg.Peers)) e.mu.Lock() @@ -922,22 +956,24 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, peerMTUEnable := e.magicConn.ShouldPMTUD() isSubnetRouter := false - if e.birdClient != nil && nm != nil && nm.SelfNode.Valid() { + if buildfeatures.HasBird && e.birdClient != nil && nm != nil && nm.SelfNode.Valid() { isSubnetRouter = hasOverlap(nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs()) e.logf("[v1] Reconfig: hasOverlap(%v, %v) = %v; isSubnetRouter=%v lastIsSubnetRouter=%v", nm.SelfNode.PrimaryRoutes(), nm.SelfNode.Hostinfo().RoutableIPs(), isSubnetRouter, isSubnetRouter, e.lastIsSubnetRouter) } - isSubnetRouterChanged := isSubnetRouter != e.lastIsSubnetRouter + isSubnetRouterChanged := buildfeatures.HasAdvertiseRoutes && isSubnetRouter != e.lastIsSubnetRouter + + engineChanged := checkchange.Update(&e.lastEngineFull, cfg) + routerChanged := checkchange.Update(&e.lastRouter, routerCfg) + dnsChanged := buildfeatures.HasDNS && !e.lastDNSConfig.Equal(dnsCfg.View()) + if dnsChanged { + e.lastDNSConfig = dnsCfg.View() + } - engineChanged := deephash.Update(&e.lastEngineSigFull, cfg) - routerChanged := deephash.Update(&e.lastRouterSig, &struct { - RouterConfig *router.Config - DNSConfig *dns.Config - }{routerCfg, dnsCfg}) listenPortChanged := listenPort != e.magicConn.LocalPort() peerMTUChanged := peerMTUEnable != e.magicConn.PeerMTUEnabled() - if !engineChanged && !routerChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { + if !engineChanged && !routerChanged && !dnsChanged && !listenPortChanged && !isSubnetRouterChanged && !peerMTUChanged { return ErrNoChanges } newLogIDs := cfg.NetworkLogging @@ -946,7 +982,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, netLogIDsWasValid := !oldLogIDs.NodeID.IsZero() && !oldLogIDs.DomainID.IsZero() netLogIDsChanged := netLogIDsNowValid && netLogIDsWasValid && newLogIDs != oldLogIDs netLogRunning := netLogIDsNowValid && !routerCfg.Equal(&router.Config{}) - if envknob.NoLogsNoSupport() { + if !buildfeatures.HasNetLog || envknob.NoLogsNoSupport() { netLogRunning = false } @@ -955,7 +991,9 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // instead have ipnlocal populate a map of DNS IP => linkName and // put that in the *dns.Config instead, and plumb it down to the // dns.Manager. Maybe also with isLocalAddr above. - e.isDNSIPOverTailscale.Store(ipset.NewContainsIPFunc(views.SliceOf(dnsIPsOverTailscale(dnsCfg, routerCfg)))) + if buildfeatures.HasDNS { + e.isDNSIPOverTailscale.Store(ipset.NewContainsIPFunc(views.SliceOf(dnsIPsOverTailscale(dnsCfg, routerCfg)))) + } // See if any peers have changed disco keys, which means they've restarted. // If so, we need to update the wireguard-go/device.Device in two phases: @@ -1001,7 +1039,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // Shutdown the network logger because the IDs changed. // Let it be started back up by subsequent logic. 
- if netLogIDsChanged && e.networkLogger.Running() { + if buildfeatures.HasNetLog && netLogIDsChanged && e.networkLogger.Running() { e.logf("wgengine: Reconfig: shutting down network logger") ctx, cancel := context.WithTimeout(context.Background(), networkLoggerUploadTimeout) defer cancel() @@ -1012,12 +1050,12 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // Startup the network logger. // Do this before configuring the router so that we capture initial packets. - if netLogRunning && !e.networkLogger.Running() { + if buildfeatures.HasNetLog && netLogRunning && !e.networkLogger.Running() { nid := cfg.NetworkLogging.NodeID tid := cfg.NetworkLogging.DomainID logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled e.logf("wgengine: Reconfig: starting up network logger (node:%s tailnet:%s)", nid.Public(), tid.Public()) - if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, logExitFlowEnabled); err != nil { + if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, e.eventBus, logExitFlowEnabled); err != nil { e.logf("wgengine: Reconfig: error starting up network logger: %v", err) } e.networkLogger.ReconfigRoutes(routerCfg) @@ -1031,7 +1069,18 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, if err != nil { return err } + } + // We've historically re-set DNS even after just a router change. While + // refactoring in tailscale/tailscale#17448 and + // tailscale/tailscale#17499, I'm erring on the side of keeping that + // historical quirk for now (2025-10-08), lest it's load-bearing in + // unexpected ways. + // + // TODO(bradfitz): try to do the "configuring DNS" part below only if + // dnsChanged, not routerChanged. The "resolver.ShouldUseRoutes" part + // probably needs to keep happening for both. + if buildfeatures.HasDNS && (routerChanged || dnsChanged) { if resolver.ShouldUseRoutes(e.controlKnobs) { e.logf("wgengine: Reconfig: user dialer") e.dialer.SetRoutes(routerCfg.Routes, routerCfg.LocalRoutes) @@ -1043,7 +1092,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, // DNS managers refuse to apply settings if the device has no // assigned address.
e.logf("wgengine: Reconfig: configuring DNS") - err = e.dns.Set(*dnsCfg) + err := e.dns.Set(*dnsCfg) e.health.SetDNSHealth(err) if err != nil { return err @@ -1065,7 +1114,7 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, } } - if isSubnetRouterChanged && e.birdClient != nil { + if buildfeatures.HasBird && isSubnetRouterChanged && e.birdClient != nil { e.logf("wgengine: Reconfig: configuring BIRD") var err error if isSubnetRouter { @@ -1208,6 +1257,7 @@ func (e *userspaceEngine) RequestStatus() { } func (e *userspaceEngine) Close() { + e.eventClient.Close() e.mu.Lock() if e.closing { e.mu.Unlock() @@ -1219,7 +1269,6 @@ func (e *userspaceEngine) Close() { r := bufio.NewReader(strings.NewReader("")) e.wgdev.IpcSetOperation(r) e.magicConn.Close() - e.netMonUnregister() if e.netMonOwned { e.netMon.Close() } @@ -1276,8 +1325,8 @@ func (e *userspaceEngine) linkChange(delta *netmon.ChangeDelta) { e.wgLock.Lock() dnsCfg := e.lastDNSConfig e.wgLock.Unlock() - if dnsCfg != nil { - if err := e.dns.Set(*dnsCfg); err != nil { + if dnsCfg.Valid() { + if err := e.dns.Set(*dnsCfg.AsStruct()); err != nil { e.logf("wgengine: error setting DNS config after major link change: %v", err) } else if err := e.reconfigureVPNIfNecessary(); err != nil { e.logf("wgengine: error reconfiguring VPN after major link change: %v", err) @@ -1637,6 +1686,9 @@ var ( ) func (e *userspaceEngine) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } e.tundev.InstallCaptureHook(cb) e.magicConn.InstallCaptureHook(cb) } diff --git a/wgengine/userspace_ext_test.go b/wgengine/userspace_ext_test.go index 5e7d1ce6a517d..8e7bbb7a9c5c9 100644 --- a/wgengine/userspace_ext_test.go +++ b/wgengine/userspace_ext_test.go @@ -21,7 +21,7 @@ func TestIsNetstack(t *testing.T) { tstest.WhileTestRunningLogger(t), wgengine.Config{ SetSubsystem: sys.Set, - HealthTracker: sys.HealthTracker(), + HealthTracker: sys.HealthTracker.Get(), Metrics: sys.UserMetricsRegistry(), EventBus: sys.Bus.Get(), }, @@ -73,7 +73,7 @@ func TestIsNetstackRouter(t *testing.T) { } conf := tt.conf conf.SetSubsystem = sys.Set - conf.HealthTracker = sys.HealthTracker() + conf.HealthTracker = sys.HealthTracker.Get() conf.Metrics = sys.UserMetricsRegistry() conf.EventBus = sys.Bus.Get() e, err := wgengine.NewUserspaceEngine(logger.Discard, conf) diff --git a/wgengine/userspace_test.go b/wgengine/userspace_test.go index 87a36c6734f08..89d75b98adafb 100644 --- a/wgengine/userspace_test.go +++ b/wgengine/userspace_test.go @@ -25,7 +25,7 @@ import ( "tailscale.com/types/key" "tailscale.com/types/netmap" "tailscale.com/types/opt" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" "tailscale.com/wgengine/router" "tailscale.com/wgengine/wgcfg" @@ -101,10 +101,9 @@ func nodeViews(v []*tailcfg.Node) []tailcfg.NodeView { } func TestUserspaceEngineReconfig(t *testing.T) { - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) - ht := new(health.Tracker) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { @@ -170,12 +169,11 @@ func TestUserspaceEnginePortReconfig(t *testing.T) { var knobs controlknobs.Knobs - bus := eventbus.New() - defer bus.Close() + bus := eventbustest.NewBus(t) // Keep making a wgengine until we find an unused port var ue *userspaceEngine - ht := new(health.Tracker) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) for i := 
range 100 { attempt := uint16(defaultPort + i) @@ -258,9 +256,8 @@ func TestUserspaceEnginePeerMTUReconfig(t *testing.T) { var knobs controlknobs.Knobs - bus := eventbus.New() - defer bus.Close() - ht := new(health.Tracker) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, &knobs, ht, reg, bus) if err != nil { diff --git a/wgengine/watchdog.go b/wgengine/watchdog.go index 74a1917488dd8..0500e6f7fd4c7 100644 --- a/wgengine/watchdog.go +++ b/wgengine/watchdog.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause -//go:build !js +//go:build !js && !ts_omit_debug package wgengine @@ -15,6 +15,7 @@ import ( "time" "tailscale.com/envknob" + "tailscale.com/feature/buildfeatures" "tailscale.com/ipn/ipnstate" "tailscale.com/net/dns" "tailscale.com/net/packet" @@ -163,6 +164,9 @@ func (e *watchdogEngine) Done() <-chan struct{} { } func (e *watchdogEngine) InstallCaptureHook(cb packet.CaptureCallback) { + if !buildfeatures.HasCapture { + return + } e.wrap.InstallCaptureHook(cb) } diff --git a/wgengine/watchdog_js.go b/wgengine/watchdog_js.go deleted file mode 100644 index 872ce36d5fd5d..0000000000000 --- a/wgengine/watchdog_js.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build js - -package wgengine - -import "tailscale.com/net/dns/resolver" - -type watchdogEngine struct { - Engine - wrap Engine -} - -func (e *watchdogEngine) GetResolver() (r *resolver.Resolver, ok bool) { - return nil, false -} diff --git a/wgengine/watchdog_omit.go b/wgengine/watchdog_omit.go new file mode 100644 index 0000000000000..1d175b41a87eb --- /dev/null +++ b/wgengine/watchdog_omit.go @@ -0,0 +1,8 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build js || ts_omit_debug + +package wgengine + +func NewWatchdog(e Engine) Engine { return e } diff --git a/wgengine/watchdog_test.go b/wgengine/watchdog_test.go index a54a0d3fa1e13..35fd8f33105e6 100644 --- a/wgengine/watchdog_test.go +++ b/wgengine/watchdog_test.go @@ -9,7 +9,7 @@ import ( "time" "tailscale.com/health" - "tailscale.com/util/eventbus" + "tailscale.com/util/eventbus/eventbustest" "tailscale.com/util/usermetric" ) @@ -25,9 +25,8 @@ func TestWatchdog(t *testing.T) { t.Run("default watchdog does not fire", func(t *testing.T) { t.Parallel() - bus := eventbus.New() - defer bus.Close() - ht := new(health.Tracker) + bus := eventbustest.NewBus(t) + ht := health.NewTracker(bus) reg := new(usermetric.Registry) e, err := NewFakeUserspaceEngine(t.Logf, 0, ht, reg, bus) if err != nil { diff --git a/wgengine/wgcfg/config.go b/wgengine/wgcfg/config.go index 154dc0a304773..926964a4bdc20 100644 --- a/wgengine/wgcfg/config.go +++ b/wgengine/wgcfg/config.go @@ -6,6 +6,7 @@ package wgcfg import ( "net/netip" + "slices" "tailscale.com/tailcfg" "tailscale.com/types/key" @@ -35,6 +36,20 @@ type Config struct { } } +func (c *Config) Equal(o *Config) bool { + if c == nil || o == nil { + return c == o + } + return c.Name == o.Name && + c.NodeID == o.NodeID && + c.PrivateKey.Equal(o.PrivateKey) && + c.MTU == o.MTU && + c.NetworkLogging == o.NetworkLogging && + slices.Equal(c.Addresses, o.Addresses) && + slices.Equal(c.DNS, o.DNS) && + slices.EqualFunc(c.Peers, o.Peers, Peer.Equal) +} + type Peer struct { PublicKey key.NodePublic DiscoKey key.DiscoPublic // present only so we can handle restarts within wgengine, not passed to 
WireGuard @@ -50,6 +65,24 @@ type Peer struct { WGEndpoint key.NodePublic } +func addrPtrEq(a, b *netip.Addr) bool { + if a == nil || b == nil { + return a == b + } + return *a == *b +} + +func (p Peer) Equal(o Peer) bool { + return p.PublicKey == o.PublicKey && + p.DiscoKey == o.DiscoKey && + slices.Equal(p.AllowedIPs, o.AllowedIPs) && + p.IsJailed == o.IsJailed && + p.PersistentKeepalive == o.PersistentKeepalive && + addrPtrEq(p.V4MasqAddr, o.V4MasqAddr) && + addrPtrEq(p.V6MasqAddr, o.V6MasqAddr) && + p.WGEndpoint == o.WGEndpoint +} + // PeerWithKey returns the Peer with key k and reports whether it was found. func (config Config) PeerWithKey(k key.NodePublic) (Peer, bool) { for _, p := range config.Peers { diff --git a/wgengine/wgcfg/config_test.go b/wgengine/wgcfg/config_test.go new file mode 100644 index 0000000000000..5ac3b7cd56376 --- /dev/null +++ b/wgengine/wgcfg/config_test.go @@ -0,0 +1,41 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package wgcfg + +import ( + "reflect" + "testing" +) + +// Tests that [Config.Equal] tests all fields of [Config], even ones +// that might get added in the future. +func TestConfigEqual(t *testing.T) { + rt := reflect.TypeFor[Config]() + for i := range rt.NumField() { + sf := rt.Field(i) + switch sf.Name { + case "Name", "NodeID", "PrivateKey", "MTU", "Addresses", "DNS", "Peers", + "NetworkLogging": + // These are compared in [Config.Equal]. + default: + t.Errorf("Have you added field %q to Config.Equal? Do so if not, and then update TestConfigEqual", sf.Name) + } + } +} + +// Tests that [Peer.Equal] tests all fields of [Peer], even ones +// that might get added in the future. +func TestPeerEqual(t *testing.T) { + rt := reflect.TypeFor[Peer]() + for i := range rt.NumField() { + sf := rt.Field(i) + switch sf.Name { + case "PublicKey", "DiscoKey", "AllowedIPs", "IsJailed", + "PersistentKeepalive", "V4MasqAddr", "V6MasqAddr", "WGEndpoint": + // These are compared in [Peer.Equal]. + default: + t.Errorf("Have you added field %q to Peer.Equal? Do so if not, and then update TestPeerEqual", sf.Name) + } + } +} diff --git a/wgengine/wgcfg/device.go b/wgengine/wgcfg/device.go index 80fa159e38972..ee7eb91c93b66 100644 --- a/wgengine/wgcfg/device.go +++ b/wgengine/wgcfg/device.go @@ -4,6 +4,7 @@ package wgcfg import ( + "errors" "io" "sort" @@ -11,7 +12,6 @@ import ( "github.com/tailscale/wireguard-go/device" "github.com/tailscale/wireguard-go/tun" "tailscale.com/types/logger" - "tailscale.com/util/multierr" ) // NewDevice returns a wireguard-go Device configured for Tailscale use. 
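A note on the wgcfg/device.go change (import swap above, errors.Join calls below): errors.Join is a standard-library replacement for multierr.New in this pattern because it ignores nil arguments and returns nil when every argument is nil, so the callers' err != nil checks behave as before; only the formatting of a combined error message differs. For example:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Join drops nil arguments entirely.
	fmt.Println(errors.Join(nil, nil) == nil) // true

	getErr := errors.New("read failed")
	err := errors.Join(getErr, nil)
	fmt.Println(err)                    // read failed
	fmt.Println(errors.Is(err, getErr)) // true: Join wraps its non-nil arguments
}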
@@ -31,7 +31,7 @@ func DeviceConfig(d *device.Device) (*Config, error) { cfg, fromErr := FromUAPI(r) r.Close() getErr := <-errc - err := multierr.New(getErr, fromErr) + err := errors.Join(getErr, fromErr) if err != nil { return nil, err } @@ -64,5 +64,5 @@ func ReconfigDevice(d *device.Device, cfg *Config, logf logger.Logf) (err error) toErr := cfg.ToUAPI(logf, w, prev) w.Close() setErr := <-errc - return multierr.New(setErr, toErr) + return errors.Join(setErr, toErr) } diff --git a/words/scales.txt b/words/scales.txt index 532734f6dcf8a..bb623fb6f1ab8 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -442,3 +442,12 @@ salary fujita caiman cichlid +logarithm +exponential +geological +cosmological +barometric +ph +pain +temperature +wyrm diff --git a/words/tails.txt b/words/tails.txt index 20ff326c1e6fd..f5e93bf504687 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -764,3 +764,12 @@ sailfish billfish taimen sargo +story +tale +gecko +wyrm +meteor +ribbon +echo +lemming +worm
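Stepping back to the wgengine/userspace.go changes above: the deephash signatures (lastRouterSig, lastEngineSigFull, lastEngineSigTrim) are replaced by typed snapshots (lastRouter, lastEngineFull, lastEngineInputs, lastDNSConfig) compared via Equal methods and checkchange.Update. Judging only from the call sites in this diff, checkchange.Update stores the new value when it differs from the previous one and reports whether it changed; here is a minimal generic sketch of that pattern, with an illustrative name and constraint rather than the real package's API:

package example // illustrative sketch, not part of the patch

// updateLast reports whether newVal differs from *last according to Equal and,
// if it does, replaces *last with a clone so the stored snapshot doesn't alias
// memory the caller may mutate afterwards (e.g. the trimmed wgcfg.Config).
func updateLast[T interface {
	Equal(T) bool
	Clone() T
}](last *T, newVal T) (changed bool) {
	if newVal.Equal(*last) {
		return false
	}
	*last = newVal.Clone()
	return true
}

In the patch, *wgcfg.Config, *router.Config, and *maybeReconfigInputs all appear to fit this shape; maybeReconfigInputs supplies Equal via reflect.DeepEqual and a Clone that copies its map and nested config.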
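Relatedly, lastDNSConfig changes from *dns.Config to dns.ConfigView: Reconfig now stores dnsCfg.View() only when the DNS config actually changed, and linkChange checks Valid() and re-materializes the config with AsStruct() before calling dns.Set after a major link change. A compressed sketch of that store/replay round trip, using only the view methods that appear in this diff (View, Equal, Valid, AsStruct):

package example // illustrative sketch, not part of the patch

import "tailscale.com/net/dns"

var lastDNS dns.ConfigView // zero value reports !Valid(), i.e. nothing stored

// remember stores an immutable snapshot of cfg if it differs from the last one.
func remember(cfg *dns.Config) (changed bool) {
	if lastDNS.Equal(cfg.View()) {
		return false
	}
	lastDNS = cfg.View()
	return true
}

// replay re-applies the last snapshot, e.g. after a major link change.
func replay(set func(dns.Config) error) error {
	if !lastDNS.Valid() {
		return nil // nothing to re-apply yet
	}
	return set(*lastDNS.AsStruct())
}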
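Finally, the test hunks (userspace_test.go, watchdog_test.go, userspace_ext_test.go) all switch from a hand-managed eventbus.New()/new(health.Tracker) pair to eventbustest.NewBus(t) plus health.NewTracker(bus), and userspace_ext_test.go reads the tracker via sys.HealthTracker.Get() instead of sys.HealthTracker(). A sketch of the shared per-test setup this implies; the return type of eventbustest.NewBus is assumed from how the tests pass it straight to health.NewTracker and NewFakeUserspaceEngine, and its cleanup behavior is assumed from the removal of the explicit defer bus.Close():

package wgengine_test // illustrative sketch, not part of the patch

import (
	"testing"

	"tailscale.com/health"
	"tailscale.com/util/eventbus"
	"tailscale.com/util/eventbus/eventbustest"
	"tailscale.com/util/usermetric"
)

// newTestDeps builds the dependencies the updated tests construct inline:
// a test-scoped event bus (presumably cleaned up by the helper itself),
// a health tracker bound to that bus, and a fresh user-metric registry.
func newTestDeps(t *testing.T) (bus *eventbus.Bus, ht *health.Tracker, reg *usermetric.Registry) {
	t.Helper()
	bus = eventbustest.NewBus(t)
	ht = health.NewTracker(bus)
	reg = new(usermetric.Registry)
	return bus, ht, reg
}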