diff --git a/pb/c1/connector/v2/grant.pb.go b/pb/c1/connector/v2/grant.pb.go index 485decbee..5fb9a29b1 100644 --- a/pb/c1/connector/v2/grant.pb.go +++ b/pb/c1/connector/v2/grant.pb.go @@ -228,14 +228,18 @@ func (b0 Grant_builder) Build() *Grant { } type GrantsServiceListGrantsRequest struct { - state protoimpl.MessageState `protogen:"hybrid.v1"` - Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` - ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"hybrid.v1"` + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + Annotations []*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty"` + ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3" json:"active_sync_id,omitempty"` + // If true, only return grants that are expandable (have GrantExpandable annotation). + ExpandableOnly bool `protobuf:"varint,6,opt,name=expandable_only,json=expandableOnly,proto3" json:"expandable_only,omitempty"` + // If true, only return grants that need expansion processing. 
+ NeedsExpansionOnly bool `protobuf:"varint,7,opt,name=needs_expansion_only,json=needsExpansionOnly,proto3" json:"needs_expansion_only,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GrantsServiceListGrantsRequest) Reset() { @@ -298,6 +302,20 @@ func (x *GrantsServiceListGrantsRequest) GetActiveSyncId() string { return "" } +func (x *GrantsServiceListGrantsRequest) GetExpandableOnly() bool { + if x != nil { + return x.ExpandableOnly + } + return false +} + +func (x *GrantsServiceListGrantsRequest) GetNeedsExpansionOnly() bool { + if x != nil { + return x.NeedsExpansionOnly + } + return false +} + func (x *GrantsServiceListGrantsRequest) SetResource(v *Resource) { x.Resource = v } @@ -318,6 +336,14 @@ func (x *GrantsServiceListGrantsRequest) SetActiveSyncId(v string) { x.ActiveSyncId = v } +func (x *GrantsServiceListGrantsRequest) SetExpandableOnly(v bool) { + x.ExpandableOnly = v +} + +func (x *GrantsServiceListGrantsRequest) SetNeedsExpansionOnly(v bool) { + x.NeedsExpansionOnly = v +} + func (x *GrantsServiceListGrantsRequest) HasResource() bool { if x == nil { return false @@ -337,6 +363,10 @@ type GrantsServiceListGrantsRequest_builder struct { PageToken string Annotations []*anypb.Any ActiveSyncId string + // If true, only return grants that are expandable (have GrantExpandable annotation). + ExpandableOnly bool + // If true, only return grants that need expansion processing. 
+ NeedsExpansionOnly bool } func (b0 GrantsServiceListGrantsRequest_builder) Build() *GrantsServiceListGrantsRequest { @@ -348,6 +378,8 @@ func (b0 GrantsServiceListGrantsRequest_builder) Build() *GrantsServiceListGrant x.PageToken = b.PageToken x.Annotations = b.Annotations x.ActiveSyncId = b.ActiveSyncId + x.ExpandableOnly = b.ExpandableOnly + x.NeedsExpansionOnly = b.NeedsExpansionOnly return m0 } @@ -813,16 +845,18 @@ const file_c1_connector_v2_grant_proto_rawDesc = "" + "\x02id\x18\x03 \x01(\tB\n" + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x12A\n" + "\asources\x18\x05 \x01(\v2\x1d.c1.connector.v2.GrantSourcesB\b\xfaB\x05\x8a\x01\x02\x10\x00R\asources\x126\n" + - "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa6\x02\n" + - "\x1eGrantsServiceListGrantsRequest\x12?\n" + - "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\bresource\x12'\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xf7\x02\n" + + "\x1eGrantsServiceListGrantsRequest\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x12'\n" + "\tpage_size\x18\x02 \x01(\rB\n" + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + "\n" + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + - "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xbd\x01\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\x12'\n" + + "\x0fexpandable_only\x18\x06 \x01(\bR\x0eexpandableOnly\x120\n" + + "\x14needs_expansion_only\x18\a \x01(\bR\x12needsExpansionOnly\"\xbd\x01\n" + "\x1fGrantsServiceListGrantsResponse\x12*\n" + "\x04list\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\x04list\x126\n" + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + diff --git a/pb/c1/connector/v2/grant.pb.validate.go 
b/pb/c1/connector/v2/grant.pb.validate.go index a66db3e1f..8d62e13c4 100644 --- a/pb/c1/connector/v2/grant.pb.validate.go +++ b/pb/c1/connector/v2/grant.pb.validate.go @@ -454,17 +454,6 @@ func (m *GrantsServiceListGrantsRequest) validate(all bool) error { var errors []error - if m.GetResource() == nil { - err := GrantsServiceListGrantsRequestValidationError{ - field: "Resource", - reason: "value is required", - } - if !all { - return err - } - errors = append(errors, err) - } - if all { switch v := interface{}(m.GetResource()).(type) { case interface{ ValidateAll() error }: @@ -573,6 +562,10 @@ func (m *GrantsServiceListGrantsRequest) validate(all bool) error { } + // no validation rules for ExpandableOnly + + // no validation rules for NeedsExpansionOnly + if len(errors) > 0 { return GrantsServiceListGrantsRequestMultiError(errors) } diff --git a/pb/c1/connector/v2/grant_protoopaque.pb.go b/pb/c1/connector/v2/grant_protoopaque.pb.go index 2aa8d446e..540ba93ec 100644 --- a/pb/c1/connector/v2/grant_protoopaque.pb.go +++ b/pb/c1/connector/v2/grant_protoopaque.pb.go @@ -230,14 +230,16 @@ func (b0 Grant_builder) Build() *Grant { } type GrantsServiceListGrantsRequest struct { - state protoimpl.MessageState `protogen:"opaque.v1"` - xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` - xxx_hidden_PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3"` - xxx_hidden_PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3"` - xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` - xxx_hidden_ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"opaque.v1"` + xxx_hidden_Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3"` + xxx_hidden_PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3"` + 
xxx_hidden_PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3"` + xxx_hidden_Annotations *[]*anypb.Any `protobuf:"bytes,4,rep,name=annotations,proto3"` + xxx_hidden_ActiveSyncId string `protobuf:"bytes,5,opt,name=active_sync_id,json=activeSyncId,proto3"` + xxx_hidden_ExpandableOnly bool `protobuf:"varint,6,opt,name=expandable_only,json=expandableOnly,proto3"` + xxx_hidden_NeedsExpansionOnly bool `protobuf:"varint,7,opt,name=needs_expansion_only,json=needsExpansionOnly,proto3"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GrantsServiceListGrantsRequest) Reset() { @@ -302,6 +304,20 @@ func (x *GrantsServiceListGrantsRequest) GetActiveSyncId() string { return "" } +func (x *GrantsServiceListGrantsRequest) GetExpandableOnly() bool { + if x != nil { + return x.xxx_hidden_ExpandableOnly + } + return false +} + +func (x *GrantsServiceListGrantsRequest) GetNeedsExpansionOnly() bool { + if x != nil { + return x.xxx_hidden_NeedsExpansionOnly + } + return false +} + func (x *GrantsServiceListGrantsRequest) SetResource(v *Resource) { x.xxx_hidden_Resource = v } @@ -322,6 +338,14 @@ func (x *GrantsServiceListGrantsRequest) SetActiveSyncId(v string) { x.xxx_hidden_ActiveSyncId = v } +func (x *GrantsServiceListGrantsRequest) SetExpandableOnly(v bool) { + x.xxx_hidden_ExpandableOnly = v +} + +func (x *GrantsServiceListGrantsRequest) SetNeedsExpansionOnly(v bool) { + x.xxx_hidden_NeedsExpansionOnly = v +} + func (x *GrantsServiceListGrantsRequest) HasResource() bool { if x == nil { return false @@ -341,6 +365,10 @@ type GrantsServiceListGrantsRequest_builder struct { PageToken string Annotations []*anypb.Any ActiveSyncId string + // If true, only return grants that are expandable (have GrantExpandable annotation). + ExpandableOnly bool + // If true, only return grants that need expansion processing. 
+ NeedsExpansionOnly bool } func (b0 GrantsServiceListGrantsRequest_builder) Build() *GrantsServiceListGrantsRequest { @@ -352,6 +380,8 @@ func (b0 GrantsServiceListGrantsRequest_builder) Build() *GrantsServiceListGrant x.xxx_hidden_PageToken = b.PageToken x.xxx_hidden_Annotations = &b.Annotations x.xxx_hidden_ActiveSyncId = b.ActiveSyncId + x.xxx_hidden_ExpandableOnly = b.ExpandableOnly + x.xxx_hidden_NeedsExpansionOnly = b.NeedsExpansionOnly return m0 } @@ -831,16 +861,18 @@ const file_c1_connector_v2_grant_proto_rawDesc = "" + "\x02id\x18\x03 \x01(\tB\n" + "\xfaB\ar\x05 \x01(\x80\bR\x02id\x12A\n" + "\asources\x18\x05 \x01(\v2\x1d.c1.connector.v2.GrantSourcesB\b\xfaB\x05\x8a\x01\x02\x10\x00R\asources\x126\n" + - "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xa6\x02\n" + - "\x1eGrantsServiceListGrantsRequest\x12?\n" + - "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceB\b\xfaB\x05\x8a\x01\x02\x10\x01R\bresource\x12'\n" + + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\"\xf7\x02\n" + + "\x1eGrantsServiceListGrantsRequest\x125\n" + + "\bresource\x18\x01 \x01(\v2\x19.c1.connector.v2.ResourceR\bresource\x12'\n" + "\tpage_size\x18\x02 \x01(\rB\n" + "\xfaB\a*\x05\x18\xfa\x01@\x01R\bpageSize\x12-\n" + "\n" + "page_token\x18\x03 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\tpageToken\x126\n" + "\vannotations\x18\x04 \x03(\v2\x14.google.protobuf.AnyR\vannotations\x123\n" + "\x0eactive_sync_id\x18\x05 \x01(\tB\r\xfaB\n" + - "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\"\xbd\x01\n" + + "r\b \x01(\x80\b\xd0\x01\x01R\factiveSyncId\x12'\n" + + "\x0fexpandable_only\x18\x06 \x01(\bR\x0eexpandableOnly\x120\n" + + "\x14needs_expansion_only\x18\a \x01(\bR\x12needsExpansionOnly\"\xbd\x01\n" + "\x1fGrantsServiceListGrantsResponse\x12*\n" + "\x04list\x18\x01 \x03(\v2\x16.c1.connector.v2.GrantR\x04list\x126\n" + "\x0fnext_page_token\x18\x02 \x01(\tB\x0e\xfaB\vr\t \x01(\x80\x80@\xd0\x01\x01R\rnextPageToken\x126\n" + diff 
--git a/pkg/connectorbuilder/resource_syncer.go b/pkg/connectorbuilder/resource_syncer.go index 4378730f9..b72796741 100644 --- a/pkg/connectorbuilder/resource_syncer.go +++ b/pkg/connectorbuilder/resource_syncer.go @@ -286,6 +286,13 @@ func (b *builder) ListGrants(ctx context.Context, request *v2.GrantsServiceListG start := b.nowFunc() tt := tasks.ListGrantsType + + if request.GetResource() == nil { + err := fmt.Errorf("error: list grants requires a resource") + b.m.RecordTaskFailure(ctx, tt, b.nowFunc().Sub(start), err) + return nil, err + } + rid := request.GetResource().GetId() rb, ok := b.resourceSyncers[rid.GetResourceType()] if !ok { diff --git a/pkg/connectorstore/connectorstore.go b/pkg/connectorstore/connectorstore.go index 65793949e..0b85801ce 100644 --- a/pkg/connectorstore/connectorstore.go +++ b/pkg/connectorstore/connectorstore.go @@ -70,3 +70,10 @@ type Writer interface { PutEntitlements(ctx context.Context, entitlements ...*v2.Entitlement) error DeleteGrant(ctx context.Context, grantId string) error } + +// ExpansionStore provides methods for grant expansion operations. +// Not all store implementations support expansion; callers should type-assert. +type ExpansionStore interface { + // SetSupportsDiff marks the sync as supporting diff operations. + SetSupportsDiff(ctx context.Context, syncID string) error +} diff --git a/pkg/dotc1z/c1file_attached.go b/pkg/dotc1z/c1file_attached.go index 54e64f4f2..556760c74 100644 --- a/pkg/dotc1z/c1file_attached.go +++ b/pkg/dotc1z/c1file_attached.go @@ -5,6 +5,7 @@ import ( "database/sql" "errors" "fmt" + "strings" "time" reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" @@ -178,6 +179,11 @@ func (c *C1FileAttached) UpdateSync(ctx context.Context, baseSync *reader_v2.Syn return nil } +// ErrOldSyncMissingExpansionMarker is returned when the old sync doesn't have the supports_diff +// marker set. 
This indicates the sync was expanded with older code that dropped grant annotations, +// making it unsuitable for diff-based incremental expansion. +var ErrOldSyncMissingExpansionMarker = errors.New("old sync is missing expansion marker; cannot generate diff from sync expanded with older code that dropped annotations") + // GenerateSyncDiffFromFile compares the old sync (in attached) with the new sync (in main) // and generates two new syncs in the main database. // @@ -198,6 +204,20 @@ func (c *C1FileAttached) GenerateSyncDiffFromFile(ctx context.Context, oldSyncID ctx, span := tracer.Start(ctx, "C1FileAttached.GenerateSyncDiffFromFile") defer span.End() + // Check that the old sync has the supports_diff marker set. + // Syncs expanded with older code dropped annotations, making them unusable for diffs. + var supportsDiffInt int + err := c.file.db.QueryRowContext(ctx, + fmt.Sprintf("SELECT supports_diff FROM attached.%s WHERE sync_id = ?", syncRuns.Name()), + oldSyncID, + ).Scan(&supportsDiffInt) + if err != nil { + return "", "", fmt.Errorf("failed to check expansion marker for old sync: %w", err) + } + if supportsDiffInt == 0 { + return "", "", ErrOldSyncMissingExpansionMarker + } + // Generate unique IDs for the diff syncs deletionsSyncID := ksuid.New().String() upsertsSyncID := ksuid.New().String() @@ -227,6 +247,8 @@ func (c *C1FileAttached) GenerateSyncDiffFromFile(ctx context.Context, oldSyncID "sync_type": connectorstore.SyncTypePartialDeletions, "parent_sync_id": oldSyncID, "linked_sync_id": upsertsSyncID, + // This sync is generated by the new SQL-layer diff logic, so it is safe for diff operations. 
+ "supports_diff": 1, }) query, args, err := deletionsInsert.ToSQL() if err != nil { @@ -244,6 +266,8 @@ func (c *C1FileAttached) GenerateSyncDiffFromFile(ctx context.Context, oldSyncID "sync_type": connectorstore.SyncTypePartialUpserts, "parent_sync_id": oldSyncID, "linked_sync_id": deletionsSyncID, + // This sync is generated by the new SQL-layer diff logic, so it is safe for diff operations. + "supports_diff": 1, }) query, args, err = upsertsInsert.ToSQL() if err != nil { @@ -259,12 +283,29 @@ func (c *C1FileAttached) GenerateSyncDiffFromFile(ctx context.Context, oldSyncID // - diffTableFromMainTx finds items in NEW not in OLD or modified = upserts tables := []string{"v1_resource_types", "v1_resources", "v1_entitlements", "v1_grants"} for _, tableName := range tables { + // Always include resource types in the upserts diff. Targeted/partial syncs may not emit a complete + // snapshot of resource types, and we do not want missing types to be interpreted as deletions. + if tableName == "v1_resource_types" { + if err := c.copyTableFromMainTx(ctx, tx, tableName, newSyncID, upsertsSyncID); err != nil { + return "", "", fmt.Errorf("failed to copy resource types for %s: %w", tableName, err) + } + continue + } if err := c.diffTableFromAttachedTx(ctx, tx, tableName, oldSyncID, newSyncID, deletionsSyncID); err != nil { return "", "", fmt.Errorf("failed to generate deletions for %s: %w", tableName, err) } if err := c.diffTableFromMainTx(ctx, tx, tableName, oldSyncID, newSyncID, upsertsSyncID); err != nil { return "", "", fmt.Errorf("failed to generate upserts for %s: %w", tableName, err) } + // For grants, also include the OLD version of modified rows in the deletions sync. + // This allows downstream consumers (including incremental expansion) to treat modifications + // as delete+insert and compute accurate edge/source invalidation without looking back into + // the attached database later. 
+ if tableName == "v1_grants" { + if err := c.diffModifiedFromAttachedTx(ctx, tx, tableName, oldSyncID, newSyncID, deletionsSyncID); err != nil { + return "", "", fmt.Errorf("failed to generate modified-row deletions for %s: %w", tableName, err) + } + } } // End the syncs (deletions first, then upserts) @@ -374,6 +415,18 @@ func (c *C1FileAttached) diffTableFromMainTx(ctx context.Context, tx *sql.Tx, ta // 1. Not in attached (OLD) - additions // 2. In attached but with different data - modifications // newSyncID is in main, oldSyncID is in attached + // + // For grants, we also compare the expansion column since GrantExpandable + // annotation is stored separately from data. + var dataCompare string + if strings.Contains(tableName, "grants") { + // For grants: compare both data AND expansion columns. + // Use IFNULL to handle NULL expansion values. + dataCompare = "(a.data != m.data OR IFNULL(a.expansion, X'') != IFNULL(m.expansion, X''))" + } else { + dataCompare = "a.data != m.data" + } + //nolint:gosec // table names are from hardcoded list, not user input query := fmt.Sprintf(` INSERT INTO main.%s (%s) @@ -389,11 +442,89 @@ func (c *C1FileAttached) diffTableFromMainTx(ctx context.Context, tx *sql.Tx, ta SELECT 1 FROM attached.%s AS a WHERE a.external_id = m.external_id AND a.sync_id = ? - AND a.data != m.data + AND %s ) ) - `, tableName, columnList, selectList, tableName, tableName, tableName) + `, tableName, columnList, selectList, tableName, tableName, tableName, dataCompare) _, err = tx.ExecContext(ctx, query, targetSyncID, newSyncID, oldSyncID, oldSyncID) return err } + +// diffModifiedFromAttachedTx inserts the OLD version of rows that were modified between OLD (attached) and NEW (main). +// This is used to make modifications behave like delete+insert when applying diffs. 
+func (c *C1FileAttached) diffModifiedFromAttachedTx(ctx context.Context, tx *sql.Tx, tableName string, oldSyncID string, newSyncID string, targetSyncID string) error { + columns, err := c.getTableColumns(ctx, tableName) + if err != nil { + return err + } + + // Build column lists + columnList := "" + selectList := "" + for i, col := range columns { + if i > 0 { + columnList += ", " + selectList += ", " + } + columnList += col + if col == "sync_id" { + selectList += "? as sync_id" + } else { + selectList += col + } + } + + // Insert OLD rows for modified records. + //nolint:gosec // table names are from hardcoded list, not user input + query := fmt.Sprintf(` + INSERT INTO main.%s (%s) + SELECT %s + FROM attached.%s AS a + WHERE a.sync_id = ? + AND EXISTS ( + SELECT 1 FROM main.%s AS m + WHERE m.external_id = a.external_id + AND m.sync_id = ? + AND a.data != m.data + ) + `, tableName, columnList, selectList, tableName, tableName) + + _, err = tx.ExecContext(ctx, query, targetSyncID, oldSyncID, newSyncID) + return err +} + +// copyTableFromMainTx copies all rows for newSyncID (NEW) into targetSyncID. This is used for tables where we +// want the upserts sync to always contain a full snapshot (e.g., resource types). +func (c *C1FileAttached) copyTableFromMainTx(ctx context.Context, tx *sql.Tx, tableName string, newSyncID string, targetSyncID string) error { + columns, err := c.getTableColumns(ctx, tableName) + if err != nil { + return err + } + + columnList := "" + selectList := "" + for i, col := range columns { + if i > 0 { + columnList += ", " + selectList += ", " + } + columnList += col + if col == "sync_id" { + selectList += "? as sync_id" + } else { + selectList += col + } + } + + //nolint:gosec // table names are from hardcoded list, not user input + query := fmt.Sprintf(` + INSERT INTO main.%s (%s) + SELECT %s + FROM main.%s AS m + WHERE m.sync_id = ? 
+ `, tableName, columnList, selectList, tableName) + + _, err = tx.ExecContext(ctx, query, targetSyncID, newSyncID) + return err +} diff --git a/pkg/dotc1z/diff_test.go b/pkg/dotc1z/diff_test.go index b5b64f055..6b5382ee7 100644 --- a/pkg/dotc1z/diff_test.go +++ b/pkg/dotc1z/diff_test.go @@ -2,6 +2,7 @@ package dotc1z import ( "context" + "errors" "os" "path/filepath" "testing" @@ -9,6 +10,7 @@ import ( "github.com/stretchr/testify/require" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" "github.com/conductorone/baton-sdk/pkg/connectorstore" ) @@ -133,6 +135,9 @@ func TestGenerateSyncDiffFromFile_Additions(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -265,6 +270,9 @@ func TestGenerateSyncDiffFromFile_Deletions(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -355,6 +363,9 @@ func TestGenerateSyncDiffFromFile_Modifications(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -448,6 +459,9 @@ func TestGenerateSyncDiffFromFile_MixedChanges(t *testing.T) { require.NoError(t, err) } + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -574,6 +588,9 @@ func TestGenerateSyncDiffFromFile_NoChanges(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -617,7 +634,12 @@ func TestGenerateSyncDiffFromFile_NoChanges(t *testing.T) { ResourceTypeId: resourceTypeID, }.Build()) require.NoError(t, err) - require.Len(t, 
resourcesResp.GetList(), 0, "upserts should be empty when no changes") + require.Len(t, resourcesResp.GetList(), 0, "upserts should not include resources when no changes") + + // Resource types are always included in upserts. + rtResp, err := newFile.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{}.Build()) + require.NoError(t, err) + require.Len(t, rtResp.GetList(), 1, "upserts should include resource types") // Verify deletions is empty err = newFile.ViewSync(ctx, deletionsSyncID) @@ -629,6 +651,10 @@ func TestGenerateSyncDiffFromFile_NoChanges(t *testing.T) { require.NoError(t, err) require.Len(t, resourcesResp.GetList(), 0, "deletions should be empty when no changes") + rtResp, err = newFile.ListResourceTypes(ctx, v2.ResourceTypesServiceListResourceTypesRequest_builder{}.Build()) + require.NoError(t, err) + require.Len(t, rtResp.GetList(), 0, "deletions should not include resource types when no changes") + _ = oldFile.Close(ctx) _ = newFile.Close(ctx) } @@ -666,6 +692,9 @@ func TestGenerateSyncDiffFromFile_EntitlementsOnly(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -778,6 +807,9 @@ func TestGenerateSyncDiffFromFile_GrantsOnly(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -888,6 +920,9 @@ func TestGenerateSyncDiffFromFile_EmptyBase(t *testing.T) { // No resources added - empty sync + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -1003,6 +1038,9 @@ func TestGenerateSyncDiffFromFile_EmptyNew(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -1108,6 +1146,9 
@@ func TestGenerateSyncDiffFromFile_EntitlementsDeletions(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -1200,6 +1241,9 @@ func TestGenerateSyncDiffFromFile_EntitlementsModifications(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -1321,6 +1365,9 @@ func TestGenerateSyncDiffFromFile_GrantsDeletions(t *testing.T) { }.Build()) require.NoError(t, err) + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + err = oldFile.EndSync(ctx) require.NoError(t, err) @@ -1387,3 +1434,203 @@ func TestGenerateSyncDiffFromFile_GrantsDeletions(t *testing.T) { _ = oldFile.Close(ctx) _ = newFile.Close(ctx) } + +func TestGenerateSyncDiffFromFile_MissingExpansionMarker(t *testing.T) { + ctx := context.Background() + + oldPath := filepath.Join(c1zTests.workingDir, "diff_missing_marker_old.c1z") + newPath := filepath.Join(c1zTests.workingDir, "diff_missing_marker_new.c1z") + defer os.Remove(oldPath) + defer os.Remove(newPath) + + opts := []C1ZOption{WithPragma("journal_mode", "WAL")} + + // Create the OLD file with an expandable grant (but WITHOUT setting expansion marker) + oldFile, err := NewC1ZFile(ctx, oldPath, opts...) 
+ require.NoError(t, err) + + oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + + groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build() + userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build() + err = oldFile.PutResourceTypes(ctx, groupRT, userRT) + require.NoError(t, err) + + g1 := v2.Resource_builder{ + Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), + DisplayName: "G1", + }.Build() + g2 := v2.Resource_builder{ + Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), + DisplayName: "G2", + }.Build() + err = oldFile.PutResources(ctx, g1, g2) + require.NoError(t, err) + + e1 := v2.Entitlement_builder{ + Id: "group:g1:member", + Resource: g1, + Slug: "member", + DisplayName: "member", + }.Build() + e2 := v2.Entitlement_builder{ + Id: "group:g2:member", + Resource: g2, + Slug: "member", + DisplayName: "member", + }.Build() + err = oldFile.PutEntitlements(ctx, e1, e2) + require.NoError(t, err) + + // Create an expandable grant (g1 -> e2 with expansion annotation) + expandableGrant := v2.Grant_builder{ + Id: "grant:g1:e2", + Entitlement: e2, + Principal: g1, + Annotations: annotations.New(v2.GrantExpandable_builder{ + EntitlementIds: []string{e1.GetId()}, + Shallow: false, + ResourceTypeIds: []string{"user"}, + }.Build()), + }.Build() + err = oldFile.PutGrants(ctx, expandableGrant) + require.NoError(t, err) + + // Simulate an old sync that predates supports_diff by explicitly clearing it. + // (New code defaults supports_diff=1 for newly created syncs.) + _, err = oldFile.db.ExecContext(ctx, "UPDATE "+syncRuns.Name()+" SET supports_diff=0 WHERE sync_id = ?", oldSyncID) + require.NoError(t, err) + + err = oldFile.EndSync(ctx) + require.NoError(t, err) + + // Create the NEW file (minimal, just needs to exist) + newFile, err := NewC1ZFile(ctx, newPath, opts...) 
+ require.NoError(t, err) + + newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + err = newFile.PutResourceTypes(ctx, groupRT, userRT) + require.NoError(t, err) + err = newFile.PutResources(ctx, g1, g2) + require.NoError(t, err) + err = newFile.PutEntitlements(ctx, e1, e2) + require.NoError(t, err) + err = newFile.EndSync(ctx) + require.NoError(t, err) + + // Attach and try to generate diff - should fail + attached, err := newFile.AttachFile(oldFile, "attached") + require.NoError(t, err) + + _, _, err = attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID) + require.Error(t, err) + require.True(t, errors.Is(err, ErrOldSyncMissingExpansionMarker), "expected ErrOldSyncMissingExpansionMarker, got: %v", err) + + _, err = attached.DetachFile("attached") + require.NoError(t, err) + + _ = oldFile.Close(ctx) + _ = newFile.Close(ctx) +} + +func TestGenerateSyncDiffFromFile_WithExpansionMarker(t *testing.T) { + ctx := context.Background() + + oldPath := filepath.Join(c1zTests.workingDir, "diff_with_marker_old.c1z") + newPath := filepath.Join(c1zTests.workingDir, "diff_with_marker_new.c1z") + defer os.Remove(oldPath) + defer os.Remove(newPath) + + opts := []C1ZOption{WithPragma("journal_mode", "WAL")} + + // Create the OLD file with an expandable grant AND set expansion marker + oldFile, err := NewC1ZFile(ctx, oldPath, opts...) 
+ require.NoError(t, err) + + oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + + groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build() + userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build() + err = oldFile.PutResourceTypes(ctx, groupRT, userRT) + require.NoError(t, err) + + g1 := v2.Resource_builder{ + Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), + DisplayName: "G1", + }.Build() + g2 := v2.Resource_builder{ + Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), + DisplayName: "G2", + }.Build() + err = oldFile.PutResources(ctx, g1, g2) + require.NoError(t, err) + + e1 := v2.Entitlement_builder{ + Id: "group:g1:member", + Resource: g1, + Slug: "member", + DisplayName: "member", + }.Build() + e2 := v2.Entitlement_builder{ + Id: "group:g2:member", + Resource: g2, + Slug: "member", + DisplayName: "member", + }.Build() + err = oldFile.PutEntitlements(ctx, e1, e2) + require.NoError(t, err) + + // Create an expandable grant (g1 -> e2 with expansion annotation) + expandableGrant := v2.Grant_builder{ + Id: "grant:g1:e2", + Entitlement: e2, + Principal: g1, + Annotations: annotations.New(v2.GrantExpandable_builder{ + EntitlementIds: []string{e1.GetId()}, + Shallow: false, + ResourceTypeIds: []string{"user"}, + }.Build()), + }.Build() + err = oldFile.PutGrants(ctx, expandableGrant) + require.NoError(t, err) + + // Set the expansion marker - simulates sync expanded with new code + err = oldFile.SetSupportsDiff(ctx, oldSyncID) + require.NoError(t, err) + + err = oldFile.EndSync(ctx) + require.NoError(t, err) + + // Create the NEW file (minimal, just needs to exist) + newFile, err := NewC1ZFile(ctx, newPath, opts...) 
+ require.NoError(t, err) + + newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + err = newFile.PutResourceTypes(ctx, groupRT, userRT) + require.NoError(t, err) + err = newFile.PutResources(ctx, g1, g2) + require.NoError(t, err) + err = newFile.PutEntitlements(ctx, e1, e2) + require.NoError(t, err) + err = newFile.EndSync(ctx) + require.NoError(t, err) + + // Attach and generate diff - should succeed + attached, err := newFile.AttachFile(oldFile, "attached") + require.NoError(t, err) + + upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID) + require.NoError(t, err) + require.NotEmpty(t, upsertsSyncID) + require.NotEmpty(t, deletionsSyncID) + + _, err = attached.DetachFile("attached") + require.NoError(t, err) + + _ = oldFile.Close(ctx) + _ = newFile.Close(ctx) +} diff --git a/pkg/dotc1z/entitlements.go b/pkg/dotc1z/entitlements.go index 31b1aac67..90fb1e60b 100644 --- a/pkg/dotc1z/entitlements.go +++ b/pkg/dotc1z/entitlements.go @@ -49,6 +49,8 @@ func (r *entitlementsTable) Schema() (string, []interface{}) { } func (r *entitlementsTable) Migrations(ctx context.Context, db *goqu.Database) error { + _ = ctx + _ = db return nil } diff --git a/pkg/dotc1z/grants.go b/pkg/dotc1z/grants.go index c091d7471..faa681433 100644 --- a/pkg/dotc1z/grants.go +++ b/pkg/dotc1z/grants.go @@ -3,8 +3,10 @@ package dotc1z import ( "context" "fmt" + "strings" "github.com/doug-martin/goqu/v9" + "google.golang.org/protobuf/proto" v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" @@ -22,6 +24,8 @@ create table if not exists %s ( principal_resource_type_id text not null, principal_resource_id text not null, external_id text not null, + expansion blob, -- Serialized GrantExpandable proto; NULL if grant is not expandable. + needs_expansion integer not null default 0, -- 1 if grant should be processed during expansion. 
data blob not null, sync_id text not null, discovered_at datetime not null @@ -59,8 +63,46 @@ func (r *grantsTable) Schema() (string, []any) { } } +// isAlreadyExistsError returns true if err is a SQLite "duplicate column name" error. +func isAlreadyExistsError(err error) bool { + return err != nil && strings.Contains(err.Error(), "duplicate column name") +} + func (r *grantsTable) Migrations(ctx context.Context, db *goqu.Database) error { - return nil + // Add expansion column if missing (for older files). + if _, err := db.ExecContext(ctx, fmt.Sprintf( + "alter table %s add column expansion blob", r.Name(), + )); err != nil && !isAlreadyExistsError(err) { + return err + } + + // Add needs_expansion column if missing. + if _, err := db.ExecContext(ctx, fmt.Sprintf( + "alter table %s add column needs_expansion integer not null default 0", r.Name(), + )); err != nil && !isAlreadyExistsError(err) { + return err + } + + // Create partial index for efficient queries on expandable grants. + if _, err := db.ExecContext(ctx, fmt.Sprintf( + "create index if not exists %s on %s (sync_id) where expansion is not null", + fmt.Sprintf("idx_grants_sync_expansion_v%s", r.Version()), + r.Name(), + )); err != nil { + return err + } + + // Create index for needs_expansion queries. + if _, err := db.ExecContext(ctx, fmt.Sprintf( + "create index if not exists %s on %s (sync_id, needs_expansion)", + fmt.Sprintf("idx_grants_sync_needs_expansion_v%s", r.Version()), + r.Name(), + )); err != nil { + return err + } + + // Backfill expansion column from stored grant bytes. 
+ return backfillGrantExpansionColumn(ctx, db, r.Name()) } func (c *C1File) ListGrants(ctx context.Context, request *v2.GrantsServiceListGrantsRequest) (*v2.GrantsServiceListGrantsResponse, error) { @@ -154,14 +196,14 @@ func (c *C1File) PutGrants(ctx context.Context, bulkGrants ...*v2.Grant) error { ctx, span := tracer.Start(ctx, "C1File.PutGrants") defer span.End() - return c.putGrantsInternal(ctx, bulkPutConnectorObject, bulkGrants...) + return c.putGrantsInternal(ctx, bulkPutGrants, bulkGrants...) } func (c *C1File) PutGrantsIfNewer(ctx context.Context, bulkGrants ...*v2.Grant) error { ctx, span := tracer.Start(ctx, "C1File.PutGrantsIfNewer") defer span.End() - return c.putGrantsInternal(ctx, bulkPutConnectorObjectIfNewer, bulkGrants...) + return c.putGrantsInternal(ctx, bulkPutGrantsIfNewer, bulkGrants...) } type grantPutFunc func(context.Context, *C1File, string, func(m *v2.Grant) (goqu.Record, error), ...*v2.Grant) error @@ -171,17 +213,31 @@ func (c *C1File) putGrantsInternal(ctx context.Context, f grantPutFunc, bulkGran return ErrReadOnly } + // We intentionally do not mutate caller-owned grant objects. The write path strips + // GrantExpandable from the stored data blob, so operate on clones. 
+ grantsToStore := make([]*v2.Grant, 0, len(bulkGrants)) + for _, g := range bulkGrants { + if g == nil { + continue + } + grantsToStore = append(grantsToStore, proto.Clone(g).(*v2.Grant)) + } + err := f(ctx, c, grants.Name(), func(grant *v2.Grant) (goqu.Record, error) { + expansionBytes, needsExpansion := extractAndStripExpansion(grant) + return goqu.Record{ "resource_type_id": grant.GetEntitlement().GetResource().GetId().GetResourceType(), "resource_id": grant.GetEntitlement().GetResource().GetId().GetResource(), "entitlement_id": grant.GetEntitlement().GetId(), "principal_resource_type_id": grant.GetPrincipal().GetId().GetResourceType(), "principal_resource_id": grant.GetPrincipal().GetId().GetResource(), + "expansion": expansionBytes, // nil for non-expandable grants + "needs_expansion": needsExpansion, }, nil }, - bulkGrants..., + grantsToStore..., ) if err != nil { return err @@ -190,6 +246,239 @@ func (c *C1File) putGrantsInternal(ctx context.Context, f grantPutFunc, bulkGran return nil } +// extractAndStripExpansion extracts the GrantExpandable annotation from the grant, +// removes it from the grant's annotations, and returns the serialized proto bytes. +// Returns (nil, false) if the grant has no valid GrantExpandable annotation. +func extractAndStripExpansion(grant *v2.Grant) ([]byte, bool) { + annos := annotations.Annotations(grant.GetAnnotations()) + expandable := &v2.GrantExpandable{} + ok, err := annos.Pick(expandable) + if err != nil || !ok || len(expandable.GetEntitlementIds()) == 0 { + return nil, false + } + + // Check that there's at least one non-whitespace entitlement ID. + hasValid := false + for _, id := range expandable.GetEntitlementIds() { + if strings.TrimSpace(id) != "" { + hasValid = true + break + } + } + if !hasValid { + return nil, false + } + + // Strip the GrantExpandable annotation from the grant by filtering it out. 
+ filtered := annotations.Annotations{} + for _, a := range annos { + if !a.MessageIs(expandable) { + filtered = append(filtered, a) + } + } + grant.SetAnnotations(filtered) + + // Serialize the expandable annotation. + data, err := proto.Marshal(expandable) + if err != nil { + return nil, false + } + return data, true +} + +func backfillGrantExpansionColumn(ctx context.Context, db *goqu.Database, tableName string) error { + // Only backfill grants from syncs that don't support diff (old syncs created before + // this code change). New syncs set supports_diff=1 at creation and write grants with + // the expansion column populated correctly, so they don't need backfilling. + // + // The LIKE filter skips the 99%+ of rows that don't have expandable annotations, + // making this fast even on large tables. + for { + rows, err := db.QueryContext(ctx, fmt.Sprintf( + `SELECT g.id, g.data FROM %s g + JOIN %s sr ON g.sync_id = sr.sync_id + WHERE g.expansion IS NULL + AND g.data LIKE '%%GrantExpandable%%' + AND sr.supports_diff = 0 + LIMIT 1000`, + tableName, syncRuns.Name(), + )) + if err != nil { + return err + } + + type row struct { + id int64 + data []byte + } + batch := make([]row, 0, 1000) + for rows.Next() { + var r row + if err := rows.Scan(&r.id, &r.data); err != nil { + _ = rows.Close() + return err + } + batch = append(batch, r) + } + if err := rows.Err(); err != nil { + _ = rows.Close() + return err + } + _ = rows.Close() + + if len(batch) == 0 { + return nil + } + + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return err + } + + stmt, err := tx.PrepareContext(ctx, fmt.Sprintf( + `UPDATE %s SET expansion=?, needs_expansion=?, data=? 
WHERE id=?`, + tableName, + )) + if err != nil { + _ = tx.Rollback() + return err + } + + for _, r := range batch { + g := &v2.Grant{} + if err := proto.Unmarshal(r.data, g); err != nil { + _ = stmt.Close() + _ = tx.Rollback() + return err + } + + expansionBytes, needsExpansion := extractAndStripExpansion(g) + if expansionBytes == nil { + // Strip GrantExpandable so this row won't be retried forever. + annos := annotations.Annotations(g.GetAnnotations()) + filtered := annotations.Annotations{} + expandable := &v2.GrantExpandable{} + for _, a := range annos { + if !a.MessageIs(expandable) { + filtered = append(filtered, a) + } + } + g.SetAnnotations(filtered) + + newData, err := proto.Marshal(g) + if err != nil { + _ = stmt.Close() + _ = tx.Rollback() + return err + } + if _, err := stmt.ExecContext(ctx, nil, 0, newData, r.id); err != nil { + _ = stmt.Close() + _ = tx.Rollback() + return err + } + continue + } + + // Re-serialize the grant with the annotation stripped. + newData, err := proto.Marshal(g) + if err != nil { + _ = stmt.Close() + _ = tx.Rollback() + return err + } + + if _, err := stmt.ExecContext(ctx, expansionBytes, needsExpansion, newData, r.id); err != nil { + _ = stmt.Close() + _ = tx.Rollback() + return err + } + } + + _ = stmt.Close() + if err := tx.Commit(); err != nil { + return err + } + } +} + +func bulkPutGrants( + ctx context.Context, c *C1File, + tableName string, + extractFields func(m *v2.Grant) (goqu.Record, error), + msgs ...*v2.Grant, +) error { + return bulkPutGrantsInternal(ctx, c, tableName, extractFields, false, msgs...) +} + +func bulkPutGrantsIfNewer( + ctx context.Context, c *C1File, + tableName string, + extractFields func(m *v2.Grant) (goqu.Record, error), + msgs ...*v2.Grant, +) error { + return bulkPutGrantsInternal(ctx, c, tableName, extractFields, true, msgs...) 
+} + +func bulkPutGrantsInternal( + ctx context.Context, c *C1File, + tableName string, + extractFields func(m *v2.Grant) (goqu.Record, error), + ifNewer bool, + msgs ...*v2.Grant, +) error { + if len(msgs) == 0 { + return nil + } + ctx, span := tracer.Start(ctx, "C1File.bulkPutGrants") + defer span.End() + + if err := c.validateSyncDb(ctx); err != nil { + return err + } + + // Prepare rows. + rows, err := prepareConnectorObjectRows(c, msgs, extractFields) + if err != nil { + return err + } + + // needs_expansion should only flip to 1 when expansion changes from NULL to non-NULL. + // If a grant is no longer expandable (expansion IS NULL), needs_expansion should be forced to 0. + needsExpansionExpr := goqu.L( + `CASE + WHEN EXCLUDED.expansion IS NULL THEN 0 + WHEN ?.expansion IS NULL AND EXCLUDED.expansion IS NOT NULL THEN 1 + WHEN ?.expansion IS NOT NULL AND EXCLUDED.expansion IS NOT NULL AND ?.expansion != EXCLUDED.expansion THEN 1 + ELSE ?.needs_expansion + END`, + goqu.I(tableName), goqu.I(tableName), goqu.I(tableName), goqu.I(tableName), + ) + + buildQueryFn := func(insertDs *goqu.InsertDataset, chunkedRows []*goqu.Record) (*goqu.InsertDataset, error) { + update := goqu.Record{ + "data": goqu.I("EXCLUDED.data"), + "expansion": goqu.I("EXCLUDED.expansion"), + "needs_expansion": needsExpansionExpr, + } + if ifNewer { + update["discovered_at"] = goqu.I("EXCLUDED.discovered_at") + return insertDs. + OnConflict(goqu.DoUpdate("external_id, sync_id", update).Where( + goqu.L("EXCLUDED.discovered_at > ?.discovered_at", goqu.I(tableName)), + )). + Rows(chunkedRows). + Prepared(true), nil + } + + return insertDs. + OnConflict(goqu.DoUpdate("external_id, sync_id", update)). + Rows(chunkedRows). 
+ Prepared(true), nil + } + + return executeChunkedInsert(ctx, c, tableName, rows, buildQueryFn) +} + func (c *C1File) DeleteGrant(ctx context.Context, grantId string) error { ctx, span := tracer.Start(ctx, "C1File.DeleteGrant") defer span.End() diff --git a/pkg/dotc1z/grants_diff_helpers.go b/pkg/dotc1z/grants_diff_helpers.go new file mode 100644 index 000000000..074a1c75a --- /dev/null +++ b/pkg/dotc1z/grants_diff_helpers.go @@ -0,0 +1,132 @@ +package dotc1z + +import ( + "context" + "fmt" + + "github.com/doug-martin/goqu/v9" +) + +// ListDistinctGrantEntitlementIDsForSync returns the set of entitlement IDs that appear in v1_grants for the given sync. +// This is used to efficiently seed incremental expansion invalidation from diff syncs. +func (c *C1File) ListDistinctGrantEntitlementIDsForSync(ctx context.Context, syncID string) ([]string, error) { + ctx, span := tracer.Start(ctx, "C1File.ListDistinctGrantEntitlementIDsForSync") + defer span.End() + + if err := c.validateDb(ctx); err != nil { + return nil, err + } + + q := c.db.From(grants.Name()).Prepared(true) + q = q.Select("entitlement_id").Distinct() + q = q.Where(goqu.C("sync_id").Eq(syncID)) + + query, args, err := q.ToSQL() + if err != nil { + return nil, err + } + + rows, err := c.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + out := make([]string, 0) + for rows.Next() { + var id string + if err := rows.Scan(&id); err != nil { + return nil, err + } + if id != "" { + out = append(out, id) + } + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +// ListDistinctEntitlementIDsForSync returns the set of entitlement IDs that appear in v1_entitlements for the given sync. 
+func (c *C1File) ListDistinctEntitlementIDsForSync(ctx context.Context, syncID string) ([]string, error) { + ctx, span := tracer.Start(ctx, "C1File.ListDistinctEntitlementIDsForSync") + defer span.End() + + if err := c.validateDb(ctx); err != nil { + return nil, err + } + + q := c.db.From(entitlements.Name()).Prepared(true) + q = q.Select("external_id").Distinct() + q = q.Where(goqu.C("sync_id").Eq(syncID)) + + query, args, err := q.ToSQL() + if err != nil { + return nil, err + } + + rows, err := c.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + out := make([]string, 0) + for rows.Next() { + var id string + if err := rows.Scan(&id); err != nil { + return nil, err + } + if id != "" { + out = append(out, id) + } + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +// ListDistinctResourceExternalIDsForSync returns external IDs that appear in v1_resources for the given sync. +func (c *C1File) ListDistinctResourceExternalIDsForSync(ctx context.Context, syncID string) ([]string, error) { + ctx, span := tracer.Start(ctx, "C1File.ListDistinctResourceExternalIDsForSync") + defer span.End() + + if err := c.validateDb(ctx); err != nil { + return nil, err + } + + q := c.db.From(resources.Name()).Prepared(true) + q = q.Select("external_id").Distinct() + q = q.Where(goqu.C("sync_id").Eq(syncID)) + + query, args, err := q.ToSQL() + if err != nil { + return nil, err + } + + rows, err := c.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + defer rows.Close() + + out := make([]string, 0) + for rows.Next() { + var id string + if err := rows.Scan(&id); err != nil { + return nil, err + } + if id != "" { + out = append(out, id) + } + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +// sanity compile check: ensure tables referenced exist in this package. 
+var _ = fmt.Sprintf diff --git a/pkg/dotc1z/grants_expandable_query.go b/pkg/dotc1z/grants_expandable_query.go new file mode 100644 index 000000000..02be082d5 --- /dev/null +++ b/pkg/dotc1z/grants_expandable_query.go @@ -0,0 +1,198 @@ +package dotc1z + +import ( + "context" + "database/sql" + "fmt" + "strconv" + + "github.com/doug-martin/goqu/v9" + "google.golang.org/protobuf/proto" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" +) + +// ExpandableGrantDef is a lightweight representation of an expandable grant row, +// using queryable columns instead of unmarshalling the full grant proto. +type ExpandableGrantDef struct { + RowID int64 + GrantExternalID string + DstEntitlementID string + PrincipalResourceTypeID string + PrincipalResourceID string + SrcEntitlementIDs []string + Shallow bool + PrincipalResourceTypeIDs []string + NeedsExpansion bool +} + +type listExpandableGrantsOptions struct { + pageToken string + pageSize uint32 + needsExpansionOnly bool + syncID string +} + +type ListExpandableGrantsOption func(*listExpandableGrantsOptions) + +func WithExpandableGrantsPageToken(t string) ListExpandableGrantsOption { + return func(o *listExpandableGrantsOptions) { o.pageToken = t } +} + +func WithExpandableGrantsPageSize(n uint32) ListExpandableGrantsOption { + return func(o *listExpandableGrantsOptions) { o.pageSize = n } +} + +func WithExpandableGrantsNeedsExpansionOnly(b bool) ListExpandableGrantsOption { + return func(o *listExpandableGrantsOptions) { o.needsExpansionOnly = b } +} + +// WithExpandableGrantsSyncID forces listing expandable grants for a specific sync id. +// If omitted, we default to the current sync id, then view sync id, then latest finished sync. +func WithExpandableGrantsSyncID(syncID string) ListExpandableGrantsOption { + return func(o *listExpandableGrantsOptions) { o.syncID = syncID } +} + +// ListExpandableGrants lists expandable grants using the grants table's queryable columns. 
+// It avoids scanning/unmarshalling all grants. +func (c *C1File) ListExpandableGrants(ctx context.Context, opts ...ListExpandableGrantsOption) ([]*ExpandableGrantDef, string, error) { + ctx, span := tracer.Start(ctx, "C1File.ListExpandableGrants") + defer span.End() + + if err := c.validateDb(ctx); err != nil { + return nil, "", err + } + + o := &listExpandableGrantsOptions{} + for _, opt := range opts { + opt(o) + } + + syncID, err := c.resolveSyncIDForInternalQuery(ctx, o.syncID) + if err != nil { + return nil, "", err + } + + q := c.db.From(grants.Name()).Prepared(true) + q = q.Select( + "id", + "external_id", + "entitlement_id", + "principal_resource_type_id", + "principal_resource_id", + "expansion", + "needs_expansion", + ) + q = q.Where(goqu.C("sync_id").Eq(syncID)) + q = q.Where(goqu.C("expansion").IsNotNull()) + if o.needsExpansionOnly { + q = q.Where(goqu.C("needs_expansion").Eq(1)) + } + + if o.pageToken != "" { + // Page token is the grants table row ID. + id, err := strconv.ParseInt(o.pageToken, 10, 64) + if err != nil { + return nil, "", fmt.Errorf("invalid expandable grants page token %q: %w", o.pageToken, err) + } + q = q.Where(goqu.C("id").Gte(id)) + } + + pageSize := o.pageSize + if pageSize > maxPageSize || pageSize == 0 { + pageSize = maxPageSize + } + q = q.Order(goqu.C("id").Asc()).Limit(uint(pageSize + 1)) + + query, args, err := q.ToSQL() + if err != nil { + return nil, "", err + } + + rows, err := c.db.QueryContext(ctx, query, args...) 
+ if err != nil { + return nil, "", err + } + defer rows.Close() + + defs := make([]*ExpandableGrantDef, 0, pageSize) + var ( + count uint32 + lastRow int64 + ) + for rows.Next() { + count++ + if count > pageSize { + break + } + + var ( + rowID int64 + externalID string + dstEntitlementID string + principalRTID string + principalRID string + expansionBlob []byte + needsExpansionInt int + ) + + if err := rows.Scan( + &rowID, + &externalID, + &dstEntitlementID, + &principalRTID, + &principalRID, + &expansionBlob, + &needsExpansionInt, + ); err != nil { + return nil, "", err + } + lastRow = rowID + + ge := &v2.GrantExpandable{} + if err := proto.Unmarshal(expansionBlob, ge); err != nil { + return nil, "", fmt.Errorf("invalid expansion data for %q: %w", externalID, err) + } + + defs = append(defs, &ExpandableGrantDef{ + RowID: rowID, + GrantExternalID: externalID, + DstEntitlementID: dstEntitlementID, + PrincipalResourceTypeID: principalRTID, + PrincipalResourceID: principalRID, + SrcEntitlementIDs: ge.GetEntitlementIds(), + Shallow: ge.GetShallow(), + PrincipalResourceTypeIDs: ge.GetResourceTypeIds(), + NeedsExpansion: needsExpansionInt != 0, + }) + } + if err := rows.Err(); err != nil { + return nil, "", err + } + + nextPageToken := "" + if count > pageSize { + nextPageToken = strconv.FormatInt(lastRow+1, 10) + } + return defs, nextPageToken, nil +} + +func (c *C1File) resolveSyncIDForInternalQuery(ctx context.Context, forced string) (string, error) { + switch { + case forced != "": + return forced, nil + case c.currentSyncID != "": + return c.currentSyncID, nil + case c.viewSyncID != "": + return c.viewSyncID, nil + default: + latest, err := c.getCachedViewSyncRun(ctx) + if err != nil { + return "", err + } + if latest == nil { + return "", sql.ErrNoRows + } + return latest.ID, nil + } +} diff --git a/pkg/dotc1z/grants_needs_expansion.go b/pkg/dotc1z/grants_needs_expansion.go new file mode 100644 index 000000000..38c7588a0 --- /dev/null +++ 
b/pkg/dotc1z/grants_needs_expansion.go @@ -0,0 +1,72 @@ +package dotc1z + +import ( + "context" + "fmt" + + "github.com/doug-martin/goqu/v9" +) + +// SetNeedsExpansionForGrants sets needs_expansion for the provided grant external IDs in the given sync. +func (c *C1File) SetNeedsExpansionForGrants(ctx context.Context, syncID string, grantExternalIDs []string, needsExpansion bool) error { + ctx, span := tracer.Start(ctx, "C1File.SetNeedsExpansionForGrants") + defer span.End() + + if c.readOnly { + return ErrReadOnly + } + if err := c.validateDb(ctx); err != nil { + return err + } + if len(grantExternalIDs) == 0 { + return nil + } + + val := 0 + if needsExpansion { + val = 1 + } + + q := c.db.Update(grants.Name()).Prepared(true) + q = q.Set(goqu.Record{"needs_expansion": val}) + q = q.Where(goqu.C("sync_id").Eq(syncID)) + q = q.Where(goqu.C("external_id").In(grantExternalIDs)) + + query, args, err := q.ToSQL() + if err != nil { + return err + } + if _, err := c.db.ExecContext(ctx, query, args...); err != nil { + return fmt.Errorf("set needs_expansion: %w", err) + } + c.dbUpdated = true + return nil +} + +// ClearNeedsExpansionForSync clears needs_expansion for all grants in the given sync. 
+func (c *C1File) ClearNeedsExpansionForSync(ctx context.Context, syncID string) error { + ctx, span := tracer.Start(ctx, "C1File.ClearNeedsExpansionForSync") + defer span.End() + + if c.readOnly { + return ErrReadOnly + } + if err := c.validateDb(ctx); err != nil { + return err + } + + q := c.db.Update(grants.Name()).Prepared(true) + q = q.Set(goqu.Record{"needs_expansion": 0}) + q = q.Where(goqu.C("sync_id").Eq(syncID)) + q = q.Where(goqu.C("needs_expansion").Eq(1)) + + query, args, err := q.ToSQL() + if err != nil { + return err + } + if _, err := c.db.ExecContext(ctx, query, args...); err != nil { + return fmt.Errorf("clear needs_expansion: %w", err) + } + c.dbUpdated = true + return nil +} diff --git a/pkg/dotc1z/grants_test.go b/pkg/dotc1z/grants_test.go new file mode 100644 index 000000000..1b17b73b8 --- /dev/null +++ b/pkg/dotc1z/grants_test.go @@ -0,0 +1,72 @@ +package dotc1z + +import ( + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/anypb" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" +) + +func TestExtractAndStripExpansion_WhitespaceOnlyEntitlementIDs(t *testing.T) { + // Create a GrantExpandable annotation with only whitespace entitlement IDs. + // This should return nil because there are no valid source entitlements. 
+ expandable := v2.GrantExpandable_builder{ + EntitlementIds: []string{" ", "\t", " \n "}, + }.Build() + + expandableAny, err := anypb.New(expandable) + require.NoError(t, err) + + grant := v2.Grant_builder{ + Id: "test-grant", + Entitlement: v2.Entitlement_builder{ + Id: "test-entitlement", + }.Build(), + Principal: v2.Resource_builder{ + Id: v2.ResourceId_builder{ + ResourceType: "user", + Resource: "user1", + }.Build(), + }.Build(), + Annotations: []*anypb.Any{expandableAny}, + }.Build() + + expansionBytes, isExpandable := extractAndStripExpansion(grant) + require.False(t, isExpandable, "grant with only whitespace entitlement IDs should not be expandable") + require.Nil(t, expansionBytes, "expansion bytes should be nil for non-expandable grant") +} + +func TestExtractAndStripExpansion_MixedWhitespaceAndValidIDs(t *testing.T) { + // Create a GrantExpandable annotation with a mix of whitespace and valid IDs. + // The grant should still be expandable because there's at least one valid ID. + expandable := v2.GrantExpandable_builder{ + EntitlementIds: []string{" ", "valid-entitlement-id", "\t"}, + Shallow: true, + }.Build() + + expandableAny, err := anypb.New(expandable) + require.NoError(t, err) + + grant := v2.Grant_builder{ + Id: "test-grant", + Entitlement: v2.Entitlement_builder{ + Id: "test-entitlement", + }.Build(), + Principal: v2.Resource_builder{ + Id: v2.ResourceId_builder{ + ResourceType: "user", + Resource: "user1", + }.Build(), + }.Build(), + Annotations: []*anypb.Any{expandableAny}, + }.Build() + + expansionBytes, isExpandable := extractAndStripExpansion(grant) + require.True(t, isExpandable, "grant with valid entitlement ID should be expandable") + require.NotNil(t, expansionBytes, "expansion bytes should not be nil for expandable grant") + + // Verify that the annotation was stripped from the grant. 
+ require.Len(t, grant.GetAnnotations(), 0, "GrantExpandable annotation should be stripped from grant") +} diff --git a/pkg/dotc1z/resouce_types.go b/pkg/dotc1z/resouce_types.go index ee02a8476..0b7732cc8 100644 --- a/pkg/dotc1z/resouce_types.go +++ b/pkg/dotc1z/resouce_types.go @@ -44,6 +44,8 @@ func (r *resourceTypesTable) Schema() (string, []interface{}) { } func (r *resourceTypesTable) Migrations(ctx context.Context, db *goqu.Database) error { + _ = ctx + _ = db return nil } diff --git a/pkg/dotc1z/resources.go b/pkg/dotc1z/resources.go index 7e3505622..c413846c4 100644 --- a/pkg/dotc1z/resources.go +++ b/pkg/dotc1z/resources.go @@ -54,6 +54,8 @@ func (r *resourcesTable) Schema() (string, []interface{}) { } func (r *resourcesTable) Migrations(ctx context.Context, db *goqu.Database) error { + _ = ctx + _ = db return nil } diff --git a/pkg/dotc1z/sql_helpers.go b/pkg/dotc1z/sql_helpers.go index 8528537b5..53f1a85ac 100644 --- a/pkg/dotc1z/sql_helpers.go +++ b/pkg/dotc1z/sql_helpers.go @@ -33,8 +33,8 @@ var allTableDescriptors = []tableDescriptor{ resourceTypes, resources, entitlements, + syncRuns, // Must be before grants since grants migration joins sync_runs. grants, - syncRuns, assets, sessionStore, } @@ -88,6 +88,16 @@ type hasParentResourceIdListRequest interface { GetParentResourceId() *v2.ResourceId } +type hasExpandableOnlyListRequest interface { + listRequest + GetExpandableOnly() bool +} + +type hasNeedsExpansionOnlyListRequest interface { + listRequest + GetNeedsExpansionOnly() bool +} + type protoHasID interface { proto.Message GetId() string @@ -146,7 +156,14 @@ func listConnectorObjects[T proto.Message](ctx context.Context, c *C1File, table } q := c.db.From(tableName).Prepared(true) - q = q.Select("id", "data") + // Grants are special-cased because GrantExpandable is stored in a separate SQL column. + // When listing grants, we re-attach the GrantExpandable annotation to the returned proto. 
+ withExpansion := tableName == grants.Name() + if withExpansion { + q = q.Select("id", "data", "expansion") + } else { + q = q.Select("id", "data") + } // If the request allows filtering by resource type, apply the filter if resourceTypeReq, ok := req.(hasResourceTypeListRequest); ok { @@ -202,6 +219,14 @@ func listConnectorObjects[T proto.Message](ctx context.Context, c *C1File, table } } + if expandableReq, ok := req.(hasExpandableOnlyListRequest); ok && expandableReq.GetExpandableOnly() { + q = q.Where(goqu.C("expansion").IsNotNull()) + } + + if needsExpansionReq, ok := req.(hasNeedsExpansionOnlyListRequest); ok && needsExpansionReq.GetNeedsExpansionOnly() { + q = q.Where(goqu.C("needs_expansion").Eq(1)) + } + // If a sync is running, be sure we only select from the current values switch { case reqSyncID != "": @@ -270,15 +295,37 @@ func listConnectorObjects[T proto.Message](ctx context.Context, c *C1File, table if count > pageSize { break } - err := rows.Scan(&lastRow, &data) - if err != nil { - return nil, "", err + var expansionBytes []byte + if withExpansion { + // IMPORTANT: keep expansion scoped to this row. Some drivers may not overwrite a []byte + // destination on NULL, which would cause us to accidentally reuse bytes from a previous row. 
+ expansionBytes = nil + err := rows.Scan(&lastRow, &data, &expansionBytes) + if err != nil { + return nil, "", err + } + } else { + err := rows.Scan(&lastRow, &data) + if err != nil { + return nil, "", err + } } t := factory() err = unmarshalerOptions.Unmarshal(data, t) if err != nil { return nil, "", err } + if withExpansion && len(expansionBytes) > 0 { + if g, ok := any(t).(*v2.Grant); ok { + expandable := &v2.GrantExpandable{} + if err := proto.Unmarshal(expansionBytes, expandable); err != nil { + return nil, "", fmt.Errorf("failed to unmarshal grant expansion: %w", err) + } + annos := annotations.Annotations(g.GetAnnotations()) + annos.Append(expandable) + g.SetAnnotations(annos) + } + } ret = append(ret, t) } if rows.Err() != nil { @@ -532,8 +579,9 @@ func bulkPutConnectorObject[T proto.Message]( // Define query building function buildQueryFn := func(insertDs *goqu.InsertDataset, chunkedRows []*goqu.Record) (*goqu.InsertDataset, error) { + update := goqu.Record{"data": goqu.I("EXCLUDED.data")} return insertDs. - OnConflict(goqu.DoUpdate("external_id, sync_id", goqu.C("data").Set(goqu.I("EXCLUDED.data")))). + OnConflict(goqu.DoUpdate("external_id, sync_id", update)). Rows(chunkedRows). Prepared(true), nil } @@ -567,12 +615,13 @@ func bulkPutConnectorObjectIfNewer[T proto.Message]( // Define query building function buildQueryFn := func(insertDs *goqu.InsertDataset, chunkedRows []*goqu.Record) (*goqu.InsertDataset, error) { + update := goqu.Record{ + "data": goqu.I("EXCLUDED.data"), + "discovered_at": goqu.I("EXCLUDED.discovered_at"), + } return insertDs. OnConflict(goqu.DoUpdate("external_id, sync_id", - goqu.Record{ - "data": goqu.I("EXCLUDED.data"), - "discovered_at": goqu.I("EXCLUDED.discovered_at"), - }).Where( + update).Where( goqu.L("EXCLUDED.discovered_at > ?.discovered_at", goqu.I(tableName)), )). Rows(chunkedRows). 
diff --git a/pkg/dotc1z/sync_runs.go b/pkg/dotc1z/sync_runs.go index 4426a3e57..4d72a7ea3 100644 --- a/pkg/dotc1z/sync_runs.go +++ b/pkg/dotc1z/sync_runs.go @@ -33,7 +33,8 @@ create table if not exists %s ( sync_token text not null, sync_type text not null default 'full', parent_sync_id text not null default '', - linked_sync_id text not null default '' + linked_sync_id text not null default '', + supports_diff integer not null default 0 ); create unique index if not exists %s on %s (sync_id);` @@ -97,6 +98,11 @@ func (r *syncRunsTable) Migrations(ctx context.Context, db *goqu.Database) error } } + // Add supports_diff column if missing (for older files). + if _, err = db.ExecContext(ctx, fmt.Sprintf("alter table %s add column supports_diff integer not null default 0", r.Name())); err != nil && !isAlreadyExistsError(err) { + return err + } + return nil } @@ -108,6 +114,7 @@ type syncRun struct { Type connectorstore.SyncType ParentSyncID string LinkedSyncID string + SupportsDiff bool } // getCachedViewSyncRun returns the cached sync run for read operations. @@ -159,7 +166,7 @@ func (c *C1File) getLatestUnfinishedSync(ctx context.Context, syncType connector oneWeekAgo := time.Now().AddDate(0, 0, -7) ret := &syncRun{} q := c.db.From(syncRuns.Name()) - q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id") + q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id", "supports_diff") q = q.Where(goqu.C("ended_at").IsNull()) q = q.Where(goqu.C("started_at").Gte(oneWeekAgo)) q = q.Order(goqu.C("started_at").Desc()) @@ -175,7 +182,7 @@ func (c *C1File) getLatestUnfinishedSync(ctx context.Context, syncType connector row := c.db.QueryRowContext(ctx, query, args...) 
- err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID) + err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID, &ret.SupportsDiff) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil @@ -202,7 +209,7 @@ func (c *C1File) getFinishedSync(ctx context.Context, offset uint, syncType conn ret := &syncRun{} q := c.db.From(syncRuns.Name()) - q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id") + q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id", "supports_diff") q = q.Where(goqu.C("ended_at").IsNotNull()) if syncType != connectorstore.SyncTypeAny { q = q.Where(goqu.C("sync_type").Eq(syncType)) @@ -221,7 +228,7 @@ func (c *C1File) getFinishedSync(ctx context.Context, offset uint, syncType conn row := c.db.QueryRowContext(ctx, query, args...) 
- err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID) + err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID, &ret.SupportsDiff) if err != nil { if errors.Is(err, sql.ErrNoRows) { return nil, nil @@ -242,7 +249,7 @@ func (c *C1File) ListSyncRuns(ctx context.Context, pageToken string, pageSize ui } q := c.db.From(syncRuns.Name()).Prepared(true) - q = q.Select("id", "sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id") + q = q.Select("id", "sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id", "supports_diff") if pageToken != "" { q = q.Where(goqu.C("id").Gte(pageToken)) @@ -277,7 +284,7 @@ func (c *C1File) ListSyncRuns(ctx context.Context, pageToken string, pageSize ui } rowId := 0 data := &syncRun{} - err := rows.Scan(&rowId, &data.ID, &data.StartedAt, &data.EndedAt, &data.SyncToken, &data.Type, &data.ParentSyncID, &data.LinkedSyncID) + err := rows.Scan(&rowId, &data.ID, &data.StartedAt, &data.EndedAt, &data.SyncToken, &data.Type, &data.ParentSyncID, &data.LinkedSyncID, &data.SupportsDiff) if err != nil { return nil, "", err } @@ -366,7 +373,7 @@ func (c *C1File) getSync(ctx context.Context, syncID string) (*syncRun, error) { ret := &syncRun{} q := c.db.From(syncRuns.Name()) - q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id") + q = q.Select("sync_id", "started_at", "ended_at", "sync_token", "sync_type", "parent_sync_id", "linked_sync_id", "supports_diff") q = q.Where(goqu.C("sync_id").Eq(syncID)) query, args, err := q.ToSQL() @@ -374,7 +381,7 @@ func (c *C1File) getSync(ctx context.Context, syncID string) (*syncRun, error) { return nil, err } row := c.db.QueryRowContext(ctx, query, args...) 
- err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID) + err = row.Scan(&ret.ID, &ret.StartedAt, &ret.EndedAt, &ret.SyncToken, &ret.Type, &ret.ParentSyncID, &ret.LinkedSyncID, &ret.SupportsDiff) if err != nil { return nil, err } @@ -589,6 +596,8 @@ func (c *C1File) insertSyncRunWithLink(ctx context.Context, syncID string, syncT "sync_type": syncType, "parent_sync_id": parentSyncID, "linked_sync_id": linkedSyncID, + // New code writes SQL-layer metadata needed for diffs/expansion, so default to true. + "supports_diff": 1, }) query, args, err := q.ToSQL() @@ -658,6 +667,37 @@ func (c *C1File) endSyncRun(ctx context.Context, syncID string) error { return nil } +// SetSupportsDiff marks the given sync as supporting diff operations. +// This indicates the sync has SQL-layer grant metadata (is_expandable) properly populated. +func (c *C1File) SetSupportsDiff(ctx context.Context, syncID string) error { + ctx, span := tracer.Start(ctx, "C1File.SetSupportsDiff") + defer span.End() + + if c.readOnly { + return ErrReadOnly + } + + q := c.db.Update(syncRuns.Name()) + q = q.Set(goqu.Record{ + "supports_diff": 1, + }) + q = q.Where(goqu.C("sync_id").Eq(syncID)) + q = q.Where(goqu.C("supports_diff").Eq(0)) + + query, args, err := q.ToSQL() + if err != nil { + return err + } + + _, err = c.db.ExecContext(ctx, query, args...) 
+ if err != nil { + return err + } + c.dbUpdated = true + + return nil +} + func (c *C1File) Cleanup(ctx context.Context) error { ctx, span := tracer.Start(ctx, "C1File.Cleanup") defer span.End() diff --git a/pkg/ratelimit/grpc.go b/pkg/ratelimit/grpc.go index cd6351cb7..411af3f6f 100644 --- a/pkg/ratelimit/grpc.go +++ b/pkg/ratelimit/grpc.go @@ -63,11 +63,13 @@ func getRatelimitDescriptors(ctx context.Context, method string, in interface{}, // ListEntitlements, ListGrants if req, ok := in.(hasResource); ok { - if resourceType := req.GetResource().GetId().GetResourceType(); resourceType != "" { - ret.SetEntries(append(ret.GetEntries(), ratelimitV1.RateLimitDescriptors_Entry_builder{ - Key: descriptorKeyConnectorResourceType, - Value: resourceType, - }.Build())) + if r := req.GetResource(); r != nil { + if resourceType := r.GetId().GetResourceType(); resourceType != "" { + ret.SetEntries(append(ret.GetEntries(), ratelimitV1.RateLimitDescriptors_Entry_builder{ + Key: descriptorKeyConnectorResourceType, + Value: resourceType, + }.Build())) + } } return ret } diff --git a/pkg/sync/expand/expand_benchmark_test.go b/pkg/sync/expand/expand_benchmark_test.go index 184892132..e39166ce7 100644 --- a/pkg/sync/expand/expand_benchmark_test.go +++ b/pkg/sync/expand/expand_benchmark_test.go @@ -2,6 +2,8 @@ package expand import ( "context" + "database/sql" + "errors" "fmt" "maps" "os" @@ -114,17 +116,23 @@ func loadEntitlementGraphFromC1Z(ctx context.Context, c1f *dotc1z.C1File) (*Enti EntitlementId: srcEntitlementID, }.Build()) if err != nil { - continue // Skip if source entitlement not found + // Only skip not-found entitlements; propagate other errors. 
+ if errors.Is(err, sql.ErrNoRows) { + continue + } + return nil, fmt.Errorf("error fetching source entitlement %q: %w", srcEntitlementID, err) } graph.AddEntitlement(grant.GetEntitlement()) graph.AddEntitlement(srcEntitlement.GetEntitlement()) - _ = graph.AddEdge(ctx, + if err := graph.AddEdge(ctx, srcEntitlement.GetEntitlement().GetId(), grant.GetEntitlement().GetId(), expandable.GetShallow(), expandable.GetResourceTypeIds(), - ) + ); err != nil { + return nil, err + } } } diff --git a/pkg/sync/expand/graph.go b/pkg/sync/expand/graph.go index 4dce1bafd..6ff1051f3 100644 --- a/pkg/sync/expand/graph.go +++ b/pkg/sync/expand/graph.go @@ -202,6 +202,29 @@ func (g *EntitlementGraph) AddEntitlement(entitlement *v2.Entitlement) { g.EntitlementsToNodes[entitlement.GetId()] = node.Id } +// AddEntitlementID adds an entitlement ID as an unconnected node in the graph. +// This is a convenience for callers that already have entitlement IDs and do not +// want to fetch/unmarshal full entitlement protos. +func (g *EntitlementGraph) AddEntitlementID(entitlementID string) { + // If the entitlement is already in the graph, fail silently. + found := g.GetNode(entitlementID) + if found != nil { + return + } + g.HasNoCycles = false // Reset this since we're changing the graph. + + // Start at 1 in case we don't initialize something and try to get node 0. + g.NextNodeID++ + + node := Node{ + Id: g.NextNodeID, + EntitlementIDs: []string{entitlementID}, + } + + g.Nodes[node.Id] = node + g.EntitlementsToNodes[entitlementID] = node.Id +} + // GetEntitlements returns a combined list of _all_ entitlements from all nodes. 
func (g *EntitlementGraph) GetEntitlements() []string { var entitlements []string diff --git a/pkg/sync/incrementalexpansion/affected_subgraph.go b/pkg/sync/incrementalexpansion/affected_subgraph.go new file mode 100644 index 000000000..87b4d7af9 --- /dev/null +++ b/pkg/sync/incrementalexpansion/affected_subgraph.go @@ -0,0 +1,93 @@ +package incrementalexpansion + +import ( + "context" + "fmt" + + "github.com/conductorone/baton-sdk/pkg/dotc1z" +) + +// AffectedEntitlements computes the forward-closure of entitlements potentially impacted by an edge delta. +// Seeds include all src/dst entitlements that appear in Added/Removed edge sets. +// +// The closure is computed over the current edge set for targetSyncID (read from expandable-grant columns), +// following src -> dst direction. +func AffectedEntitlements(ctx context.Context, store expandableGrantLister, targetSyncID string, delta *EdgeDelta) (map[string]struct{}, error) { + if delta == nil { + return map[string]struct{}{}, nil + } + + adj, err := buildAdjacency(ctx, store, targetSyncID) + if err != nil { + return nil, err + } + + affected := make(map[string]struct{}, 256) + queue := make([]string, 0, 256) + + seed := func(entID string) { + if entID == "" { + return + } + if _, ok := affected[entID]; ok { + return + } + affected[entID] = struct{}{} + queue = append(queue, entID) + } + + for _, e := range delta.Added { + seed(e.SrcEntitlementID) + seed(e.DstEntitlementID) + } + for _, e := range delta.Removed { + seed(e.SrcEntitlementID) + seed(e.DstEntitlementID) + } + + for len(queue) > 0 { + u := queue[0] + queue = queue[1:] + for _, v := range adj[u] { + if _, ok := affected[v]; ok { + continue + } + affected[v] = struct{}{} + queue = append(queue, v) + } + } + + return affected, nil +} + +func buildAdjacency(ctx context.Context, store expandableGrantLister, syncID string) (map[string][]string, error) { + adj := make(map[string][]string, 1024) + pageToken := "" + for { + defs, next, err := 
store.ListExpandableGrants( + ctx, + dotc1z.WithExpandableGrantsSyncID(syncID), + dotc1z.WithExpandableGrantsPageToken(pageToken), + dotc1z.WithExpandableGrantsNeedsExpansionOnly(false), + ) + if err != nil { + return nil, fmt.Errorf("build adjacency: %w", err) + } + + for _, def := range defs { + dst := def.DstEntitlementID + for _, src := range def.SrcEntitlementIDs { + if src == "" || dst == "" { + continue + } + adj[src] = append(adj[src], dst) + } + } + + if next == "" { + break + } + pageToken = next + } + return adj, nil +} diff --git a/pkg/sync/incrementalexpansion/apply.go b/pkg/sync/incrementalexpansion/apply.go new file mode 100644 index 000000000..d97b4446e --- /dev/null +++ b/pkg/sync/incrementalexpansion/apply.go @@ -0,0 +1,105 @@ +package incrementalexpansion + +import ( + "context" + "fmt" + "os" + + "github.com/conductorone/baton-sdk/pkg/dotc1z" +) + +// ApplyIncrementalExpansionFromDiff applies invalidation + subgraph expansion to targetSyncID +// using paired diff syncs (upserts/deletions). +// +// This is intended to be called after the target sync has already had the base+diff data applied, +// and the diff syncs exist in the same file (so they can be read via ListExpandableGrants with forced sync IDs). +func ApplyIncrementalExpansionFromDiff(ctx context.Context, c1f *dotc1z.C1File, targetSyncID string, upsertsSyncID string, deletionsSyncID string) error { + delta, err := EdgeDeltaFromDiffSyncs(ctx, c1f, upsertsSyncID, deletionsSyncID) + if err != nil { + return err + } + if os.Getenv("BATON_DEBUG_INCREMENTAL") != "" { + fmt.Printf("incremental: delta added=%d removed=%d\n", len(delta.Added), len(delta.Removed)) + // Print a small sample for debugging. 
+ i := 0 + for _, e := range delta.Added { + fmt.Printf(" added: %s -> %s\n", e.SrcEntitlementID, e.DstEntitlementID) + i++ + if i >= 10 { + break + } + } + i = 0 + for _, e := range delta.Removed { + fmt.Printf(" removed: %s -> %s\n", e.SrcEntitlementID, e.DstEntitlementID) + i++ + if i >= 10 { + break + } + } + } + + // Seed invalidation from any entitlement/resource/grant changes, not just edge-definition changes. + // If the set of grants for a source entitlement changes (including deletion), its propagated sources + // must be recomputed along outgoing edges. + changedSources := make(map[string]struct{}, 256) + addSeeds := func(ids []string) { + for _, id := range ids { + if id == "" { + continue + } + changedSources[id] = struct{}{} + } + } + + ids, err := c1f.ListDistinctGrantEntitlementIDsForSync(ctx, upsertsSyncID) + if err != nil { + return err + } + addSeeds(ids) + ids, err = c1f.ListDistinctGrantEntitlementIDsForSync(ctx, deletionsSyncID) + if err != nil { + return err + } + addSeeds(ids) + + // Entitlement deletions/updates should also seed invalidation, even if they had no grant changes. + ids, err = c1f.ListDistinctEntitlementIDsForSync(ctx, upsertsSyncID) + if err != nil { + return err + } + addSeeds(ids) + ids, err = c1f.ListDistinctEntitlementIDsForSync(ctx, deletionsSyncID) + if err != nil { + return err + } + addSeeds(ids) + + affected, err := AffectedEntitlements(ctx, c1f, targetSyncID, delta) + if err != nil { + return err + } + + if err := InvalidateRemovedEdges(ctx, c1f, targetSyncID, delta); err != nil { + return err + } + + if err := InvalidateChangedSourceEntitlements(ctx, c1f, targetSyncID, changedSources); err != nil { + return err + } + + // Include changed source entitlements in the affected closure for dirty marking. 
+ for id := range changedSources { + affected[id] = struct{}{} + } + + if err := MarkNeedsExpansionForAffectedEdges(ctx, c1f, targetSyncID, affected); err != nil { + return err + } + + if err := ExpandDirtySubgraph(ctx, c1f, targetSyncID); err != nil { + return fmt.Errorf("expand dirty subgraph: %w", err) + } + + return nil +} diff --git a/pkg/sync/incrementalexpansion/edge_delta.go b/pkg/sync/incrementalexpansion/edge_delta.go new file mode 100644 index 000000000..f0ca871fb --- /dev/null +++ b/pkg/sync/incrementalexpansion/edge_delta.go @@ -0,0 +1,92 @@ +package incrementalexpansion + +import ( + "context" + "fmt" + "strings" + + "github.com/conductorone/baton-sdk/pkg/dotc1z" +) + +// Edge represents an expansion edge from a source entitlement to a descendant entitlement, +// with expansion constraints. +type Edge struct { + SrcEntitlementID string + DstEntitlementID string + Shallow bool + // PrincipalResourceTypeIDs is the filter applied when listing source grants to propagate. + PrincipalResourceTypeIDs []string +} + +func (e Edge) Key() string { + // Use an unlikely separator to avoid accidental collisions. + sep := "\x1f" + shallow := "0" + if e.Shallow { + shallow = "1" + } + return strings.Join([]string{ + e.SrcEntitlementID, + e.DstEntitlementID, + shallow, + strings.Join(e.PrincipalResourceTypeIDs, sep), + }, sep) +} + +type EdgeDelta struct { + Added map[string]Edge + Removed map[string]Edge +} + +type expandableGrantLister interface { + ListExpandableGrants(ctx context.Context, opts ...dotc1z.ListExpandableGrantsOption) ([]*dotc1z.ExpandableGrantDef, string, error) +} + +// EdgeDeltaFromDiffSyncs computes edge additions/removals from a paired diff sync: +// - upsertsSyncID contains NEW versions (adds + modifications) +// - deletionsSyncID contains OLD versions (deletes + OLD side of modifications) +// +// This function assumes diff generation inserts OLD versions of modified grants into the deletions sync. 
+func EdgeDeltaFromDiffSyncs(ctx context.Context, store expandableGrantLister, upsertsSyncID, deletionsSyncID string) (*EdgeDelta, error) { + added, err := edgeSetFromSync(ctx, store, upsertsSyncID) + if err != nil { + return nil, fmt.Errorf("edge delta: failed reading upserts sync %s: %w", upsertsSyncID, err) + } + removed, err := edgeSetFromSync(ctx, store, deletionsSyncID) + if err != nil { + return nil, fmt.Errorf("edge delta: failed reading deletions sync %s: %w", deletionsSyncID, err) + } + return &EdgeDelta{Added: added, Removed: removed}, nil +} + +func edgeSetFromSync(ctx context.Context, store expandableGrantLister, syncID string) (map[string]Edge, error) { + out := make(map[string]Edge) + pageToken := "" + for { + defs, next, err := store.ListExpandableGrants( + ctx, + dotc1z.WithExpandableGrantsSyncID(syncID), + dotc1z.WithExpandableGrantsPageToken(pageToken), + dotc1z.WithExpandableGrantsNeedsExpansionOnly(false), + ) + if err != nil { + return nil, err + } + for _, def := range defs { + for _, src := range def.SrcEntitlementIDs { + e := Edge{ + SrcEntitlementID: src, + DstEntitlementID: def.DstEntitlementID, + Shallow: def.Shallow, + PrincipalResourceTypeIDs: def.PrincipalResourceTypeIDs, + } + out[e.Key()] = e + } + } + if next == "" { + break + } + pageToken = next + } + return out, nil +} diff --git a/pkg/sync/incrementalexpansion/expand_dirty.go b/pkg/sync/incrementalexpansion/expand_dirty.go new file mode 100644 index 000000000..7c81bbd36 --- /dev/null +++ b/pkg/sync/incrementalexpansion/expand_dirty.go @@ -0,0 +1,99 @@ +package incrementalexpansion + +import ( + "context" + "database/sql" + "errors" + "fmt" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" + "github.com/conductorone/baton-sdk/pkg/dotc1z" + "github.com/conductorone/baton-sdk/pkg/sync/expand" +) + +// ExpandDirtySubgraph loads only expandable edges marked needs_expansion=1 for syncID, +// runs the standard 
expander, then clears needs_expansion. +// +// NOTE: This expands only edges present in the loaded subgraph. Callers are responsible for +// marking the correct edge-defining grants as needs_expansion based on the affected subgraph. +func ExpandDirtySubgraph(ctx context.Context, c1f *dotc1z.C1File, syncID string) error { + if err := c1f.SetSyncID(ctx, syncID); err != nil { + return err + } + + // Mark the sync as supporting diff operations (SQL-layer data is ready). This is idempotent - subsequent calls are no-ops. + // The marker is used to detect syncs that expanded with older code that dropped annotations. + if err := c1f.SetSupportsDiff(ctx, syncID); err != nil { + return err + } + + graph := expand.NewEntitlementGraph(ctx) + + pageToken := "" + for { + defs, next, err := c1f.ListExpandableGrants( + ctx, + dotc1z.WithExpandableGrantsSyncID(syncID), + dotc1z.WithExpandableGrantsPageToken(pageToken), + dotc1z.WithExpandableGrantsNeedsExpansionOnly(true), + ) + if err != nil { + return err + } + + for _, def := range defs { + principalID := v2.ResourceId_builder{ + ResourceType: def.PrincipalResourceTypeID, + Resource: def.PrincipalResourceID, + }.Build() + + for _, srcEntitlementID := range def.SrcEntitlementIDs { + srcEntitlement, err := c1f.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ + EntitlementId: srcEntitlementID, + }.Build()) + if err != nil { + // Only skip not-found entitlements; propagate other errors + // to avoid silently dropping edges and yielding incorrect expansions. 
+ if errors.Is(err, sql.ErrNoRows) { + continue + } + return fmt.Errorf("error fetching source entitlement %q: %w", srcEntitlementID, err) + } + + sourceEntitlementResourceID := srcEntitlement.GetEntitlement().GetResource().GetId() + if sourceEntitlementResourceID == nil { + return fmt.Errorf("source entitlement resource id was nil") + } + if principalID.GetResourceType() != sourceEntitlementResourceID.GetResourceType() || + principalID.GetResource() != sourceEntitlementResourceID.GetResource() { + return fmt.Errorf("source entitlement resource id did not match grant principal id") + } + + graph.AddEntitlementID(def.DstEntitlementID) + graph.AddEntitlementID(srcEntitlementID) + if err := graph.AddEdge(ctx, srcEntitlementID, def.DstEntitlementID, def.Shallow, def.PrincipalResourceTypeIDs); err != nil { + return fmt.Errorf("error adding edge to graph: %w", err) + } + } + } + + if next == "" { + break + } + pageToken = next + } + graph.Loaded = true + + // Fix cycles before running expansion. 
+ if err := graph.FixCycles(ctx); err != nil { + return err + } + + expander := expand.NewExpander(c1f, graph) + if err := expander.Run(ctx); err != nil { + return err + } + + return c1f.ClearNeedsExpansionForSync(ctx, syncID) +} diff --git a/pkg/sync/incrementalexpansion/expand_dirty_test.go b/pkg/sync/incrementalexpansion/expand_dirty_test.go new file mode 100644 index 000000000..58f8493d2 --- /dev/null +++ b/pkg/sync/incrementalexpansion/expand_dirty_test.go @@ -0,0 +1,175 @@ +package incrementalexpansion_test + +import ( + "context" + "path/filepath" + "testing" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/connectorstore" + "github.com/conductorone/baton-sdk/pkg/dotc1z" + "github.com/conductorone/baton-sdk/pkg/sync/incrementalexpansion" + batonEntitlement "github.com/conductorone/baton-sdk/pkg/types/entitlement" + batonGrant "github.com/conductorone/baton-sdk/pkg/types/grant" + "github.com/stretchr/testify/require" +) + +// TestExpandDirtySubgraph_MissingSourceEntitlementSkipped verifies that when a grant +// references a source entitlement that doesn't exist (sql.ErrNoRows), the edge is +// skipped gracefully rather than causing an error. 
+func TestExpandDirtySubgraph_MissingSourceEntitlementSkipped(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + dbPath := filepath.Join(tmpDir, "test.c1z") + + // Create test data + groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build() + userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build() + + g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build() + g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build() + u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build() + + // Only create e2 - e1 will be referenced but NOT exist + e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build() + + // Direct grant U1 -> E2 + grantU1E2 := v2.Grant_builder{ + Id: batonGrant.NewGrantID(u1, e2), + Entitlement: e2, + Principal: u1, + }.Build() + + // Nesting grant G1 -> E2 referencing NON-EXISTENT entitlement "group:g1:member" + // The source entitlement e1 is never created, so GetEntitlement will return sql.ErrNoRows + nonExistentEntitlementID := batonEntitlement.NewEntitlementID(g1, "member") + grantG1E2 := v2.Grant_builder{ + Id: batonGrant.NewGrantID(g1, e2), + Entitlement: e2, + Principal: g1, + Annotations: annotations.New(v2.GrantExpandable_builder{ + EntitlementIds: []string{nonExistentEntitlementID}, // References non-existent entitlement + Shallow: false, + ResourceTypeIds: []string{"user"}, + }.Build()), + }.Build() + + // Create the c1z file + c1f, err := dotc1z.NewC1ZFile(ctx, dbPath) + require.NoError(t, err) + defer c1f.Close(ctx) + + syncID, err := c1f.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, c1f.PutResourceTypes(ctx, groupRT, userRT)) + 
require.NoError(t, c1f.PutResources(ctx, g1, g2, u1)) + require.NoError(t, c1f.PutEntitlements(ctx, e2)) // Note: e1 is intentionally NOT added + require.NoError(t, c1f.PutGrants(ctx, grantU1E2, grantG1E2)) + require.NoError(t, c1f.EndSync(ctx)) + + // Mark expandable grants as needing expansion + require.NoError(t, c1f.SetNeedsExpansionForGrants(ctx, syncID, []string{grantG1E2.GetId()}, true)) + + // This should NOT error - it should skip the edge with the missing source entitlement + err = incrementalexpansion.ExpandDirtySubgraph(ctx, c1f, syncID) + require.NoError(t, err, "ExpandDirtySubgraph should skip missing source entitlements gracefully") + + // Verify the grant with the missing source wasn't somehow modified incorrectly + require.NoError(t, c1f.SetSyncID(ctx, "")) + require.NoError(t, c1f.ViewSync(ctx, syncID)) + + // Count grants - should still have original 2 grants (no derived grants created + // because the source entitlement doesn't exist) + grantCount := 0 + pageToken := "" + for { + resp, err := c1f.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build()) + require.NoError(t, err) + grantCount += len(resp.GetList()) + pageToken = resp.GetNextPageToken() + if pageToken == "" { + break + } + } + require.Equal(t, 2, grantCount, "should have exactly 2 grants (no derived grants from missing source)") +} + +// TestExpandDirtySubgraph_ValidSourceEntitlementWorks verifies that when a grant +// references a valid source entitlement, the edge is processed correctly. 
+func TestExpandDirtySubgraph_ValidSourceEntitlementWorks(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + dbPath := filepath.Join(tmpDir, "test.c1z") + + // Create test data + groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build() + userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build() + + g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build() + g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build() + u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build() + + e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build() + e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build() + + // Direct grant U1 -> E1 + grantU1E1 := v2.Grant_builder{ + Id: batonGrant.NewGrantID(u1, e1), + Entitlement: e1, + Principal: u1, + }.Build() + + // Nesting grant G1 -> E2 referencing EXISTING entitlement e1 + grantG1E2 := v2.Grant_builder{ + Id: batonGrant.NewGrantID(g1, e2), + Entitlement: e2, + Principal: g1, + Annotations: annotations.New(v2.GrantExpandable_builder{ + EntitlementIds: []string{e1.GetId()}, + Shallow: false, + ResourceTypeIds: []string{"user"}, + }.Build()), + }.Build() + + // Create the c1z file + c1f, err := dotc1z.NewC1ZFile(ctx, dbPath) + require.NoError(t, err) + defer c1f.Close(ctx) + + syncID, err := c1f.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, c1f.PutResourceTypes(ctx, groupRT, userRT)) + require.NoError(t, c1f.PutResources(ctx, g1, g2, u1)) + require.NoError(t, c1f.PutEntitlements(ctx, e1, e2)) // Both entitlements exist + require.NoError(t, c1f.PutGrants(ctx, 
grantU1E1, grantG1E2)) + require.NoError(t, c1f.EndSync(ctx)) + + // Mark expandable grants as needing expansion + require.NoError(t, c1f.SetNeedsExpansionForGrants(ctx, syncID, []string{grantG1E2.GetId()}, true)) + + // This should succeed and create derived grants + err = incrementalexpansion.ExpandDirtySubgraph(ctx, c1f, syncID) + require.NoError(t, err) + + // Verify derived grants were created + require.NoError(t, c1f.SetSyncID(ctx, "")) + require.NoError(t, c1f.ViewSync(ctx, syncID)) + + // Count grants - should have 3 grants now: + // 1. grantU1E1 (direct) + // 2. grantG1E2 (nesting) + // 3. derived grant U1 -> E2 (from expansion) + grantCount := 0 + pageToken := "" + for { + resp, err := c1f.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build()) + require.NoError(t, err) + grantCount += len(resp.GetList()) + pageToken = resp.GetNextPageToken() + if pageToken == "" { + break + } + } + require.Equal(t, 3, grantCount, "should have 3 grants (2 original + 1 derived)") +} diff --git a/pkg/sync/incrementalexpansion/incremental_correctness_test.go b/pkg/sync/incrementalexpansion/incremental_correctness_test.go new file mode 100644 index 000000000..bd54b5b45 --- /dev/null +++ b/pkg/sync/incrementalexpansion/incremental_correctness_test.go @@ -0,0 +1,1886 @@ +package incrementalexpansion_test + +import ( + "context" + "path/filepath" + "testing" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" + "github.com/conductorone/baton-sdk/pkg/connectorstore" + "github.com/conductorone/baton-sdk/pkg/dotc1z" + "github.com/conductorone/baton-sdk/pkg/sync/expand" + "github.com/conductorone/baton-sdk/pkg/sync/incrementalexpansion" + batonEntitlement "github.com/conductorone/baton-sdk/pkg/types/entitlement" + batonGrant "github.com/conductorone/baton-sdk/pkg/types/grant" + "github.com/stretchr/testify/require" +) + +func runFullExpansion(ctx context.Context, c1f *dotc1z.C1File, 
syncID string) error { + if err := c1f.SetSyncID(ctx, syncID); err != nil { + return err + } + + // Mark the sync as supporting diff operations (SQL-layer data is ready). + if err := c1f.SetSupportsDiff(ctx, syncID); err != nil { + return err + } + + graph := expand.NewEntitlementGraph(ctx) + + pageToken := "" + for { + defs, next, err := c1f.ListExpandableGrants( + ctx, + dotc1z.WithExpandableGrantsSyncID(syncID), + dotc1z.WithExpandableGrantsPageToken(pageToken), + dotc1z.WithExpandableGrantsNeedsExpansionOnly(false), + ) + if err != nil { + return err + } + for _, def := range defs { + for _, src := range def.SrcEntitlementIDs { + graph.AddEntitlementID(def.DstEntitlementID) + graph.AddEntitlementID(src) + if err := graph.AddEdge(ctx, src, def.DstEntitlementID, def.Shallow, def.PrincipalResourceTypeIDs); err != nil { + return err + } + } + } + if next == "" { + break + } + pageToken = next + } + + graph.Loaded = true + if err := graph.FixCycles(ctx); err != nil { + return err + } + + return expand.NewExpander(c1f, graph).Run(ctx) +} + +func loadGrantSourcesByKey(ctx context.Context, c1f *dotc1z.C1File, syncID string) (map[string]map[string]bool, error) { + // Ensure we're not in a "current sync" context (ViewSync rejects that). 
+ err := c1f.SetSyncID(ctx, "") + if err != nil { + return nil, err + } + if err := c1f.ViewSync(ctx, syncID); err != nil { + return nil, err + } + + out := make(map[string]map[string]bool) + pageToken := "" + for { + resp, err := c1f.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build()) + if err != nil { + return nil, err + } + for _, g := range resp.GetList() { + key := g.GetEntitlement().GetId() + "|" + g.GetPrincipal().GetId().GetResourceType() + "|" + g.GetPrincipal().GetId().GetResource() + srcs := make(map[string]bool) + for s := range g.GetSources().GetSources() { + srcs[s] = true + } + out[key] = srcs + } + pageToken = resp.GetNextPageToken() + if pageToken == "" { + break + } + } + return out, nil +} + +func TestIncrementalExpansion_RemovedEdgeDeletesDerivedGrant(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + oldPath := filepath.Join(tmpDir, "old.c1z") + newPath := filepath.Join(tmpDir, "new.c1z") + expectedPath := filepath.Join(tmpDir, "expected.c1z") + + // Common objects + groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build() + userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build() + + g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build() + g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build() + u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build() + + e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build() + e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build() + + // Direct grant U1 -> E1 + grantU1E1 := v2.Grant_builder{ + Id: batonGrant.NewGrantID(u1, e1), + 
Entitlement: e1, + Principal: u1, + }.Build() + + // Nesting grant G1 -> E2 defining edge E1 -> E2 + nestingGrantID := batonGrant.NewGrantID(g1, e2) + grantG1E2 := v2.Grant_builder{ + Id: nestingGrantID, + Entitlement: e2, + Principal: g1, + Annotations: annotations.New(v2.GrantExpandable_builder{ + EntitlementIds: []string{e1.GetId()}, + Shallow: false, + ResourceTypeIds: []string{"user"}, + }.Build()), + }.Build() + + // OLD (expanded) + oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath) + require.NoError(t, err) + defer oldFile.Close(ctx) + + oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT)) + require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1)) + require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2)) + require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2)) + require.NoError(t, oldFile.EndSync(ctx)) + + // Expand old sync in place + require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID)) + + // NEW starts as a copy of OLD expanded data, but without the nesting grant row. + newFile, err := dotc1z.NewC1ZFile(ctx, newPath) + require.NoError(t, err) + defer newFile.Close(ctx) + + newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT)) + require.NoError(t, newFile.PutResources(ctx, g1, g2, u1)) + require.NoError(t, newFile.PutEntitlements(ctx, e1, e2)) + + // Copy all grants from old expanded sync -> new sync, skipping nesting grant (edge removal). 
+ require.NoError(t, oldFile.SetSyncID(ctx, "")) + require.NoError(t, oldFile.ViewSync(ctx, oldSyncID)) + pageToken := "" + for { + resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build()) + require.NoError(t, err) + for _, g := range resp.GetList() { + if g.GetId() == nestingGrantID { + continue + } + require.NoError(t, newFile.PutGrants(ctx, g)) + } + pageToken = resp.GetNextPageToken() + if pageToken == "" { + break + } + } + require.NoError(t, newFile.EndSync(ctx)) + + // Generate diff syncs: main=NEW, attached=OLD + attached, err := newFile.AttachFile(oldFile, "attached") + require.NoError(t, err) + upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID) + require.NoError(t, err) + _, err = attached.DetachFile("attached") + require.NoError(t, err) + + // Incremental invalidation + expansion on NEW + require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID)) + + // EXPECTED: start from connector truth after removal (no nesting grant), then expand fully. 
	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
	require.NoError(t, err)
	defer expectedFile.Close(ctx)

	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2))
	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1 /* no nesting */))
	require.NoError(t, expectedFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))

	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
	require.NoError(t, err)
	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
	require.NoError(t, err)

	require.Equal(t, want, got)
}

// Removing one of two expandable edges into e2 must drop only that edge's
// contribution; the surviving edge (g3 -> e2) keeps its derived grants.
func TestIncrementalExpansion_RemovedEdgeRemovesOnlyOneSource(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()

	oldPath := filepath.Join(tmpDir, "old2.c1z")
	newPath := filepath.Join(tmpDir, "new2.c1z")
	expectedPath := filepath.Join(tmpDir, "expected2.c1z")

	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()

	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
	g3 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g3"}.Build(), DisplayName: "G3"}.Build()
	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()

	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()
	e3 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g3, "member"), Resource: g3, Slug: "member", DisplayName: "member"}.Build()

	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()
	grantU1E3 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e3), Entitlement: e3, Principal: u1}.Build()

	nestingGrantID1 := batonGrant.NewGrantID(g1, e2)
	grantG1E2 := v2.Grant_builder{
		Id:          nestingGrantID1,
		Entitlement: e2,
		Principal:   g1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	nestingGrantID3 := batonGrant.NewGrantID(g3, e2)
	grantG3E2 := v2.Grant_builder{
		Id:          nestingGrantID3,
		Entitlement: e2,
		Principal:   g3,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e3.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
	require.NoError(t, err)
	defer oldFile.Close(ctx)

	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, oldFile.PutResources(ctx, g1, g2, g3, u1))
	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2, e3))
	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantU1E3, grantG1E2, grantG3E2))
	require.NoError(t, oldFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))

	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
	require.NoError(t, err)
	defer newFile.Close(ctx)

	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, newFile.PutResources(ctx, g1, g2, g3, u1))
	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2, e3))

	// Copy OLD's expanded grants into NEW, skipping only the g1 -> e2 edge grant.
	require.NoError(t, oldFile.SetSyncID(ctx, ""))
	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
	pageToken := ""
	for {
		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
		require.NoError(t, err)
		for _, g := range resp.GetList() {
			if g.GetId() == nestingGrantID1 {
				continue
			}
			require.NoError(t, newFile.PutGrants(ctx, g))
		}
		pageToken = resp.GetNextPageToken()
		if pageToken == "" {
			break
		}
	}
	require.NoError(t, newFile.EndSync(ctx))

	attached, err := newFile.AttachFile(oldFile, "attached")
	require.NoError(t, err)
	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
	require.NoError(t, err)
	_, err = attached.DetachFile("attached")
	require.NoError(t, err)

	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))

	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
	require.NoError(t, err)
	defer expectedFile.Close(ctx)

	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, g3, u1))
	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2, e3))
	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1, grantU1E3 /* no G1->E2 */, grantG3E2))
	require.NoError(t, expectedFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))

	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
	require.NoError(t, err)
	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
	require.NoError(t, err)

	require.Equal(t, want, got)
}

// A NEW sync identical to OLD's expanded state must leave grant sources
// unchanged when the diff-based incremental expansion is applied.
func TestIncrementalExpansion_NoChangesIsNoop(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()

	oldPath := filepath.Join(tmpDir, "old_noop.c1z")
	newPath := filepath.Join(tmpDir, "new_noop.c1z")

	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()

	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()

	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()

	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()
	grantG1E2 := v2.Grant_builder{
		Id:          batonGrant.NewGrantID(g1, e2),
		Entitlement: e2,
		Principal:   g1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	// OLD (expanded)
	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
	require.NoError(t, err)
	defer oldFile.Close(ctx)

	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2))
	require.NoError(t, oldFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))

	// NEW is identical to OLD expanded state.
	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
	require.NoError(t, err)
	defer newFile.Close(ctx)

	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, newFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2))

	require.NoError(t, oldFile.SetSyncID(ctx, ""))
	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
	pageToken := ""
	for {
		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
		require.NoError(t, err)
		for _, g := range resp.GetList() {
			require.NoError(t, newFile.PutGrants(ctx, g))
		}
		pageToken = resp.GetNextPageToken()
		if pageToken == "" {
			break
		}
	}
	require.NoError(t, newFile.EndSync(ctx))

	attached, err := newFile.AttachFile(oldFile, "attached")
	require.NoError(t, err)
	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
	require.NoError(t, err)
	_, err = attached.DetachFile("attached")
	require.NoError(t, err)

	// Should be a no-op.
	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))

	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
	require.NoError(t, err)
	want, err := loadGrantSourcesByKey(ctx, oldFile, oldSyncID)
	require.NoError(t, err)
	require.Equal(t, want, got)
}

// Two disjoint expandable subgraphs (A and B); removing both nesting grants
// must invalidate each subgraph's derived grants independently.
func TestIncrementalExpansion_MultipleDisjointSubgraphs(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()

	oldPath := filepath.Join(tmpDir, "old_multi.c1z")
	newPath := filepath.Join(tmpDir, "new_multi.c1z")
	expectedPath := filepath.Join(tmpDir, "expected_multi.c1z")

	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()

	// Subgraph A
	ga1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "ga1"}.Build(), DisplayName: "GA1"}.Build()
	ga2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "ga2"}.Build(), DisplayName: "GA2"}.Build()
	ua := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "ua"}.Build(), DisplayName: "UA"}.Build()

	ea1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(ga1, "member"), Resource: ga1, Slug: "member", DisplayName: "member"}.Build()
	ea2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(ga2, "member"), Resource: ga2, Slug: "member", DisplayName: "member"}.Build()

	grantUAEA1 := v2.Grant_builder{Id: batonGrant.NewGrantID(ua, ea1), Entitlement: ea1, Principal: ua}.Build()
	nestingAID := batonGrant.NewGrantID(ga1, ea2)
	grantGA1EA2 := v2.Grant_builder{
		Id:          nestingAID,
		Entitlement: ea2,
		Principal:   ga1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{ea1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	// Subgraph B
	gb1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "gb1"}.Build(), DisplayName: "GB1"}.Build()
	gb2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "gb2"}.Build(), DisplayName: "GB2"}.Build()
	ub := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "ub"}.Build(), DisplayName: "UB"}.Build()

	eb1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(gb1, "member"), Resource: gb1, Slug: "member", DisplayName: "member"}.Build()
	eb2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(gb2, "member"), Resource: gb2, Slug: "member", DisplayName: "member"}.Build()

	grantUBEB1 := v2.Grant_builder{Id: batonGrant.NewGrantID(ub, eb1), Entitlement: eb1, Principal: ub}.Build()
	nestingBID := batonGrant.NewGrantID(gb1, eb2)
	grantGB1EB2 := v2.Grant_builder{
		Id:          nestingBID,
		Entitlement: eb2,
		Principal:   gb1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{eb1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	// OLD (expanded)
	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
	require.NoError(t, err)
	defer oldFile.Close(ctx)

	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, oldFile.PutResources(ctx, ga1, ga2, ua, gb1, gb2, ub))
	require.NoError(t, oldFile.PutEntitlements(ctx, ea1, ea2, eb1, eb2))
	require.NoError(t, oldFile.PutGrants(ctx, grantUAEA1, grantGA1EA2, grantUBEB1, grantGB1EB2))
	require.NoError(t, oldFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))

	// NEW is old expanded minus both nesting grants.
	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
	require.NoError(t, err)
	defer newFile.Close(ctx)

	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, newFile.PutResources(ctx, ga1, ga2, ua, gb1, gb2, ub))
	require.NoError(t, newFile.PutEntitlements(ctx, ea1, ea2, eb1, eb2))

	// Copy OLD's expanded grants, dropping both nesting edge grants.
	require.NoError(t, oldFile.SetSyncID(ctx, ""))
	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
	pageToken := ""
	for {
		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
		require.NoError(t, err)
		for _, g := range resp.GetList() {
			if g.GetId() == nestingAID || g.GetId() == nestingBID {
				continue
			}
			require.NoError(t, newFile.PutGrants(ctx, g))
		}
		pageToken = resp.GetNextPageToken()
		if pageToken == "" {
			break
		}
	}
	require.NoError(t, newFile.EndSync(ctx))

	attached, err := newFile.AttachFile(oldFile, "attached")
	require.NoError(t, err)
	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
	require.NoError(t, err)
	_, err = attached.DetachFile("attached")
	require.NoError(t, err)

	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))

	// EXPECTED: connector truth after removal (no nesting grants), fully expanded.
	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
	require.NoError(t, err)
	defer expectedFile.Close(ctx)
	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, expectedFile.PutResources(ctx, ga1, ga2, ua, gb1, gb2, ub))
	require.NoError(t, expectedFile.PutEntitlements(ctx, ea1, ea2, eb1, eb2))
	require.NoError(t, expectedFile.PutGrants(ctx, grantUAEA1, grantUBEB1 /* no nesting */))
	require.NoError(t, expectedFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))

	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
	require.NoError(t, err)
	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
	require.NoError(t, err)
	require.Equal(t, want, got)
}

// Deleting an entitlement in the middle of a chain (e1 -> e2 -> e3) must remove
// the membership that had propagated through it.
func TestIncrementalExpansion_EntitlementDeleted(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()

	oldPath := filepath.Join(tmpDir, "old_ent_deleted.c1z")
	newPath := filepath.Join(tmpDir, "new_ent_deleted.c1z")
	expectedPath := filepath.Join(tmpDir, "expected_ent_deleted.c1z")

	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()

	// Chain: e1 -> e2 -> e3. Deleting e2 should remove propagated membership on e3.
	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
	g3 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g3"}.Build(), DisplayName: "G3"}.Build()
	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()

	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()
	e3 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g3, "member"), Resource: g3, Slug: "member", DisplayName: "member"}.Build()

	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()

	grantG1E2 := v2.Grant_builder{
		Id:          batonGrant.NewGrantID(g1, e2),
		Entitlement: e2,
		Principal:   g1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	grantG2E3 := v2.Grant_builder{
		Id:          batonGrant.NewGrantID(g2, e3),
		Entitlement: e3,
		Principal:   g2,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e2.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	// OLD (expanded)
	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
	require.NoError(t, err)
	defer oldFile.Close(ctx)

	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, oldFile.PutResources(ctx, g1, g2, g3, u1))
	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2, e3))
	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2, grantG2E3))
	require.NoError(t, oldFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))

	// NEW is OLD expanded minus the e2 entitlement and any grants on e2.
	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
	require.NoError(t, err)
	defer newFile.Close(ctx)

	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, newFile.PutResources(ctx, g1, g2, g3, u1))
	require.NoError(t, newFile.PutEntitlements(ctx, e1 /* e2 deleted */, e3))

	// Copy OLD's expanded grants, dropping every grant on the deleted entitlement e2.
	require.NoError(t, oldFile.SetSyncID(ctx, ""))
	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
	pageToken := ""
	for {
		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
		require.NoError(t, err)
		for _, g := range resp.GetList() {
			if g.GetEntitlement().GetId() == e2.GetId() {
				continue
			}
			require.NoError(t, newFile.PutGrants(ctx, g))
		}
		pageToken = resp.GetNextPageToken()
		if pageToken == "" {
			break
		}
	}
	require.NoError(t, newFile.EndSync(ctx))

	attached, err := newFile.AttachFile(oldFile, "attached")
	require.NoError(t, err)
	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
	require.NoError(t, err)
	_, err = attached.DetachFile("attached")
	require.NoError(t, err)

	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))

	// EXPECTED: connector truth with e2 removed (no grants on e2), expanded fully.
	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
	require.NoError(t, err)
	defer expectedFile.Close(ctx)
	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, g3, u1))
	require.NoError(t, expectedFile.PutEntitlements(ctx, e1 /* e2 deleted */, e3))
	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1 /* no g1->e2 */, grantG2E3))
	require.NoError(t, expectedFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))

	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
	require.NoError(t, err)
	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
	require.NoError(t, err)
	require.Equal(t, want, got)
}

// A grant that persists in NEW but loses its GrantExpandable annotation must
// stop contributing derived grants.
func TestIncrementalExpansion_GrantNoLongerExpandable(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()

	oldPath := filepath.Join(tmpDir, "old_noexpand.c1z")
	newPath := filepath.Join(tmpDir, "new_noexpand.c1z")
	expectedPath := filepath.Join(tmpDir, "expected_noexpand.c1z")

	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()

	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()

	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()

	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()
	nestingID := batonGrant.NewGrantID(g1, e2)
	grantExpandable := v2.Grant_builder{
		Id:          nestingID,
		Entitlement: e2,
		Principal:   g1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()
	grantNotExpandable := v2.Grant_builder{
		Id:          nestingID,
		Entitlement: e2,
		Principal:   g1,
		Annotations: nil, // removed GrantExpandable
	}.Build()

	// OLD (expanded)
	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
	require.NoError(t, err)
	defer oldFile.Close(ctx)
	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantExpandable))
	require.NoError(t, oldFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))

	// NEW is OLD expanded, but the nesting grant is still present and is no longer expandable.
	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
	require.NoError(t, err)
	defer newFile.Close(ctx)
	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, newFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2))

	require.NoError(t, oldFile.SetSyncID(ctx, ""))
	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
	pageToken := ""
	for {
		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
		require.NoError(t, err)
		for _, g := range resp.GetList() {
			// Replace the edge-defining grant with the non-expandable version.
			if g.GetId() == nestingID {
				require.NoError(t, newFile.PutGrants(ctx, grantNotExpandable))
				continue
			}
			require.NoError(t, newFile.PutGrants(ctx, g))
		}
		pageToken = resp.GetNextPageToken()
		if pageToken == "" {
			break
		}
	}
	require.NoError(t, newFile.EndSync(ctx))

	// Diff OLD vs NEW, then apply incremental expansion to NEW.
	attached, err := newFile.AttachFile(oldFile, "attached")
	require.NoError(t, err)
	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
	require.NoError(t, err)
	_, err = attached.DetachFile("attached")
	require.NoError(t, err)

	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))

	// EXPECTED: connector truth with nesting grant non-expandable, expanded fully.
	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
	require.NoError(t, err)
	defer expectedFile.Close(ctx)
	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2))
	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1, grantNotExpandable))
	require.NoError(t, expectedFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))

	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
	require.NoError(t, err)
	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
	require.NoError(t, err)
	require.Equal(t, want, got)
}

// Deleting a resource (g2), and with it entitlement e2 and e2's grants, must
// remove the membership that had propagated through e2.
func TestIncrementalExpansion_ResourceDeleted(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()

	oldPath := filepath.Join(tmpDir, "old_res_deleted.c1z")
	newPath := filepath.Join(tmpDir, "new_res_deleted.c1z")
	expectedPath := filepath.Join(tmpDir, "expected_res_deleted.c1z")

	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()

	// Chain: e1 -> e2 -> e3. Deleting resource g2 (and therefore entitlement e2) should remove propagated membership on e3.
	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
	g3 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g3"}.Build(), DisplayName: "G3"}.Build()
	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()

	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()
	e3 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g3, "member"), Resource: g3, Slug: "member", DisplayName: "member"}.Build()

	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()

	grantG1E2 := v2.Grant_builder{
		Id:          batonGrant.NewGrantID(g1, e2),
		Entitlement: e2,
		Principal:   g1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	grantG2E3 := v2.Grant_builder{
		Id:          batonGrant.NewGrantID(g2, e3),
		Entitlement: e3,
		Principal:   g2,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e2.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	// OLD (expanded)
	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
	require.NoError(t, err)
	defer oldFile.Close(ctx)

	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, oldFile.PutResources(ctx, g1, g2, g3, u1))
	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2, e3))
	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2, grantG2E3))
	require.NoError(t, oldFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))

	// NEW is OLD expanded minus resource g2, entitlement e2, and any grants on e2.
	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
	require.NoError(t, err)
	defer newFile.Close(ctx)

	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, newFile.PutResources(ctx, g1 /* g2 deleted */, g3, u1))
	require.NoError(t, newFile.PutEntitlements(ctx, e1 /* e2 deleted */, e3))

	// Copy OLD's expanded grants, dropping every grant on the deleted entitlement e2.
	require.NoError(t, oldFile.SetSyncID(ctx, ""))
	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
	pageToken := ""
	for {
		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
		require.NoError(t, err)
		for _, g := range resp.GetList() {
			if g.GetEntitlement().GetId() == e2.GetId() {
				continue
			}
			require.NoError(t, newFile.PutGrants(ctx, g))
		}
		pageToken = resp.GetNextPageToken()
		if pageToken == "" {
			break
		}
	}
	require.NoError(t, newFile.EndSync(ctx))

	attached, err := newFile.AttachFile(oldFile, "attached")
	require.NoError(t, err)
	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
	require.NoError(t, err)
	_, err = attached.DetachFile("attached")
	require.NoError(t, err)

	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))

	// EXPECTED: connector truth with g2/e2 removed (no grants on e2), expanded fully.
	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
	require.NoError(t, err)
	defer expectedFile.Close(ctx)
	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, expectedFile.PutResources(ctx, g1 /* g2 deleted */, g3, u1))
	require.NoError(t, expectedFile.PutEntitlements(ctx, e1 /* e2 deleted */, e3))
	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1 /* no g1->e2 */, grantG2E3))
	require.NoError(t, expectedFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))

	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
	require.NoError(t, err)
	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
	require.NoError(t, err)
	require.Equal(t, want, got)
}

// Adding a new expandable edge in NEW must create the same derived grants that
// a full expansion of the new connector truth would produce.
func TestIncrementalExpansion_AddedEdgeCreatesNewDerivedGrant(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()

	oldPath := filepath.Join(tmpDir, "old_add_edge.c1z")
	newPath := filepath.Join(tmpDir, "new_add_edge.c1z")
	expectedPath := filepath.Join(tmpDir, "expected_add_edge.c1z")

	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()

	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()

	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()

	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()

	// Edge-defining grant to be added in NEW
	grantG1E2 := v2.Grant_builder{
		Id:          batonGrant.NewGrantID(g1, e2),
		Entitlement: e2,
		Principal:   g1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	// OLD: just the direct grant, no edges, expanded (nothing to expand)
	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
	require.NoError(t, err)
	defer oldFile.Close(ctx)

	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1))
	require.NoError(t, oldFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))

	// NEW: same as OLD but with the new edge-defining grant added
	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
	require.NoError(t, err)
	defer newFile.Close(ctx)

	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, newFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2))

	// Copy grants from OLD expanded state
	require.NoError(t, oldFile.SetSyncID(ctx, ""))
	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
	pageToken := ""
	for {
		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
		require.NoError(t, err)
		for _, g := range resp.GetList() {
			require.NoError(t, newFile.PutGrants(ctx, g))
		}
		pageToken = resp.GetNextPageToken()
		if pageToken == "" {
			break
		}
	}
	// Add the new edge-defining grant
	require.NoError(t, newFile.PutGrants(ctx, grantG1E2))
	require.NoError(t, newFile.EndSync(ctx))

	attached, err := newFile.AttachFile(oldFile, "attached")
	require.NoError(t, err)
	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
	require.NoError(t, err)
	_, err = attached.DetachFile("attached")
	require.NoError(t, err)

	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))

	// EXPECTED: connector truth with new edge, fully expanded
	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
	require.NoError(t, err)
	defer expectedFile.Close(ctx)
	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1))
	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2))
	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1, grantG1E2))
	require.NoError(t, expectedFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))

	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
	require.NoError(t, err)
	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
	require.NoError(t, err)
	require.Equal(t, want, got)
}

// A direct grant added in NEW must propagate across an already-existing
// expandable edge (e1 -> e2).
func TestIncrementalExpansion_AddedDirectGrantPropagates(t *testing.T) {
	ctx := context.Background()
	tmpDir := t.TempDir()

	oldPath := filepath.Join(tmpDir, "old_add_direct.c1z")
	newPath := filepath.Join(tmpDir, "new_add_direct.c1z")
	expectedPath := filepath.Join(tmpDir, "expected_add_direct.c1z")

	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()

	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()
	u2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u2"}.Build(), DisplayName: "U2"}.Build()

	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()

	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()
	grantU2E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u2, e1), Entitlement: e1, Principal: u2}.Build()

	grantG1E2 := v2.Grant_builder{
		Id:          batonGrant.NewGrantID(g1, e2),
		Entitlement: e2,
		Principal:   g1,
		Annotations: annotations.New(v2.GrantExpandable_builder{
			EntitlementIds:  []string{e1.GetId()},
			Shallow:         false,
			ResourceTypeIds: []string{"user"},
		}.Build()),
	}.Build()

	// OLD: U1 → E1, edge E1 → E2, expanded (U1 → E2 derived)
	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
	require.NoError(t, err)
	defer oldFile.Close(ctx)

	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
	require.NoError(t, err)
	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
	require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1, u2))
	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2))
	require.NoError(t, oldFile.EndSync(ctx))
	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))

	// NEW: same as OLD expanded, 
plus new direct grant U2 → E1 + newFile, err := dotc1z.NewC1ZFile(ctx, newPath) + require.NoError(t, err) + defer newFile.Close(ctx) + + newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT)) + require.NoError(t, newFile.PutResources(ctx, g1, g2, u1, u2)) + require.NoError(t, newFile.PutEntitlements(ctx, e1, e2)) + + require.NoError(t, oldFile.SetSyncID(ctx, "")) + require.NoError(t, oldFile.ViewSync(ctx, oldSyncID)) + pageToken := "" + for { + resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build()) + require.NoError(t, err) + for _, g := range resp.GetList() { + require.NoError(t, newFile.PutGrants(ctx, g)) + } + pageToken = resp.GetNextPageToken() + if pageToken == "" { + break + } + } + // Add the new direct grant + require.NoError(t, newFile.PutGrants(ctx, grantU2E1)) + require.NoError(t, newFile.EndSync(ctx)) + + attached, err := newFile.AttachFile(oldFile, "attached") + require.NoError(t, err) + upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID) + require.NoError(t, err) + _, err = attached.DetachFile("attached") + require.NoError(t, err) + + require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID)) + + // EXPECTED: connector truth with both direct grants, fully expanded (U2 → E2 derived) + expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath) + require.NoError(t, err) + defer expectedFile.Close(ctx) + expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT)) + require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1, u2)) + require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2)) + require.NoError(t, expectedFile.PutGrants(ctx, 
grantU1E1, grantU2E1, grantG1E2)) + require.NoError(t, expectedFile.EndSync(ctx)) + require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID)) + + got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID) + require.NoError(t, err) + want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID) + require.NoError(t, err) + require.Equal(t, want, got) +} + +func TestIncrementalExpansion_ShallowEdgeRemoval(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + oldPath := filepath.Join(tmpDir, "old_shallow.c1z") + newPath := filepath.Join(tmpDir, "new_shallow.c1z") + expectedPath := filepath.Join(tmpDir, "expected_shallow.c1z") + + groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build() + userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build() + + g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build() + g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build() + u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build() + + e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build() + e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build() + + grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build() + + // Shallow edge E1 -> E2 (only direct grants on E1 propagate) + nestingID := batonGrant.NewGrantID(g1, e2) + grantG1E2Shallow := v2.Grant_builder{ + Id: nestingID, + Entitlement: e2, + Principal: g1, + Annotations: annotations.New(v2.GrantExpandable_builder{ + EntitlementIds: []string{e1.GetId()}, + Shallow: true, // shallow! 
+			ResourceTypeIds: []string{"user"},
+		}.Build()),
+	}.Build()
+
+	// OLD: U1 → E1, shallow edge E1 → E2, expanded (U1 → E2 derived)
+	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
+	require.NoError(t, err)
+	defer oldFile.Close(ctx)
+
+	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2Shallow))
+	require.NoError(t, oldFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))
+
+	// NEW: same as OLD expanded, but remove the shallow edge
+	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
+	require.NoError(t, err)
+	defer newFile.Close(ctx)
+
+	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, newFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2))
+
+	// Copy every grant from the OLD (already expanded) sync, minus the edge.
+	require.NoError(t, oldFile.SetSyncID(ctx, ""))
+	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
+	pageToken := ""
+	for {
+		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
+		require.NoError(t, err)
+		for _, g := range resp.GetList() {
+			if g.GetId() == nestingID {
+				continue // skip the shallow edge-defining grant
+			}
+			require.NoError(t, newFile.PutGrants(ctx, g))
+		}
+		pageToken = resp.GetNextPageToken()
+		if pageToken == "" {
+			break
+		}
+	}
+	require.NoError(t, newFile.EndSync(ctx))
+
+	attached, err := newFile.AttachFile(oldFile, "attached")
+	require.NoError(t, err)
+	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
+	require.NoError(t, err)
+	_, err = attached.DetachFile("attached")
+	require.NoError(t, err)
+
+	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))
+
+	// EXPECTED: connector truth without edge, fully expanded (no derived grants)
+	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
+	require.NoError(t, err)
+	defer expectedFile.Close(ctx)
+	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1 /* no edge */))
+	require.NoError(t, expectedFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))
+
+	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
+	require.NoError(t, err)
+	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+// TestIncrementalExpansion_DirectGrantRemovedFromSource verifies that deleting
+// a direct grant on the source entitlement also deletes the grant that was
+// derived from it through the expandable edge.
+func TestIncrementalExpansion_DirectGrantRemovedFromSource(t *testing.T) {
+	ctx := context.Background()
+	tmpDir := t.TempDir()
+
+	oldPath := filepath.Join(tmpDir, "old_remove_direct.c1z")
+	newPath := filepath.Join(tmpDir, "new_remove_direct.c1z")
+	expectedPath := filepath.Join(tmpDir, "expected_remove_direct.c1z")
+
+	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
+	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()
+
+	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
+	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
+	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()
+
+	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
+	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()
+
+	directGrantID := batonGrant.NewGrantID(u1, e1)
+	grantU1E1 := v2.Grant_builder{Id: directGrantID, Entitlement: e1, Principal: u1}.Build()
+
+	// Edge-defining grant: members of E1 expand into E2.
+	grantG1E2 := v2.Grant_builder{
+		Id:          batonGrant.NewGrantID(g1, e2),
+		Entitlement: e2,
+		Principal:   g1,
+		Annotations: annotations.New(v2.GrantExpandable_builder{
+			EntitlementIds:  []string{e1.GetId()},
+			Shallow:         false,
+			ResourceTypeIds: []string{"user"},
+		}.Build()),
+	}.Build()
+
+	// OLD: U1 → E1, edge E1 → E2, expanded (U1 → E2 derived)
+	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
+	require.NoError(t, err)
+	defer oldFile.Close(ctx)
+
+	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2))
+	require.NoError(t, oldFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))
+
+	// NEW: same as OLD expanded, but remove the direct grant U1 → E1
+	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
+	require.NoError(t, err)
+	defer newFile.Close(ctx)
+
+	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, newFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2))
+
+	// Copy every grant from the OLD (already expanded) sync, minus the direct grant.
+	require.NoError(t, oldFile.SetSyncID(ctx, ""))
+	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
+	pageToken := ""
+	for {
+		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
+		require.NoError(t, err)
+		for _, g := range resp.GetList() {
+			if g.GetId() == directGrantID {
+				continue // skip the direct grant we're removing
+			}
+			require.NoError(t, newFile.PutGrants(ctx, g))
+		}
+		pageToken = resp.GetNextPageToken()
+		if pageToken == "" {
+			break
+		}
+	}
+	require.NoError(t, newFile.EndSync(ctx))
+
+	attached, err := newFile.AttachFile(oldFile, "attached")
+	require.NoError(t, err)
+	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
+	require.NoError(t, err)
+	_, err = attached.DetachFile("attached")
+	require.NoError(t, err)
+
+	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))
+
+	// EXPECTED: connector truth without the direct grant, fully expanded (U1 → E2 should be gone)
+	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
+	require.NoError(t, err)
+	defer expectedFile.Close(ctx)
+	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, expectedFile.PutGrants(ctx, grantG1E2 /* no U1 → E1 */))
+	require.NoError(t, expectedFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))
+
+	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
+	require.NoError(t, err)
+	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestIncrementalExpansion_DirectGrantBecomesSourceless(t *testing.T) {
+	// When a direct grant (no GrantImmutable) loses all expansion sources,
+	// the grant should persist with sources=nil, matching a fresh full
expansion.
+	//
+	// The expander adds a "self-source" during expansion to mark direct grants,
+	// but when all expansion sources are removed, we also remove the self-source
+	// to ensure incremental expansion produces the same result as full expansion.
+
+	ctx := context.Background()
+	tmpDir := t.TempDir()
+
+	oldPath := filepath.Join(tmpDir, "old_sourceless.c1z")
+	newPath := filepath.Join(tmpDir, "new_sourceless.c1z")
+	expectedPath := filepath.Join(tmpDir, "expected_sourceless.c1z")
+
+	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
+	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()
+
+	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
+	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
+	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()
+
+	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
+	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()
+
+	// U1 is directly a member of both E1 and E2
+	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()
+	grantU1E2 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e2), Entitlement: e2, Principal: u1}.Build()
+
+	// Edge E1 → E2 via nesting grant
+	nestingID := batonGrant.NewGrantID(g1, e2)
+	grantG1E2 := v2.Grant_builder{
+		Id:          nestingID,
+		Entitlement: e2,
+		Principal:   g1,
+		Annotations: annotations.New(v2.GrantExpandable_builder{
+			EntitlementIds:  []string{e1.GetId()},
+			Shallow:         false,
+			ResourceTypeIds: []string{"user"},
+		}.Build()),
+	}.Build()
+
+	// OLD: U1 → E1 (direct), U1 → E2 (direct), edge E1 → E2
+	// After expansion: U1 → E2 acquires sources={E1}
+	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
+	require.NoError(t, err)
+	defer oldFile.Close(ctx)
+
+	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantU1E2, grantG1E2))
+	require.NoError(t, oldFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))
+
+	// NEW: same as OLD expanded, but remove the edge (nesting grant)
+	// U1 → E2 should lose sources but remain as a direct grant
+	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
+	require.NoError(t, err)
+	defer newFile.Close(ctx)
+
+	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, newFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2))
+
+	// Copy every grant from the OLD (already expanded) sync, minus the edge.
+	require.NoError(t, oldFile.SetSyncID(ctx, ""))
+	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
+	pageToken := ""
+	for {
+		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
+		require.NoError(t, err)
+		for _, g := range resp.GetList() {
+			if g.GetId() == nestingID {
+				continue // remove the edge
+			}
+			require.NoError(t, newFile.PutGrants(ctx, g))
+		}
+		pageToken = resp.GetNextPageToken()
+		if pageToken == "" {
+			break
+		}
+	}
+	require.NoError(t, newFile.EndSync(ctx))
+
+	attached, err := newFile.AttachFile(oldFile, "attached")
+	require.NoError(t, err)
+	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
+	require.NoError(t, err)
+	_, err = attached.DetachFile("attached")
+	require.NoError(t, err)
+
+	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))
+
+	// EXPECTED: U1 → E2 still exists (it's a direct grant), but with no sources
+	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
+	require.NoError(t, err)
+	defer expectedFile.Close(ctx)
+	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1, grantU1E2 /* no edge, U1→E2 persists without sources */))
+	require.NoError(t, expectedFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))
+
+	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
+	require.NoError(t, err)
+	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestIncrementalExpansion_CycleEdgeRemoval(t *testing.T) {
+	// Test removing an edge in a cycle: E1 → E2 → E1 (bidirectional cycle)
+	// When one edge is removed, the cycle is broken.
+	ctx := context.Background()
+	tmpDir := t.TempDir()
+
+	oldPath := filepath.Join(tmpDir, "old_cycle.c1z")
+	newPath := filepath.Join(tmpDir, "new_cycle.c1z")
+	expectedPath := filepath.Join(tmpDir, "expected_cycle.c1z")
+
+	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
+	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()
+
+	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
+	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
+	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()
+
+	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
+	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()
+
+	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()
+
+	// Edge E1 → E2
+	grantG1E2ID := batonGrant.NewGrantID(g1, e2)
+	grantG1E2 := v2.Grant_builder{
+		Id:          grantG1E2ID,
+		Entitlement: e2,
+		Principal:   g1,
+		Annotations: annotations.New(v2.GrantExpandable_builder{
+			EntitlementIds:  []string{e1.GetId()},
+			Shallow:         false,
+			ResourceTypeIds: []string{"user"},
+		}.Build()),
+	}.Build()
+
+	// Edge E2 → E1 (creates a cycle)
+	grantG2E1ID := batonGrant.NewGrantID(g2, e1)
+	grantG2E1 := v2.Grant_builder{
+		Id:          grantG2E1ID,
+		Entitlement: e1,
+		Principal:   g2,
+		Annotations: annotations.New(v2.GrantExpandable_builder{
+			EntitlementIds:  []string{e2.GetId()},
+			Shallow:         false,
+			ResourceTypeIds: []string{"user"},
+		}.Build()),
+	}.Build()
+
+	// OLD: U1 → E1, edges E1 → E2 and E2 → E1 (cycle)
+	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
+	require.NoError(t, err)
+	defer oldFile.Close(ctx)
+
+	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2, grantG2E1))
+	require.NoError(t, oldFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))
+
+	// NEW: same as OLD expanded, but remove one edge (E2 → E1) to break the cycle
+	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
+	require.NoError(t, err)
+	defer newFile.Close(ctx)
+
+	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, newFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2))
+
+	// Copy every grant from the OLD (already expanded) sync, minus the E2 → E1 edge.
+	require.NoError(t, oldFile.SetSyncID(ctx, ""))
+	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
+	pageToken := ""
+	for {
+		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
+		require.NoError(t, err)
+		for _, g := range resp.GetList() {
+			if g.GetId() == grantG2E1ID {
+				continue // remove E2 → E1 edge
+			}
+			require.NoError(t, newFile.PutGrants(ctx, g))
+		}
+		pageToken = resp.GetNextPageToken()
+		if pageToken == "" {
+			break
+		}
+	}
+	require.NoError(t, newFile.EndSync(ctx))
+
+	attached, err := newFile.AttachFile(oldFile, "attached")
+	require.NoError(t, err)
+	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
+	require.NoError(t, err)
+	_, err = attached.DetachFile("attached")
+	require.NoError(t, err)
+
+	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))
+
+	// EXPECTED: connector truth with only E1 → E2 edge (no cycle), fully expanded
+	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
+	require.NoError(t, err)
+	defer expectedFile.Close(ctx)
+	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1))
+	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1, grantG1E2 /* no E2→E1 */))
+	require.NoError(t, expectedFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))
+
+	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
+	require.NoError(t, err)
+	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+func TestIncrementalExpansion_PrincipalTypeFilterMismatch(t *testing.T) {
+	// Test that the edge's principal resource-type filter (the GrantExpandable
+	// ResourceTypeIds field) excludes non-matching principals.
+	// Edge E1 → E2 with filter ["user"] should only propagate user grants, not group grants.
+	ctx := context.Background()
+	tmpDir := t.TempDir()
+
+	oldPath := filepath.Join(tmpDir, "old_filter.c1z")
+	newPath := filepath.Join(tmpDir, "new_filter.c1z")
+	expectedPath := filepath.Join(tmpDir, "expected_filter.c1z")
+
+	groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build()
+	userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build()
+
+	g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build()
+	g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build()
+	g3 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g3"}.Build(), DisplayName: "G3"}.Build()
+	u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build()
+
+	e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build()
+	e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build()
+
+	// U1 (user) → E1 - should propagate
+	grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build()
+	// G3 (group) → E1 - should NOT propagate (filter is ["user"])
+	grantG3E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(g3, e1), Entitlement: e1, Principal: g3}.Build()
+
+	// Edge E1 → E2 with filter ["user"]
+	nestingID := batonGrant.NewGrantID(g1, e2)
+	grantG1E2 := v2.Grant_builder{
+		Id:          nestingID,
+		Entitlement: e2,
+		Principal:   g1,
+		Annotations: annotations.New(v2.GrantExpandable_builder{
+			EntitlementIds:  []string{e1.GetId()},
+			Shallow:         false,
+			ResourceTypeIds: []string{"user"}, // only users propagate
+		}.Build()),
+	}.Build()
+
+	// OLD: U1 → E1, G3 → E1, edge E1 → E2 (filter=user)
+	// After expansion: only U1 → E2 (G3 is excluded by filter)
+	oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath)
+	require.NoError(t, err)
+	defer oldFile.Close(ctx)
+
+	oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, oldFile.PutResources(ctx, g1, g2, g3, u1))
+	require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG3E1, grantG1E2))
+	require.NoError(t, oldFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID))
+
+	// NEW: same as OLD expanded, but remove the edge
+	// The derived grant U1 → E2 should be deleted, but G3 → E1 should remain unchanged
+	newFile, err := dotc1z.NewC1ZFile(ctx, newPath)
+	require.NoError(t, err)
+	defer newFile.Close(ctx)
+
+	newSyncID, err := newFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, newFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, newFile.PutResources(ctx, g1, g2, g3, u1))
+	require.NoError(t, newFile.PutEntitlements(ctx, e1, e2))
+
+	// Copy every grant from the OLD (already expanded) sync, minus the edge.
+	require.NoError(t, oldFile.SetSyncID(ctx, ""))
+	require.NoError(t, oldFile.ViewSync(ctx, oldSyncID))
+	pageToken := ""
+	for {
+		resp, err := oldFile.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build())
+		require.NoError(t, err)
+		for _, g := range resp.GetList() {
+			if g.GetId() == nestingID {
+				continue // remove the edge
+			}
+			require.NoError(t, newFile.PutGrants(ctx, g))
+		}
+		pageToken = resp.GetNextPageToken()
+		if pageToken == "" {
+			break
+		}
+	}
+	require.NoError(t, newFile.EndSync(ctx))
+
+	attached, err := newFile.AttachFile(oldFile, "attached")
+	require.NoError(t, err)
+	upsertsSyncID, deletionsSyncID, err := attached.GenerateSyncDiffFromFile(ctx, oldSyncID, newSyncID)
+	require.NoError(t, err)
+	_, err = attached.DetachFile("attached")
+	require.NoError(t, err)
+
+	require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, newFile, newSyncID, upsertsSyncID, deletionsSyncID))
+
+	// EXPECTED: connector truth without edge, fully expanded
+	// U1 → E1 remains, G3 → E1 remains, no derived grants
+	expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath)
+	require.NoError(t, err)
+	defer expectedFile.Close(ctx)
+	expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "")
+	require.NoError(t, err)
+	require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT))
+	require.NoError(t, expectedFile.PutResources(ctx, g1, g2, g3, u1))
+	require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2))
+	require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1, grantG3E1 /* no edge */))
+	require.NoError(t, expectedFile.EndSync(ctx))
+	require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID))
+
+	got, err := loadGrantSourcesByKey(ctx, newFile, newSyncID)
+	require.NoError(t, err)
+	want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID)
+	require.NoError(t, err)
+	require.Equal(t, want, got)
+}
+
+// TestIncrementalExpansion_FullPartialCompactDiff tests the realistic workflow:
+// 1. Full sync with full expansion
+// 2. First partial sync (new data, no expansion)
+// 3. Compaction (merge partial into full)
+// 4. Second partial sync (more new data, no expansion)
+// 5. Compaction (merge second partial into full)
+// 6. Generate diff between old full and compacted
+// 7. Apply incremental expansion using the diff
+// 8. Compare against fresh full expansion of the compacted state.
+func TestIncrementalExpansion_FullPartialCompactDiff(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + basePath := filepath.Join(tmpDir, "base.c1z") + partial1Path := filepath.Join(tmpDir, "partial1.c1z") + partial2Path := filepath.Join(tmpDir, "partial2.c1z") + expectedPath := filepath.Join(tmpDir, "expected.c1z") + + groupRT := v2.ResourceType_builder{Id: "group", DisplayName: "Group"}.Build() + userRT := v2.ResourceType_builder{Id: "user", DisplayName: "User"}.Build() + + // Resources + g1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g1"}.Build(), DisplayName: "G1"}.Build() + g2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "group", Resource: "g2"}.Build(), DisplayName: "G2"}.Build() + u1 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u1"}.Build(), DisplayName: "U1"}.Build() + u2 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u2"}.Build(), DisplayName: "U2"}.Build() + u3 := v2.Resource_builder{Id: v2.ResourceId_builder{ResourceType: "user", Resource: "u3"}.Build(), DisplayName: "U3"}.Build() + + // Entitlements + e1 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g1, "member"), Resource: g1, Slug: "member", DisplayName: "member"}.Build() + e2 := v2.Entitlement_builder{Id: batonEntitlement.NewEntitlementID(g2, "member"), Resource: g2, Slug: "member", DisplayName: "member"}.Build() + + // Grants + // U1 is a direct member of G1 + grantU1E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u1, e1), Entitlement: e1, Principal: u1}.Build() + // G1 is a member of G2 with expansion (G1 members propagate to G2) + grantG1E2 := v2.Grant_builder{ + Id: batonGrant.NewGrantID(g1, e2), + Entitlement: e2, + Principal: g1, + Annotations: annotations.New(v2.GrantExpandable_builder{ + EntitlementIds: []string{e1.GetId()}, + Shallow: false, + ResourceTypeIds: []string{"user"}, + }.Build()), + }.Build() + // U2 is a direct 
member of G1 (added in first partial sync) + grantU2E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u2, e1), Entitlement: e1, Principal: u2}.Build() + // U3 is a direct member of G1 (added in second partial sync) + grantU3E1 := v2.Grant_builder{Id: batonGrant.NewGrantID(u3, e1), Entitlement: e1, Principal: u3}.Build() + + // ========================================================================== + // STEP 1: Create BASE with full sync + full expansion + // Initial state: U1 → E1, G1 → E2 (expandable). After expansion: U1 → E2 (derived). + // ========================================================================== + baseFile, err := dotc1z.NewC1ZFile(ctx, basePath) + require.NoError(t, err) + defer baseFile.Close(ctx) + + baseSyncID, err := baseFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, baseFile.PutResourceTypes(ctx, groupRT, userRT)) + require.NoError(t, baseFile.PutResources(ctx, g1, g2, u1)) + require.NoError(t, baseFile.PutEntitlements(ctx, e1, e2)) + require.NoError(t, baseFile.PutGrants(ctx, grantU1E1, grantG1E2)) + require.NoError(t, baseFile.EndSync(ctx)) + + // Run full expansion on base + require.NoError(t, runFullExpansion(ctx, baseFile, baseSyncID)) + + // Verify base expansion worked: U1 should have derived membership in E2 + baseGrants, err := loadGrantSourcesByKey(ctx, baseFile, baseSyncID) + require.NoError(t, err) + require.Contains(t, baseGrants, "group:g2:member|user|u1", "U1 should have derived membership in G2 after base expansion") + + // ========================================================================== + // STEP 2: Create FIRST PARTIAL sync with new user U2 as member of G1 + // Partial syncs don't run expansion. 
+ // ========================================================================== + partial1File, err := dotc1z.NewC1ZFile(ctx, partial1Path) + require.NoError(t, err) + defer partial1File.Close(ctx) + + partial1SyncID, err := partial1File.StartNewSync(ctx, connectorstore.SyncTypePartial, "") + require.NoError(t, err) + // Partial sync: just the new user and their grant + require.NoError(t, partial1File.PutResourceTypes(ctx, userRT)) + require.NoError(t, partial1File.PutResources(ctx, u2)) + require.NoError(t, partial1File.PutGrants(ctx, grantU2E1)) + require.NoError(t, partial1File.EndSync(ctx)) + + // ========================================================================== + // STEP 3: Compact first partial into base + // After compaction, base has the new user U2 and grant U2→E1. + // Note: CompactTable uses hardcoded "attached" as the database alias. + // ========================================================================== + compact1Attached, err := baseFile.AttachFile(partial1File, "attached") + require.NoError(t, err) + require.NoError(t, compact1Attached.CompactResourceTypes(ctx, baseSyncID, partial1SyncID)) + require.NoError(t, compact1Attached.CompactResources(ctx, baseSyncID, partial1SyncID)) + require.NoError(t, compact1Attached.CompactEntitlements(ctx, baseSyncID, partial1SyncID)) + require.NoError(t, compact1Attached.CompactGrants(ctx, baseSyncID, partial1SyncID)) + _, err = compact1Attached.DetachFile("attached") + require.NoError(t, err) + + // ========================================================================== + // STEP 4: Create SECOND PARTIAL sync with new user U3 as member of G1 + // ========================================================================== + partial2File, err := dotc1z.NewC1ZFile(ctx, partial2Path) + require.NoError(t, err) + defer partial2File.Close(ctx) + + partial2SyncID, err := partial2File.StartNewSync(ctx, connectorstore.SyncTypePartial, "") + require.NoError(t, err) + // Partial sync: just the new user and 
their grant + require.NoError(t, partial2File.PutResourceTypes(ctx, userRT)) + require.NoError(t, partial2File.PutResources(ctx, u3)) + require.NoError(t, partial2File.PutGrants(ctx, grantU3E1)) + require.NoError(t, partial2File.EndSync(ctx)) + + // ========================================================================== + // STEP 5: Compact second partial into base + // After compaction, base has both U2 and U3 as G1 members. + // ========================================================================== + compact2Attached, err := baseFile.AttachFile(partial2File, "attached") + require.NoError(t, err) + require.NoError(t, compact2Attached.CompactResourceTypes(ctx, baseSyncID, partial2SyncID)) + require.NoError(t, compact2Attached.CompactResources(ctx, baseSyncID, partial2SyncID)) + require.NoError(t, compact2Attached.CompactEntitlements(ctx, baseSyncID, partial2SyncID)) + require.NoError(t, compact2Attached.CompactGrants(ctx, baseSyncID, partial2SyncID)) + _, err = compact2Attached.DetachFile("attached") + require.NoError(t, err) + + // Sanity-check the compacted state before incremental expansion: + // - U2/U3 direct memberships in G1 should exist (from partial syncs + compaction). + // - U2/U3 derived memberships in G2 should NOT exist yet (because we haven't re-expanded). + compactedBefore, err := loadGrantSourcesByKey(ctx, baseFile, baseSyncID) + require.NoError(t, err) + require.Contains(t, compactedBefore, "group:g1:member|user|u2") + require.Contains(t, compactedBefore, "group:g1:member|user|u3") + require.NotContains(t, compactedBefore, "group:g2:member|user|u2") + require.NotContains(t, compactedBefore, "group:g2:member|user|u3") + + // ========================================================================== + // STEP 6: Generate diff between pre-compaction and post-compaction states + // We need a "snapshot" of the old state to diff against. In practice, this would + // be the previous sync. Here we simulate by creating a copy of the old state. 
+ // ========================================================================== + // For this test, we'll create a separate "old" file representing pre-compaction state + oldPath := filepath.Join(tmpDir, "old_snapshot.c1z") + oldFile, err := dotc1z.NewC1ZFile(ctx, oldPath) + require.NoError(t, err) + defer oldFile.Close(ctx) + + oldSyncID, err := oldFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, oldFile.PutResourceTypes(ctx, groupRT, userRT)) + require.NoError(t, oldFile.PutResources(ctx, g1, g2, u1)) + require.NoError(t, oldFile.PutEntitlements(ctx, e1, e2)) + require.NoError(t, oldFile.PutGrants(ctx, grantU1E1, grantG1E2)) + require.NoError(t, oldFile.EndSync(ctx)) + require.NoError(t, runFullExpansion(ctx, oldFile, oldSyncID)) + + // Now generate diff: base (compacted) is NEW, oldFile is OLD + // Note: GenerateSyncDiffFromFile uses hardcoded "attached" as the database alias. + diffAttached, err := baseFile.AttachFile(oldFile, "attached") + require.NoError(t, err) + upsertsSyncID, deletionsSyncID, err := diffAttached.GenerateSyncDiffFromFile(ctx, oldSyncID, baseSyncID) + require.NoError(t, err) + _, err = diffAttached.DetachFile("attached") + require.NoError(t, err) + + // Sanity-check the upserts diff: it should include the NEW direct grants (U2/U3→E1), + // but it should not already contain the derived grants (U2/U3→E2). + upserts, err := loadGrantSourcesByKey(ctx, baseFile, upsertsSyncID) + require.NoError(t, err) + require.Contains(t, upserts, "group:g1:member|user|u2") + require.Contains(t, upserts, "group:g1:member|user|u3") + require.NotContains(t, upserts, "group:g2:member|user|u2") + require.NotContains(t, upserts, "group:g2:member|user|u3") + + // ========================================================================== + // STEP 7: Apply incremental expansion using the diff + // This should detect the new grants U2→E1 and U3→E1 and propagate them through the expansion graph. 
+ // ========================================================================== + require.NoError(t, incrementalexpansion.ApplyIncrementalExpansionFromDiff(ctx, baseFile, baseSyncID, upsertsSyncID, deletionsSyncID)) + + // ========================================================================== + // STEP 8: Create EXPECTED file with fresh full expansion of the compacted state + // ========================================================================== + expectedFile, err := dotc1z.NewC1ZFile(ctx, expectedPath) + require.NoError(t, err) + defer expectedFile.Close(ctx) + + expectedSyncID, err := expectedFile.StartNewSync(ctx, connectorstore.SyncTypeFull, "") + require.NoError(t, err) + require.NoError(t, expectedFile.PutResourceTypes(ctx, groupRT, userRT)) + require.NoError(t, expectedFile.PutResources(ctx, g1, g2, u1, u2, u3)) + require.NoError(t, expectedFile.PutEntitlements(ctx, e1, e2)) + require.NoError(t, expectedFile.PutGrants(ctx, grantU1E1, grantG1E2, grantU2E1, grantU3E1)) + require.NoError(t, expectedFile.EndSync(ctx)) + require.NoError(t, runFullExpansion(ctx, expectedFile, expectedSyncID)) + + // ========================================================================== + // STEP 9: Compare incremental result against expected + // ========================================================================== + got, err := loadGrantSourcesByKey(ctx, baseFile, baseSyncID) + require.NoError(t, err) + want, err := loadGrantSourcesByKey(ctx, expectedFile, expectedSyncID) + require.NoError(t, err) + + // U1, U2, and U3 should all have derived membership in G2 + require.Contains(t, want, "group:g2:member|user|u1", "Expected: U1 should have derived membership in G2") + require.Contains(t, want, "group:g2:member|user|u2", "Expected: U2 should have derived membership in G2") + require.Contains(t, want, "group:g2:member|user|u3", "Expected: U3 should have derived membership in G2") + + require.Equal(t, want, got, "Incremental expansion should match fresh full 
expansion") +} diff --git a/pkg/sync/incrementalexpansion/invalidate.go b/pkg/sync/incrementalexpansion/invalidate.go new file mode 100644 index 000000000..4fcca2659 --- /dev/null +++ b/pkg/sync/incrementalexpansion/invalidate.go @@ -0,0 +1,150 @@ +package incrementalexpansion + +import ( + "context" + "fmt" + "strings" + + v2 "github.com/conductorone/baton-sdk/pb/c1/connector/v2" + reader_v2 "github.com/conductorone/baton-sdk/pb/c1/reader/v2" + "github.com/conductorone/baton-sdk/pkg/annotations" +) + +type InvalidationStore interface { + // Optional, but needed for dotc1z.C1File.DeleteGrant scoping. + SetSyncID(ctx context.Context, syncID string) error + + ListGrantsForEntitlement(ctx context.Context, req *reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest) (*reader_v2.GrantsReaderServiceListGrantsForEntitlementResponse, error) + PutGrants(ctx context.Context, grants ...*v2.Grant) error + DeleteGrant(ctx context.Context, grantId string) error +} + +// InvalidateRemovedEdges removes only the specific source keys implied by removed edges. +// +// For each removed edge srcE->dstE (source entitlement to destination entitlement): +// - list grants G for entitlement dstE (filtered by edge principal resource types) +// - remove sources[srcE] from those grants +// - if a grant G becomes sourceless and is GrantImmutable, delete it +// - otherwise, persist the updated sources map. +func InvalidateRemovedEdges(ctx context.Context, store InvalidationStore, targetSyncID string, delta *EdgeDelta) error { + if delta == nil || len(delta.Removed) == 0 { + return nil + } + if err := store.SetSyncID(ctx, targetSyncID); err != nil { + return err + } + + type groupKey struct { + dstEntitlementID string + // Use a stable join so we can group by principal type filters. + principalTypes string + } + + // Group removed edges so we only scan/write each destination entitlement once per filter. 
+ removedByGroup := make(map[groupKey]map[string]struct{}, len(delta.Removed)) + for _, edge := range delta.Removed { + k := groupKey{ + dstEntitlementID: edge.DstEntitlementID, + principalTypes: strings.Join(edge.PrincipalResourceTypeIDs, "\x1f"), + } + m := removedByGroup[k] + if m == nil { + m = make(map[string]struct{}, 4) + removedByGroup[k] = m + } + m[edge.SrcEntitlementID] = struct{}{} + } + + // Batch updates to reduce write overhead. + const chunkSize = 10000 + updates := make([]*v2.Grant, 0, chunkSize) + + flush := func() error { + if len(updates) == 0 { + return nil + } + if err := store.PutGrants(ctx, updates...); err != nil { + return err + } + updates = updates[:0] + return nil + } + + for k, srcIDs := range removedByGroup { + // Entitlement is only used for filtering by entitlement_id; ID is sufficient. + ent := v2.Entitlement_builder{Id: k.dstEntitlementID}.Build() + var principalTypes []string + if k.principalTypes != "" { + principalTypes = strings.Split(k.principalTypes, "\x1f") + } + pageToken := "" + for { + resp, err := store.ListGrantsForEntitlement(ctx, reader_v2.GrantsReaderServiceListGrantsForEntitlementRequest_builder{ + Entitlement: ent, + PageToken: pageToken, + PrincipalResourceTypeIds: principalTypes, + }.Build()) + if err != nil { + return err + } + + for _, g := range resp.GetList() { + srcs := g.GetSources().GetSources() + if len(srcs) == 0 { + continue + } + removedAny := false + for srcID := range srcIDs { + if _, ok := srcs[srcID]; !ok { + continue + } + delete(srcs, srcID) + removedAny = true + } + if !removedAny { + continue + } + + // The expander adds a "self-source" (destination entitlement ID) to mark + // that a grant was originally direct. When all expansion sources are removed, + // we should also remove the self-source so the grant matches a fresh full + // expansion (which would have no sources for a direct grant). 
+ selfSourceID := g.GetEntitlement().GetId() + if len(srcs) == 1 { + delete(srcs, selfSourceID) + } + + if len(srcs) == 0 { + annos := annotations.Annotations(g.GetAnnotations()) + if annos.Contains(&v2.GrantImmutable{}) { + if err := store.DeleteGrant(ctx, g.GetId()); err != nil { + return err + } + continue + } + // Direct grant: clear sources entirely. + g.SetSources(nil) + } else { + g.SetSources(v2.GrantSources_builder{Sources: srcs}.Build()) + } + + updates = append(updates, g) + if len(updates) >= chunkSize { + if err := flush(); err != nil { + return err + } + } + } + + pageToken = resp.GetNextPageToken() + if pageToken == "" { + break + } + } + } + + if err := flush(); err != nil { + return fmt.Errorf("invalidate removed edges: %w", err) + } + return nil +} diff --git a/pkg/sync/incrementalexpansion/invalidate_changed_sources.go b/pkg/sync/incrementalexpansion/invalidate_changed_sources.go new file mode 100644 index 000000000..1b62ce30f --- /dev/null +++ b/pkg/sync/incrementalexpansion/invalidate_changed_sources.go @@ -0,0 +1,60 @@ +package incrementalexpansion + +import ( + "context" + + "github.com/conductorone/baton-sdk/pkg/dotc1z" +) + +type changedSourceInvalidator interface { + expandableGrantLister + InvalidationStore +} + +// InvalidateChangedSourceEntitlements invalidates propagated sources for any entitlement whose grant-set changed. +// It removes only the specific source key (the entitlement ID) from downstream grants along outgoing edges. +func InvalidateChangedSourceEntitlements(ctx context.Context, store changedSourceInvalidator, targetSyncID string, changedSources map[string]struct{}) error { + if len(changedSources) == 0 { + return nil + } + + // Build a set of outgoing edges for only the changed sources. 
+ outgoing := make(map[string]Edge) + pageToken := "" + for { + defs, next, err := store.ListExpandableGrants( + ctx, + dotc1z.WithExpandableGrantsSyncID(targetSyncID), + dotc1z.WithExpandableGrantsPageToken(pageToken), + dotc1z.WithExpandableGrantsNeedsExpansionOnly(false), + ) + if err != nil { + return err + } + for _, def := range defs { + for _, src := range def.SrcEntitlementIDs { + if _, ok := changedSources[src]; !ok { + continue + } + e := Edge{ + SrcEntitlementID: src, + DstEntitlementID: def.DstEntitlementID, + Shallow: def.Shallow, + PrincipalResourceTypeIDs: def.PrincipalResourceTypeIDs, + } + outgoing[e.Key()] = e + } + } + if next == "" { + break + } + pageToken = next + } + + if len(outgoing) == 0 { + return nil + } + + // Reuse the same invalidation path (remove sources[src] from dst grants). + return InvalidateRemovedEdges(ctx, store, targetSyncID, &EdgeDelta{Removed: outgoing}) +} diff --git a/pkg/sync/incrementalexpansion/needs_expansion.go b/pkg/sync/incrementalexpansion/needs_expansion.go new file mode 100644 index 000000000..04d2c0d8e --- /dev/null +++ b/pkg/sync/incrementalexpansion/needs_expansion.go @@ -0,0 +1,74 @@ +package incrementalexpansion + +import ( + "context" + "fmt" + + "github.com/conductorone/baton-sdk/pkg/dotc1z" +) + +type needsExpansionMarker interface { + expandableGrantLister + SetNeedsExpansionForGrants(ctx context.Context, syncID string, grantExternalIDs []string, needsExpansion bool) error +} + +// MarkNeedsExpansionForAffectedEdges sets needs_expansion=1 for expandable grants whose edges are +// in/leading into the affected subgraph. +// +// With grant-column storage, we conservatively mark an expandable grant dirty if: +// - its destination entitlement is affected, OR +// - any of its source entitlement IDs is affected. 
+func MarkNeedsExpansionForAffectedEdges(ctx context.Context, store needsExpansionMarker, targetSyncID string, affected map[string]struct{}) error { + if len(affected) == 0 { + return nil + } + + pageToken := "" + toMark := make([]string, 0, 1024) + + for { + defs, next, err := store.ListExpandableGrants( + ctx, + dotc1z.WithExpandableGrantsSyncID(targetSyncID), + dotc1z.WithExpandableGrantsPageToken(pageToken), + dotc1z.WithExpandableGrantsNeedsExpansionOnly(false), + ) + if err != nil { + return err + } + + for _, def := range defs { + _, dstAffected := affected[def.DstEntitlementID] + srcAffected := false + if !dstAffected { + for _, src := range def.SrcEntitlementIDs { + if _, ok := affected[src]; ok { + srcAffected = true + break + } + } + } + if dstAffected || srcAffected { + toMark = append(toMark, def.GrantExternalID) + } + } + + if next == "" { + break + } + pageToken = next + } + + // Apply in chunks to avoid huge IN() clauses. + const chunk = 5000 + for i := 0; i < len(toMark); i += chunk { + j := i + chunk + if j > len(toMark) { + j = len(toMark) + } + if err := store.SetNeedsExpansionForGrants(ctx, targetSyncID, toMark[i:j], true); err != nil { + return fmt.Errorf("mark needs_expansion: %w", err) + } + } + return nil +} diff --git a/pkg/sync/syncer.go b/pkg/sync/syncer.go index 532cc8e47..e6ab1ed1e 100644 --- a/pkg/sync/syncer.go +++ b/pkg/sync/syncer.go @@ -704,6 +704,20 @@ func (s *syncer) Sync(ctx context.Context) error { continue case SyncGrantExpansionOp: + // Mark the sync as supporting diff, but only if we're starting fresh. + // If we're resuming (graph has edges or a page token), we may be continuing + // from old code that didn't have this marker, so we must not set it. 
+ entitlementGraph := s.state.EntitlementGraph(ctx) + isResumingExpansion := entitlementGraph.Loaded || len(entitlementGraph.Edges) > 0 || s.state.PageToken(ctx) != "" + if !isResumingExpansion { + if expansionStore, ok := s.store.(connectorstore.ExpansionStore); ok { + if err := expansionStore.SetSupportsDiff(ctx, s.syncID); err != nil { + l.Error("failed to set supports_diff marker", zap.Error(err)) + return err + } + } + } + if s.dontExpandGrants || !s.state.NeedsExpansion() { l.Debug("skipping grant expansion, no grants to expand") s.state.FinishAction(ctx) @@ -1725,131 +1739,87 @@ func (s *syncer) loadEntitlementGraph(ctx context.Context, graph *expand.Entitle s.handleInitialActionForStep(ctx, *s.state.Current()) } - resp, err := s.store.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{PageToken: pageToken}.Build()) + // List only expandable grants that need expansion using SQL-layer filtering. + resp, err := s.store.ListGrants(ctx, v2.GrantsServiceListGrantsRequest_builder{ + PageToken: pageToken, + ExpandableOnly: true, + NeedsExpansionOnly: true, + }.Build()) if err != nil { return err } - // Handle pagination - if resp.GetNextPageToken() != "" { - err = s.state.NextPage(ctx, resp.GetNextPageToken()) - if err != nil { - return err - } - } else { - l.Debug("Finished loading grants to expand") - graph.Loaded = true - } - - // Process grants and add edges to the graph - updatedGrants := make([]*v2.Grant, 0) for _, grant := range resp.GetList() { - err := s.processGrantForGraph(ctx, grant, graph) - if err != nil { - return err - } - - // Remove expandable annotation from descendant grant now that we've added it to the graph. - // That way if this sync is part of a compaction, expanding grants at the end of compaction won't redo work. 
- newAnnos := make(annotations.Annotations, 0) - updated := false - for _, anno := range grant.GetAnnotations() { - if anno.MessageIs(&v2.GrantExpandable{}) { - updated = true - } else { - newAnnos = append(newAnnos, anno) - } - } - if !updated { + // Extract GrantExpandable annotation from the grant. + annos := annotations.Annotations(grant.GetAnnotations()) + expandable := &v2.GrantExpandable{} + ok, err := annos.Pick(expandable) + if err != nil || !ok { + // This shouldn't happen since we filtered by is_expandable=1, + // but skip gracefully if the annotation is missing. continue } - grant.SetAnnotations(newAnnos) - l.Debug("removed expandable annotation from grant", zap.String("grant_id", grant.GetId())) - updatedGrants = append(updatedGrants, grant) - updatedGrants, err = expand.PutGrantsInChunks(ctx, s.store, updatedGrants, 10000) - if err != nil { - return err - } - } - - _, err = expand.PutGrantsInChunks(ctx, s.store, updatedGrants, 0) - if err != nil { - return err - } - - if graph.Loaded { - l.Info("Finished loading entitlement graph", zap.Int("edges", len(graph.Edges))) - } - return nil -} - -// processGrantForGraph examines a grant for expandable annotations and adds edges to the graph. 
-func (s *syncer) processGrantForGraph(ctx context.Context, grant *v2.Grant, graph *expand.EntitlementGraph) error { - l := ctxzap.Extract(ctx) + principalID := grant.GetPrincipal().GetId() + dstEntitlementID := grant.GetEntitlement().GetId() - annos := annotations.Annotations(grant.GetAnnotations()) - expandable := &v2.GrantExpandable{} - _, err := annos.Pick(expandable) - if err != nil { - return err - } - if len(expandable.GetEntitlementIds()) == 0 { - return nil - } - - principalID := grant.GetPrincipal().GetId() - if principalID == nil { - return fmt.Errorf("principal id was nil") - } - - for _, srcEntitlementID := range expandable.GetEntitlementIds() { - l.Debug( - "Expandable entitlement found", - zap.String("src_entitlement_id", srcEntitlementID), - zap.String("dst_entitlement_id", grant.GetEntitlement().GetId()), - ) + for _, srcEntitlementID := range expandable.GetEntitlementIds() { + // Validate that the source entitlement's resource matches the grant's principal. + srcEntitlement, err := s.store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ + EntitlementId: srcEntitlementID, + }.Build()) + if err != nil { + // Only skip not-found entitlements; propagate other errors + // to avoid silently dropping edges and yielding incorrect expansions. 
+ if errors.Is(err, sql.ErrNoRows) { + l.Debug("source entitlement not found, skipping edge", + zap.String("src_entitlement_id", srcEntitlementID), + zap.String("dst_entitlement_id", dstEntitlementID), + ) + continue + } + l.Error("error fetching source entitlement", + zap.String("src_entitlement_id", srcEntitlementID), + zap.String("dst_entitlement_id", dstEntitlementID), + zap.Error(err), + ) + return err + } - srcEntitlement, err := s.store.GetEntitlement(ctx, reader_v2.EntitlementsReaderServiceGetEntitlementRequest_builder{ - EntitlementId: srcEntitlementID, - }.Build()) - if err != nil { - l.Error("error fetching source entitlement", - zap.String("src_entitlement_id", srcEntitlementID), - zap.String("dst_entitlement_id", grant.GetEntitlement().GetId()), - zap.Error(err), - ) - continue - } + sourceEntitlementResourceID := srcEntitlement.GetEntitlement().GetResource().GetId() + if sourceEntitlementResourceID == nil { + return fmt.Errorf("source entitlement resource id was nil") + } + if principalID.GetResourceType() != sourceEntitlementResourceID.GetResourceType() || + principalID.GetResource() != sourceEntitlementResourceID.GetResource() { + l.Error( + "source entitlement resource id did not match grant principal id", + zap.String("grant_principal_id", principalID.String()), + zap.String("source_entitlement_resource_id", sourceEntitlementResourceID.String())) - // The expand annotation points at entitlements by id. Those entitlements' resource should match - // the current grant's principal, so we don't allow expanding arbitrary entitlements. 
- sourceEntitlementResourceID := srcEntitlement.GetEntitlement().GetResource().GetId() - if sourceEntitlementResourceID == nil { - return fmt.Errorf("source entitlement resource id was nil") - } - if principalID.GetResourceType() != sourceEntitlementResourceID.GetResourceType() || - principalID.GetResource() != sourceEntitlementResourceID.GetResource() { - l.Error( - "source entitlement resource id did not match grant principal id", - zap.String("grant_principal_id", principalID.String()), - zap.String("source_entitlement_resource_id", sourceEntitlementResourceID.String())) + return fmt.Errorf("source entitlement resource id did not match grant principal id") + } - return fmt.Errorf("source entitlement resource id did not match grant principal id") + graph.AddEntitlementID(dstEntitlementID) + graph.AddEntitlementID(srcEntitlementID) + err = graph.AddEdge(ctx, srcEntitlementID, dstEntitlementID, expandable.GetShallow(), expandable.GetResourceTypeIds()) + if err != nil { + return fmt.Errorf("error adding edge to graph: %w", err) + } } + } - graph.AddEntitlement(grant.GetEntitlement()) - graph.AddEntitlement(srcEntitlement.GetEntitlement()) - err = graph.AddEdge(ctx, - srcEntitlement.GetEntitlement().GetId(), - grant.GetEntitlement().GetId(), - expandable.GetShallow(), - expandable.GetResourceTypeIds(), - ) - if err != nil { - return fmt.Errorf("error adding edge to graph: %w", err) + // Handle pagination + nextPageToken := resp.GetNextPageToken() + if nextPageToken != "" { + if err := s.state.NextPage(ctx, nextPageToken); err != nil { + return err } + } else { + graph.Loaded = true + l.Info("Finished loading entitlement graph", zap.Int("edges", len(graph.Edges))) } + return nil } diff --git a/pkg/sync/syncer_test.go b/pkg/sync/syncer_test.go index 8ea1b85f8..2324c3a25 100644 --- a/pkg/sync/syncer_test.go +++ b/pkg/sync/syncer_test.go @@ -108,13 +108,8 @@ func TestExpandGrants(t *testing.T) { } } require.Len(t, allGrants, expectedGrantCount, "should have %d grants but 
got %d", expectedGrantCount, len(allGrants)) - for _, grant := range allGrants { - annos := annotations.Annotations(grant.GetAnnotations()) - expandable := &v2.GrantExpandable{} - ok, err := annos.Pick(expandable) - require.NoError(t, err) - require.False(t, ok, "grants are expanded, but grant %s has expandable annotation with entitlement ids %v", grant.GetId(), expandable.GetEntitlementIds()) - } + // Note: We no longer strip GrantExpandable from stored grants during expansion. + // Expansion bookkeeping lives outside the grant proto so diffs can safely compare data bytes. } func TestInvalidResourceTypeFilter(t *testing.T) { diff --git a/pkg/synccompactor/attached/attached_test.go b/pkg/synccompactor/attached/attached_test.go index 25eb8257e..36caa73c9 100644 --- a/pkg/synccompactor/attached/attached_test.go +++ b/pkg/synccompactor/attached/attached_test.go @@ -185,6 +185,7 @@ func TestAttachedCompactorDoesNotOperateOnDiffSyncTypes(t *testing.T) { oldSyncID, err := oldDB.StartNewSync(ctx, connectorstore.SyncTypeFull, "") require.NoError(t, err) + require.NoError(t, oldDB.SetSupportsDiff(ctx, oldSyncID)) require.NoError(t, oldDB.EndSync(ctx)) // Applied DB: create a full sync, then generate diff syncs, then delete the full sync. diff --git a/proto/c1/connector/v2/grant.proto b/proto/c1/connector/v2/grant.proto index 4cb3a47f4..9af90c153 100644 --- a/proto/c1/connector/v2/grant.proto +++ b/proto/c1/connector/v2/grant.proto @@ -30,7 +30,7 @@ message Grant { } message GrantsServiceListGrantsRequest { - c1.connector.v2.Resource resource = 1 [(validate.rules).message = {required: true}]; + c1.connector.v2.Resource resource = 1; uint32 page_size = 2 [(validate.rules).uint32 = { ignore_empty: true lte: 250 @@ -46,6 +46,10 @@ message GrantsServiceListGrantsRequest { min_bytes: 1 max_bytes: 1024 }]; + // If true, only return grants that are expandable (have GrantExpandable annotation). 
+ bool expandable_only = 6; + // If true, only return grants that need expansion processing. + bool needs_expansion_only = 7; } message GrantsServiceListGrantsResponse {