From 5396aec63fc8c447f8c4ddb5ecc830ec7e5bccd0 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 6 May 2024 09:53:25 -0600 Subject: [PATCH 01/54] adds List and Get methods to alerts client The Get endpoint already exists on the service, so only the List endpoint needed to be added there. BACK-2554 --- alerts/client.go | 45 ++++++++++++++++++++++++++---- alerts/config.go | 1 + data/service/api/v1/alerts.go | 37 ++++++++++++++++++++++++ data/service/api/v1/alerts_test.go | 21 ++++++++++++-- data/store/mongo/mongo_alerts.go | 21 ++++++++++++++ 5 files changed, 116 insertions(+), 9 deletions(-) diff --git a/alerts/client.go b/alerts/client.go index dcaafce96c..4709ba87dc 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -8,6 +8,7 @@ import ( "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/errors" platformlog "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/log/null" "github.com/tidepool-org/platform/platform" @@ -42,14 +43,22 @@ type PlatformClient interface { requestBody interface{}, responseBody interface{}, inspectors ...request.ResponseInspector) error } +// TokenProvider retrieves session tokens for calling the alerts API. +// +// client.External is one implementation +type TokenProvider interface { + // ServerSessionToken provides a server-to-server API authentication token. + ServerSessionToken() (string, error) +} + // request performs common operations before passing a request off to the // underlying platform.Client. -func (c *Client) request(ctx context.Context, method, url string, body any) error { +func (c *Client) request(ctx context.Context, method, url string, reqBody, resBody any) error { // Platform's client.Client expects a logger to exist in the request's // context. If it doesn't exist, request processing will panic. loggingCtx := platformlog.NewContextWithLogger(ctx, c.logger) // Make sure the auth token is injected into the request's headers. 
- return c.requestWithAuth(loggingCtx, method, url, body) + return c.requestWithAuth(loggingCtx, method, url, reqBody, resBody) } // requestWithAuth injects an auth token before calling platform.Client.RequestData. @@ -58,20 +67,44 @@ func (c *Client) request(ctx context.Context, method, url string, body any) erro // platform.Client. It might be nice to be able to use a mutator, but the auth // is specifically handled by the platform.Client via the context field, and // if left blank, platform.Client errors. -func (c *Client) requestWithAuth(ctx context.Context, method, url string, body any) error { - return c.client.RequestData(auth.NewContextWithServerSessionTokenProvider(ctx, c.tokenProvider), method, url, nil, body, nil) +func (c *Client) requestWithAuth(ctx context.Context, method, url string, reqBody, resBody any) error { + return c.client.RequestData(auth.NewContextWithServerSessionTokenProvider(ctx, c.tokenProvider), method, url, nil, reqBody, resBody) } // Upsert updates cfg if it exists or creates it if it doesn't. func (c *Client) Upsert(ctx context.Context, cfg *Config) error { url := c.client.ConstructURL("v1", "users", cfg.FollowedUserID, "followers", cfg.UserID, "alerts") - return c.request(ctx, http.MethodPost, url, cfg) + return c.request(ctx, http.MethodPost, url, cfg, nil) } // Delete the alerts config. func (c *Client) Delete(ctx context.Context, cfg *Config) error { url := c.client.ConstructURL("v1", "users", cfg.FollowedUserID, "followers", cfg.UserID, "alerts") - return c.request(ctx, http.MethodDelete, url, nil) + return c.request(ctx, http.MethodDelete, url, nil, nil) +} + +// Get a user's alerts configuration for the followed user. 
+func (c *Client) Get(ctx context.Context, followedUserID, userID string) (*Config, error) { + url := c.client.ConstructURL("v1", "users", followedUserID, "followers", userID, "alerts") + cfg := &Config{} + err := c.request(ctx, http.MethodGet, url, nil, cfg) + if err != nil { + return nil, errors.Wrap(err, "Unable to request alerts config") + } + return cfg, nil +} + +// List the alerts configurations that follow the given user. +// +// This method should only be called via an authenticated service session. +func (c *Client) List(ctx context.Context, followedUserID string) ([]*Config, error) { + url := c.client.ConstructURL("v1", "users", followedUserID, "followers", "alerts") + configs := []*Config{} + err := c.request(ctx, http.MethodGet, url, nil, &configs) + if err != nil { + return nil, errors.Wrap(err, "Unable to request alerts configs list") + } + return configs, nil } // ConfigLoader abstracts the method by which config values are loaded. diff --git a/alerts/config.go b/alerts/config.go index 67f2b1d72c..d9931b0f9a 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -239,6 +239,7 @@ type Repository interface { Get(ctx context.Context, conf *Config) (*Config, error) Upsert(ctx context.Context, conf *Config) error Delete(ctx context.Context, conf *Config) error + List(ctx context.Context, userID string) ([]*Config, error) EnsureIndexes() error } diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index d07891247e..70941b9e20 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -24,6 +24,7 @@ func AlertsRoutes() []service.Route { service.Get("/v1/users/:userId/followers/:followerUserId/alerts", GetAlert, api.RequireAuth), service.Post("/v1/users/:userId/followers/:followerUserId/alerts", UpsertAlert, api.RequireAuth), service.Delete("/v1/users/:userId/followers/:followerUserId/alerts", DeleteAlert, api.RequireAuth), + service.Get("/v1/users/:userId/followers/alerts", ListAlerts, api.RequireServer), } } 
@@ -134,6 +135,42 @@ func UpsertAlert(dCtx service.Context) { } } +func ListAlerts(dCtx service.Context) { + r := dCtx.Request() + ctx := r.Context() + authDetails := request.GetAuthDetails(ctx) + repo := dCtx.AlertsRepository() + lgr := log.LoggerFromContext(ctx) + + if err := checkAuthentication(authDetails); err != nil { + lgr.Debug("authentication failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + + pathsUserID := r.PathParam("userId") + if err := checkUserIDConsistency(authDetails, pathsUserID); err != nil { + lgr.WithFields(log.Fields{"path": pathsUserID, "auth": authDetails.UserID()}). + Debug("user id consistency failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + + alerts, err := repo.List(ctx, pathsUserID) + if err != nil { + dCtx.RespondWithInternalServerFailure("listing alerts configs", err) + lgr.WithError(err).Error("listing alerts config") + return + } + if len(alerts) == 0 { + dCtx.RespondWithError(ErrorUserIDNotFound(pathsUserID)) + lgr.Debug("no alerts configs found") + } + + responder := request.MustNewResponder(dCtx.Response(), r) + responder.Data(http.StatusOK, alerts) +} + // checkUserIDConsistency verifies the userIDs in a request. // // For safety reasons, if these values don't agree, return an error. 
diff --git a/data/service/api/v1/alerts_test.go b/data/service/api/v1/alerts_test.go index c3b4b2f2a5..d48be38a6f 100644 --- a/data/service/api/v1/alerts_test.go +++ b/data/service/api/v1/alerts_test.go @@ -160,12 +160,15 @@ var _ = Describe("Alerts endpoints", func() { }) type mockRepo struct { - UserID string - Error error + UserID string + Error error + AlertsForUserID map[string][]*alerts.Config } func newMockRepo() *mockRepo { - return &mockRepo{} + return &mockRepo{ + AlertsForUserID: make(map[string][]*alerts.Config), + } } func (r *mockRepo) ReturnsError(err error) { @@ -202,6 +205,18 @@ func (r *mockRepo) Delete(ctx context.Context, conf *alerts.Config) error { return nil } +func (r *mockRepo) List(ctx context.Context, userID string) ([]*alerts.Config, error) { + if r.Error != nil { + return nil, r.Error + } + r.UserID = userID + alerts, ok := r.AlertsForUserID[userID] + if !ok { + return nil, nil + } + return alerts, nil +} + func (r *mockRepo) EnsureIndexes() error { return nil } diff --git a/data/store/mongo/mongo_alerts.go b/data/store/mongo/mongo_alerts.go index ee313f3ffb..489db755fe 100644 --- a/data/store/mongo/mongo_alerts.go +++ b/data/store/mongo/mongo_alerts.go @@ -9,6 +9,7 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" ) @@ -34,6 +35,26 @@ func (r *alertsRepo) Delete(ctx context.Context, cfg *alerts.Config) error { return nil } +// List will retrieve any Configs that are defined by followers of the given user. 
+func (r *alertsRepo) List(ctx context.Context, followedUserID string) ([]*alerts.Config, error) { + filter := bson.D{ + {Key: "followedUserId", Value: followedUserID}, + } + cursor, err := r.Find(ctx, filter, nil) + if err != nil { + return nil, errors.Wrapf(err, "Unable to list alerts.Config(s) for followed user %s", followedUserID) + } + defer cursor.Close(ctx) + out := []*alerts.Config{} + if err := cursor.All(ctx, &out); err != nil { + return nil, errors.Wrapf(err, "Unable to decode alerts.Config(s) for followed user %s", followedUserID) + } + if err := cursor.Err(); err != nil { + return nil, errors.Wrapf(err, "Unexpected error for followed user %s", followedUserID) + } + return out, nil +} + // Get will retrieve the given Config. func (r *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Config, error) { res := r.FindOne(ctx, r.filter(cfg), nil) From feba9d889f8443c2d60667f81caed75d587ea876 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 6 May 2024 10:22:02 -0600 Subject: [PATCH 02/54] lift Repeat out of the base alert config Through discussions it was confirmed that Repeat is not universal to all alerts. So it's lifted out of the Base alert and re-inserted into those alerts where it should be present (namely Low and High alerts only). BACK-2554 --- alerts/config.go | 28 ++++++----- alerts/config_test.go | 106 ++++++++++++++++++++++-------------------- 2 files changed, 72 insertions(+), 62 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index d9931b0f9a..b83cf2b25f 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -72,16 +72,10 @@ func (a Alerts) Validate(validator structure.Validator) { type Base struct { // Enabled controls whether notifications should be sent for this alert. Enabled bool `json:"enabled" bson:"enabled"` - // Repeat is measured in minutes. - // - // A value of 0 (the default) disables repeat notifications. 
- Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) - dur := b.Repeat.Duration() - validator.Duration("repeat", &dur).Using(validateRepeat) } const ( @@ -110,7 +104,7 @@ type UrgentLowAlert struct { Base `bson:",inline"` // Threshold is compared the current value to determine if an alert should // be triggered. - Threshold `json:"threshold"` + Threshold `json:"threshold" bson:"threshold"` } func (a UrgentLowAlert) Validate(validator structure.Validator) { @@ -149,13 +143,19 @@ type LowAlert struct { // be triggered. Threshold `json:"threshold"` Delay DurationMinutes `json:"delay,omitempty"` + // Repeat is measured in minutes. + // + // A value of 0 (the default) disables repeat notifications. + Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (a LowAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) - dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 2*time.Hour) + delayDur := a.Delay.Duration() + validator.Duration("delay", &delayDur).InRange(0, 2*time.Hour) a.Threshold.Validate(validator) + repeatDur := a.Repeat.Duration() + validator.Duration("repeat", &repeatDur).Using(validateRepeat) } // HighAlert extends Base with a threshold and a delay. @@ -165,13 +165,19 @@ type HighAlert struct { // be triggered. Threshold `json:"threshold"` Delay DurationMinutes `json:"delay,omitempty"` + // Repeat is measured in minutes. + // + // A value of 0 (the default) disables repeat notifications. 
+ Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (a HighAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) a.Threshold.Validate(validator) - dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 6*time.Hour) + delayDur := a.Delay.Duration() + validator.Duration("delay", &delayDur).InRange(0, 6*time.Hour) + repeatDur := a.Repeat.Duration() + validator.Duration("repeat", &repeatDur).Using(validateRepeat) } // DurationMinutes reads a JSON integer and converts it to a time.Duration. diff --git a/alerts/config_test.go b/alerts/config_test.go index ec479d8fb4..d28afddd99 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -45,7 +45,6 @@ var _ = Describe("Config", func() { }, "urgentLow": { "enabled": false, - "repeat": 30, "threshold": { "units": "mg/dL", "value": 47.5 @@ -62,12 +61,10 @@ var _ = Describe("Config", func() { }, "notLooping": { "enabled": true, - "repeat": 32, "delay": 4 }, "noCommunication": { "enabled": true, - "repeat": 33, "delay": 6 } }`, mockUserID1, mockUserID2, mockUploadID) @@ -88,14 +85,11 @@ var _ = Describe("Config", func() { Expect(conf.Low.Threshold.Value).To(Equal(80.0)) Expect(conf.Low.Threshold.Units).To(Equal(glucose.MgdL)) Expect(conf.UrgentLow.Enabled).To(Equal(false)) - Expect(conf.UrgentLow.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(conf.UrgentLow.Threshold.Value).To(Equal(47.5)) Expect(conf.UrgentLow.Threshold.Units).To(Equal(glucose.MgdL)) Expect(conf.NotLooping.Enabled).To(Equal(true)) - Expect(conf.NotLooping.Repeat).To(Equal(DurationMinutes(32 * time.Minute))) Expect(conf.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) Expect(conf.NoCommunication.Enabled).To(Equal(true)) - Expect(conf.NoCommunication.Repeat).To(Equal(DurationMinutes(33 * time.Minute))) Expect(conf.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) }) @@ -324,32 +318,41 @@ var _ = Describe("Config", func() { }) Context("repeat", func() { 
+ var defaultAlert = LowAlert{ + Threshold: Threshold{Value: 11, Units: glucose.MmolL}, + } + It("accepts values of 0 (indicating disabled)", func() { val := validator.New(logTest.NewLogger()) - b := Base{Repeat: 0} - b.Validate(val) + l := defaultAlert + l.Repeat = 0 + l.Validate(val) Expect(val.Error()).To(Succeed()) }) It("accepts values of 15 minutes to 4 hours (inclusive)", func() { val := validator.New(logTest.NewLogger()) - b := Base{Repeat: DurationMinutes(15 * time.Minute)} - b.Validate(val) + l := defaultAlert + l.Repeat = DurationMinutes(15 * time.Minute) + l.Validate(val) Expect(val.Error()).To(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(4 * time.Hour)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(4 * time.Hour) + l.Validate(val) Expect(val.Error()).To(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(4*time.Hour + 1)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(4*time.Hour + 1) + l.Validate(val) Expect(val.Error()).NotTo(Succeed()) val = validator.New(logTest.NewLogger()) - b = Base{Repeat: DurationMinutes(15*time.Minute - 1)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(15*time.Minute - 1) + l.Validate(val) Expect(val.Error()).NotTo(Succeed()) }) }) @@ -361,67 +364,68 @@ var _ = Describe("Config", func() { err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(MatchError("json is malformed")) }) - It("validates repeat minutes (negative)", func() { + }) + + Context("low", func() { + It("accepts a blank repeat", func() { buf := buff(`{ "userId": "%s", "followedUserId": "%s", "uploadId": "%s", - "urgentLow": { - "enabled": false, - "repeat": -11, + "low": { + "enabled": true, + "delay": 10, "threshold": { - "units": "%s", - "value": 47.5 + "units": "mg/dL", + "value": 80 } } -}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) - cfg := &Config{} - err := 
request.DecodeObject(context.Background(), nil, buf, cfg) - Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) +}`, mockUserID1, mockUserID2, mockUploadID) + conf := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, conf) + Expect(err).To(Succeed()) + Expect(conf.Low.Repeat).To(Equal(DurationMinutes(0))) }) - It("validates repeat minutes (string)", func() { - buf := buff(`{ + }) + It("validates repeat minutes (negative)", func() { + buf := buff(`{ "userId": "%s", "followedUserId": "%s", - "urgentLow": { + "uploadId": "%s", + "low": { "enabled": false, - "repeat": "a", + "repeat": -11, "threshold": { "units": "%s", - "value": 1 + "value": 47.5 } } -}`, mockUserID1, mockUserID2, glucose.MgdL) - cfg := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, cfg) - Expect(err).To(MatchError("json is malformed")) - }) +}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) + Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) }) - - Context("low", func() { - It("accepts a blank repeat", func() { - buf := buff(`{ + It("validates repeat minutes (string)", func() { + buf := buff(`{ "userId": "%s", "followedUserId": "%s", "uploadId": "%s", "low": { - "enabled": true, - "delay": 10, + "enabled": false, + "repeat": "a", "threshold": { - "units": "mg/dL", - "value": 80 + "units": "%s", + "value": 1 } } -}`, mockUserID1, mockUserID2, mockUploadID) - conf := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, conf) - Expect(err).To(Succeed()) - Expect(conf.Low.Repeat).To(Equal(DurationMinutes(0))) - }) +}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) + Expect(err).To(MatchError("json is malformed")) }) }) -var _ = Describe("Duration", func() { +var _ = Describe("DurationMinutes", 
func() { It("parses 42", func() { d := DurationMinutes(0) err := d.UnmarshalJSON([]byte(`42`)) From 8dc742130f08090884b17edcbe679ccba0e21fd7 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 6 May 2024 12:46:40 -0600 Subject: [PATCH 03/54] adds activity tracking to alert configurations These activity properties will track the times at which alerts were sent, resolved, or acknowledged. BACK-2554 --- alerts/config.go | 20 ++++++++++++++++++ alerts/config_test.go | 48 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/alerts/config.go b/alerts/config.go index b83cf2b25f..b6f8334656 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -72,12 +72,32 @@ func (a Alerts) Validate(validator structure.Validator) { type Base struct { // Enabled controls whether notifications should be sent for this alert. Enabled bool `json:"enabled" bson:"enabled"` + + // Activity tracks when events related to the alert occurred. + Activity `json:"-" bson:"activity,omitempty"` } func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) } +type Activity struct { + // Triggered records the last time this alert was triggered. + Triggered time.Time `json:"triggered" bson:"triggered"` + // Sent records the last time this alert was sent. + Sent time.Time `json:"sent" bson:"sent"` + // Resolved records the last time this alert was resolved. + Resolved time.Time `json:"resolved" bson:"resolved"` +} + +func (a Activity) IsActive() bool { + return a.Triggered.After(a.Resolved) +} + +func (a Activity) IsSent() bool { + return a.Sent.After(a.Triggered) +} + const ( // RepeatMin is the minimum duration for a repeat setting (if not 0). 
RepeatMin = 15 * time.Minute diff --git a/alerts/config_test.go b/alerts/config_test.go index d28afddd99..df38650710 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -127,6 +127,54 @@ var _ = Describe("Config", func() { }) }) + Context("Base", func() { + Context("Activity", func() { + Context("IsActive()", func() { + It("is true", func() { + triggered := time.Now() + resolved := triggered.Add(-time.Nanosecond) + a := Activity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + resolved := triggered.Add(time.Nanosecond) + a := Activity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeFalse()) + }) + }) + + Context("IsSent()", func() { + It("is true", func() { + triggered := time.Now() + sent := triggered.Add(time.Nanosecond) + a := Activity{ + Triggered: triggered, + Sent: sent, + } + Expect(a.IsSent()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + notified := triggered.Add(-time.Nanosecond) + a := Activity{ + Triggered: triggered, + Sent: notified, + } + Expect(a.IsSent()).To(BeFalse()) + }) + }) + }) + }) + Context("UrgentLowAlert", func() { Context("Threshold", func() { It("accepts values between 0 and 1000 mg/dL", func() { From 4a92a30be4f580aaa9a0de35a2851645f1523bc0 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 7 May 2024 08:10:09 -0600 Subject: [PATCH 04/54] adds auth endpoint to retrieve a user's device tokens This endpoint will be used by upcoming changes to the auth client to allow care partner backend processes to retrieve device tokens in order to send mobile device push notifications. 
BACK-2554 --- auth/service/api/v1/devicetokens.go | 25 ++++ auth/service/api/v1/devicetokens_test.go | 67 ++++++++- auth/service/service/client.go | 14 ++ auth/service/service/client_test.go | 148 +++++++++++++++++++ auth/store/mongo/device_tokens_repository.go | 14 ++ auth/store/test/device_token_repository.go | 12 ++ devicetokens/devicetokens.go | 1 + 7 files changed, 279 insertions(+), 2 deletions(-) diff --git a/auth/service/api/v1/devicetokens.go b/auth/service/api/v1/devicetokens.go index c19c654343..99d6b2ede1 100644 --- a/auth/service/api/v1/devicetokens.go +++ b/auth/service/api/v1/devicetokens.go @@ -13,6 +13,7 @@ import ( func (r *Router) DeviceTokensRoutes() []*rest.Route { return []*rest.Route{ rest.Post("/v1/users/:userId/device_tokens", api.RequireUser(r.UpsertDeviceToken)), + rest.Get("/v1/users/:userId/device_tokens", api.RequireAuth(r.GetDeviceTokens)), } } @@ -39,3 +40,27 @@ func (r *Router) UpsertDeviceToken(res rest.ResponseWriter, req *rest.Request) { return } } + +func (r *Router) GetDeviceTokens(res rest.ResponseWriter, req *rest.Request) { + responder := request.MustNewResponder(res, req) + ctx := req.Request.Context() + authDetails := request.GetAuthDetails(ctx) + repo := r.AuthStore().NewDeviceTokenRepository() + userID := req.PathParam("userId") + + if userID != authDetails.UserID() && !authDetails.IsService() { + responder.Error(http.StatusForbidden, request.ErrorUnauthorized()) + return + } + + docs, err := repo.GetAllByUserID(ctx, userID) + if err != nil { + responder.Error(http.StatusInternalServerError, err) + return + } + tokens := make([]devicetokens.DeviceToken, 0, len(docs)) + for _, doc := range docs { + tokens = append(tokens, doc.DeviceToken) + } + responder.Data(http.StatusOK, tokens) +} diff --git a/auth/service/api/v1/devicetokens_test.go b/auth/service/api/v1/devicetokens_test.go index 1033b7cc9c..ef61df746e 100644 --- a/auth/service/api/v1/devicetokens_test.go +++ b/auth/service/api/v1/devicetokens_test.go @@ -3,6 +3,7 
@@ package v1 import ( "bytes" "context" + "encoding/json" "fmt" "io" "net/http" @@ -12,14 +13,18 @@ import ( . "github.com/onsi/gomega" serviceTest "github.com/tidepool-org/platform/auth/service/test" + storetest "github.com/tidepool-org/platform/auth/store/test" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/request" "github.com/tidepool-org/platform/service/test" ) var _ = Describe("Device tokens endpoints", func() { var rtr *Router + var svc *serviceTest.Service + BeforeEach(func() { - svc := serviceTest.NewService() + svc = serviceTest.NewService() var err error rtr, err = NewRouter(svc) Expect(err).ToNot(HaveOccurred()) @@ -66,6 +71,65 @@ var _ = Describe("Device tokens endpoints", func() { }) + Describe("List", func() { + It("succeeds with valid input", func() { + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusOK)) + }) + + It("rejects non-service users", func() { + svcDetails := test.NewMockAuthDetails(request.MethodAccessToken, "test-user", test.TestToken2) + req := newDeviceTokensTestRequest(svcDetails, nil, "") + res := test.NewMockRestResponseWriter() + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusForbidden)) + }) + + It("may return multiple documents", func() { + repo := &storetest.DeviceTokenRepository{ + Documents: []*devicetokens.Document{ + { + DeviceToken: devicetokens.DeviceToken{}, + }, + { + DeviceToken: devicetokens.DeviceToken{}, + }, + }, + } + raw := rtr.Service.AuthStore().(*storetest.Store) + raw.NewDeviceTokenRepositoryImpl = repo + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusOK)) + got := []*devicetokens.DeviceToken{} + err := json.Unmarshal(res.Body.Bytes(), &got) + Expect(err).To(Succeed()) + Expect(got).To(HaveLen(2)) + }) + + 
It("handles repository errors", func() { + repo := &storetest.DeviceTokenRepository{ + Error: fmt.Errorf("test error"), + } + raw := rtr.Service.AuthStore().(*storetest.Store) + raw.NewDeviceTokenRepositoryImpl = repo + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusInternalServerError)) + }) + }) }) func buff(template string, args ...any) *bytes.Buffer { @@ -91,5 +155,4 @@ func newDeviceTokensTestRequest(auth request.AuthDetails, body io.Reader, userID Request: httpReq, PathParams: map[string]string{"userId": userIDFromPath}, } - } diff --git a/auth/service/service/client.go b/auth/service/service/client.go index 4335eae5db..71d27ad2dd 100644 --- a/auth/service/service/client.go +++ b/auth/service/service/client.go @@ -6,6 +6,7 @@ import ( "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/auth/client" authStore "github.com/tidepool-org/platform/auth/store" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/page" @@ -102,6 +103,19 @@ func (c *Client) DeleteAllProviderSessions(ctx context.Context, userID string) e return repository.DeleteAllProviderSessions(ctx, userID) } +func (c *Client) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + repo := c.authStore.NewDeviceTokenRepository() + docs, err := repo.GetAllByUserID(ctx, userID) + if err != nil { + return nil, err + } + tokens := make([]*devicetokens.DeviceToken, 0, len(docs)) + for _, doc := range docs { + tokens = append(tokens, &doc.DeviceToken) + } + return tokens, nil +} + func (c *Client) GetProviderSession(ctx context.Context, id string) (*auth.ProviderSession, error) { repository := c.authStore.NewProviderSessionRepository() return repository.GetProviderSession(ctx, id) diff --git 
a/auth/service/service/client_test.go b/auth/service/service/client_test.go index 9a8a94e85d..1714e947a3 100644 --- a/auth/service/service/client_test.go +++ b/auth/service/service/client_test.go @@ -1,8 +1,156 @@ package service_test import ( + "context" + "fmt" + "time" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . "github.com/onsi/gomega/ghttp" + + "github.com/tidepool-org/platform/appvalidate" + "github.com/tidepool-org/platform/auth/client" + "github.com/tidepool-org/platform/auth/service/service" + "github.com/tidepool-org/platform/auth/store" + platformclient "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/devicetokens" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/provider" ) var _ = Describe("Client", func() { + var testUserID = "test-user-id" + var testDeviceToken1 = &devicetokens.DeviceToken{ + Apple: &devicetokens.AppleDeviceToken{ + Token: []byte("test"), + Environment: "sandbox", + }, + } + + newTestServiceClient := func(url string, authStore store.Store) *service.Client { + var err error + extCfg := &client.ExternalConfig{ + Config: &platform.Config{ + Config: &platformclient.Config{ + Address: url, + UserAgent: "test", + }, + ServiceSecret: "", + }, + ServerSessionTokenSecret: "test token", + ServerSessionTokenTimeout: time.Minute, + } + authAs := platform.AuthorizeAsService + name := "test auth client" + logger := logtest.NewLogger() + if authStore == nil { + authStore = &mockAuthStore{ + DeviceTokenRepository: &mockDeviceTokenRepository{ + Tokens: map[string][]*devicetokens.DeviceToken{ + testUserID: { + testDeviceToken1, + }, + }, + }, + } + } + providerFactory := &mockProviderFactory{} + serviceClient, err := service.NewClient(extCfg, authAs, name, logger, authStore, providerFactory) + Expect(err).To(Succeed()) + return serviceClient + } + + Describe("GetDeviceTokens", func() { + It("returns a slice of 
tokens", func() { + ctx := context.Background() + server := NewServer() + defer server.Close() + serviceClient := newTestServiceClient(server.URL(), nil) + + tokens, err := serviceClient.GetDeviceTokens(ctx, testUserID) + + Expect(err).To(Succeed()) + Expect(tokens).To(HaveLen(1)) + Expect(tokens[0]).To(Equal(testDeviceToken1)) + }) + + It("handles errors from the underlying repo", func() { + ctx := context.Background() + server := NewServer() + defer server.Close() + authStore := &mockAuthStore{ + DeviceTokenRepository: &mockDeviceTokenRepository{ + Error: fmt.Errorf("test error"), + }, + } + serviceClient := newTestServiceClient(server.URL(), authStore) + + _, err := serviceClient.GetDeviceTokens(ctx, testUserID) + + Expect(err).To(HaveOccurred()) + }) + }) }) + +type mockAuthStore struct { + store.DeviceTokenRepository +} + +func (s *mockAuthStore) NewAppValidateRepository() appvalidate.Repository { + return nil +} + +func (s *mockAuthStore) NewProviderSessionRepository() store.ProviderSessionRepository { + return nil +} + +func (s *mockAuthStore) NewRestrictedTokenRepository() store.RestrictedTokenRepository { + return nil +} + +func (s *mockAuthStore) NewDeviceTokenRepository() store.DeviceTokenRepository { + return s.DeviceTokenRepository +} + +type mockProviderFactory struct{} + +func (f *mockProviderFactory) Get(typ string, name string) (provider.Provider, error) { + return nil, nil +} + +type mockDeviceTokenRepository struct { + Error error + Tokens map[string][]*devicetokens.DeviceToken +} + +func (r *mockDeviceTokenRepository) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + if r.Error != nil { + return nil, r.Error + } + + if tokens, ok := r.Tokens[userID]; ok { + docs := make([]*devicetokens.Document, 0, len(tokens)) + for _, token := range tokens { + docs = append(docs, &devicetokens.Document{DeviceToken: *token}) + } + return docs, nil + } + return nil, nil +} + +func (r *mockDeviceTokenRepository) Upsert(ctx 
context.Context, doc *devicetokens.Document) error { + if r.Error != nil { + return r.Error + } + return nil +} + +func (r *mockDeviceTokenRepository) EnsureIndexes() error { + if r.Error != nil { + return r.Error + } + return nil +} diff --git a/auth/store/mongo/device_tokens_repository.go b/auth/store/mongo/device_tokens_repository.go index 4a257ca9f0..d338c27ea3 100644 --- a/auth/store/mongo/device_tokens_repository.go +++ b/auth/store/mongo/device_tokens_repository.go @@ -16,6 +16,20 @@ import ( // MongoDB collection. type deviceTokenRepo structuredmongo.Repository +func (r *deviceTokenRepo) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + f := bson.M{"userId": userID} + cursor, err := r.Find(ctx, f, nil) + if err != nil { + return nil, err + } + defer cursor.Close(ctx) + docs := make([]*devicetokens.Document, 0, cursor.RemainingBatchLength()) + if err := cursor.All(ctx, &docs); err != nil { + return nil, err + } + return docs, nil +} + // Upsert will create or update the given Config. 
func (r *deviceTokenRepo) Upsert(ctx context.Context, doc *devicetokens.Document) error { // The presence of UserID and TokenID should be enforced with a mongodb diff --git a/auth/store/test/device_token_repository.go b/auth/store/test/device_token_repository.go index 4847596895..dbb40d9200 100644 --- a/auth/store/test/device_token_repository.go +++ b/auth/store/test/device_token_repository.go @@ -9,6 +9,8 @@ import ( type DeviceTokenRepository struct { *authTest.DeviceTokenAccessor + Documents []*devicetokens.Document + Error error } func NewDeviceTokenRepository() *DeviceTokenRepository { @@ -21,6 +23,16 @@ func (r *DeviceTokenRepository) Expectations() { r.DeviceTokenAccessor.Expectations() } +func (r *DeviceTokenRepository) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + if r.Error != nil { + return nil, r.Error + } + if len(r.Documents) > 0 { + return r.Documents, nil + } + return nil, nil +} + func (r *DeviceTokenRepository) Upsert(ctx context.Context, doc *devicetokens.Document) error { return nil } diff --git a/devicetokens/devicetokens.go b/devicetokens/devicetokens.go index a8fb790a3d..721f110653 100644 --- a/devicetokens/devicetokens.go +++ b/devicetokens/devicetokens.go @@ -100,6 +100,7 @@ type AppleBlob []byte // Repository abstracts persistent storage for Token data. type Repository interface { + GetAllByUserID(ctx context.Context, userID string) ([]*Document, error) Upsert(ctx context.Context, doc *Document) error EnsureIndexes() error From 0ce49c19ed42a927e16787f350c1c38ff9cf0bfb Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 7 May 2024 08:45:22 -0600 Subject: [PATCH 05/54] adds the ability to retrieve device tokens to the auth client This functionality will be used by care partner processes to retrieve device tokens in order to send mobile device push notifications in response to care partner alerts being triggered. 
BACK-2554 --- auth/auth.go | 8 +++++ auth/client/client.go | 13 ++++++++ auth/client/client_test.go | 60 ++++++++++++++++++++++++++++++++++ auth/test/client.go | 2 ++ auth/test/external_accessor.go | 9 +++++ auth/test/mock.go | 54 ++++++++++++++++++++++++++++++ 6 files changed, 146 insertions(+) diff --git a/auth/auth.go b/auth/auth.go index e728beac54..976dbd1f43 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -3,6 +3,7 @@ package auth import ( "context" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" ) @@ -21,6 +22,7 @@ type Client interface { RestrictedTokenAccessor ExternalAccessor permission.Client + DeviceTokensClient } type ExternalAccessor interface { @@ -51,3 +53,9 @@ func ServerSessionTokenProviderFromContext(ctx context.Context) ServerSessionTok type contextKey string const serverSessionTokenProviderContextKey contextKey = "serverSessionTokenProvider" + +// DeviceTokensClient provides access to the tokens used to authenticate +// mobile device push notifications. +type DeviceTokensClient interface { + GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) +} diff --git a/auth/client/client.go b/auth/client/client.go index a4c8511a27..d29f6561ba 100644 --- a/auth/client/client.go +++ b/auth/client/client.go @@ -5,6 +5,7 @@ import ( "net/http" "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/page" @@ -308,6 +309,18 @@ func (c *Client) DeleteRestrictedToken(ctx context.Context, id string) error { return c.client.RequestData(ctx, http.MethodDelete, url, nil, nil, nil) } +// GetDeviceTokens belonging to a given user. 
+func (c *Client) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + ctx = log.NewContextWithLogger(ctx, c.logger) + url := c.client.ConstructURL("v1", "users", userID, "device_tokens") + tokens := []*devicetokens.DeviceToken{} + err := c.client.RequestData(ctx, http.MethodGet, url, nil, nil, &tokens) + if err != nil { + return nil, errors.Wrap(err, "Unable to request device token data") + } + return tokens, nil +} + type ConfigLoader interface { Load(*Config) error } diff --git a/auth/client/client_test.go b/auth/client/client_test.go index fbd9a6be14..23db62b316 100644 --- a/auth/client/client_test.go +++ b/auth/client/client_test.go @@ -2,6 +2,7 @@ package client_test import ( "context" + "encoding/json" "net/http" "time" @@ -14,6 +15,7 @@ import ( "github.com/tidepool-org/platform/auth" authClient "github.com/tidepool-org/platform/auth/client" authTest "github.com/tidepool-org/platform/auth/test" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" errorsTest "github.com/tidepool-org/platform/errors/test" "github.com/tidepool-org/platform/log" @@ -472,6 +474,64 @@ var _ = Describe("Client", func() { }) }) }) + + Describe("GetDeviceTokens", func() { + var testUserID = "test-user-id" + var testUserIDBadResponse = "test-user-id-bad-response" + var testTokens = map[string]any{ + testUserID: []*devicetokens.DeviceToken{{ + Apple: &devicetokens.AppleDeviceToken{ + Token: []byte("blah"), + Environment: "sandbox", + }, + }}, + testUserIDBadResponse: []map[string]any{ + { + "Apple": "", + }, + }, + } + + It("returns a token", func() { + body, err := json.Marshal(testTokens[testUserID]) + Expect(err).To(Succeed()) + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserID+"/device_tokens"), + RespondWith(http.StatusOK, body)), + ) + + tokens, err := client.GetDeviceTokens(ctx, testUserID) + Expect(err).To(Succeed()) + Expect(tokens).To(HaveLen(1)) + 
Expect([]byte(tokens[0].Apple.Token)).To(Equal([]byte("blah"))) + Expect(tokens[0].Apple.Environment).To(Equal("sandbox")) + }) + + It("returns an error when receiving malformed responses", func() { + body, err := json.Marshal(testTokens[testUserIDBadResponse]) + Expect(err).To(Succeed()) + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserIDBadResponse+"/device_tokens"), + RespondWith(http.StatusOK, body)), + ) + + _, err = client.GetDeviceTokens(ctx, testUserIDBadResponse) + Expect(err).To(HaveOccurred()) + }) + + It("returns an error on non-200 responses", func() { + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserID+"/device_tokens"), + RespondWith(http.StatusBadRequest, nil)), + ) + _, err := client.GetDeviceTokens(ctx, testUserID) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("Unable to request device token data"))) + }) + }) }) }) }) diff --git a/auth/test/client.go b/auth/test/client.go index e500f69d34..9fba8f4e5c 100644 --- a/auth/test/client.go +++ b/auth/test/client.go @@ -4,6 +4,7 @@ type Client struct { *ProviderSessionAccessor *RestrictedTokenAccessor *ExternalAccessor + *DeviceTokensClient } func NewClient() *Client { @@ -11,6 +12,7 @@ func NewClient() *Client { ProviderSessionAccessor: NewProviderSessionAccessor(), RestrictedTokenAccessor: NewRestrictedTokenAccessor(), ExternalAccessor: NewExternalAccessor(), + DeviceTokensClient: NewDeviceTokensClient(), } } diff --git a/auth/test/external_accessor.go b/auth/test/external_accessor.go index 1916c1cf28..a7872e4c34 100644 --- a/auth/test/external_accessor.go +++ b/auth/test/external_accessor.go @@ -3,6 +3,7 @@ package test import ( "context" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" ) @@ -179,3 +180,11 @@ func (e *ExternalAccessor) GetUserPermissions(ctx context.Context, requestUserID } 
panic("GetUserPermissions no output") } + +func NewDeviceTokensClient() *DeviceTokensClient { return &DeviceTokensClient{} } + +type DeviceTokensClient struct{} + +func (c *DeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + return nil, nil +} diff --git a/auth/test/mock.go b/auth/test/mock.go index 0146c6bb9d..de0089e03b 100644 --- a/auth/test/mock.go +++ b/auth/test/mock.go @@ -11,6 +11,7 @@ import ( gomock "github.com/golang/mock/gomock" auth "github.com/tidepool-org/platform/auth" + devicetokens "github.com/tidepool-org/platform/devicetokens" page "github.com/tidepool-org/platform/page" permission "github.com/tidepool-org/platform/permission" request "github.com/tidepool-org/platform/request" @@ -168,6 +169,21 @@ func (mr *MockClientMockRecorder) EnsureAuthorizedUser(ctx, targetUserID, permis return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureAuthorizedUser", reflect.TypeOf((*MockClient)(nil).EnsureAuthorizedUser), ctx, targetUserID, permission) } +// GetDeviceTokens mocks base method. +func (m *MockClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceTokens", ctx, userID) + ret0, _ := ret[0].([]*devicetokens.DeviceToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeviceTokens indicates an expected call of GetDeviceTokens. +func (mr *MockClientMockRecorder) GetDeviceTokens(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceTokens", reflect.TypeOf((*MockClient)(nil).GetDeviceTokens), ctx, userID) +} + // GetProviderSession mocks base method. 
func (m *MockClient) GetProviderSession(ctx context.Context, id string) (*auth.ProviderSession, error) { m.ctrl.T.Helper() @@ -436,3 +452,41 @@ func (mr *MockServerSessionTokenProviderMockRecorder) ServerSessionToken() *gomo mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServerSessionToken", reflect.TypeOf((*MockServerSessionTokenProvider)(nil).ServerSessionToken)) } + +// MockDeviceTokensClient is a mock of DeviceTokensClient interface. +type MockDeviceTokensClient struct { + ctrl *gomock.Controller + recorder *MockDeviceTokensClientMockRecorder +} + +// MockDeviceTokensClientMockRecorder is the mock recorder for MockDeviceTokensClient. +type MockDeviceTokensClientMockRecorder struct { + mock *MockDeviceTokensClient +} + +// NewMockDeviceTokensClient creates a new mock instance. +func NewMockDeviceTokensClient(ctrl *gomock.Controller) *MockDeviceTokensClient { + mock := &MockDeviceTokensClient{ctrl: ctrl} + mock.recorder = &MockDeviceTokensClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDeviceTokensClient) EXPECT() *MockDeviceTokensClientMockRecorder { + return m.recorder +} + +// GetDeviceTokens mocks base method. +func (m *MockDeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceTokens", ctx, userID) + ret0, _ := ret[0].([]*devicetokens.DeviceToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeviceTokens indicates an expected call of GetDeviceTokens. 
+func (mr *MockDeviceTokensClientMockRecorder) GetDeviceTokens(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceTokens", reflect.TypeOf((*MockDeviceTokensClient)(nil).GetDeviceTokens), ctx, userID) +} From 8915bf30ccd617702fdd1580d9c730da76e277f7 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 7 May 2024 09:18:47 -0600 Subject: [PATCH 06/54] remove unused device tokens repo from data This was missed when moving device tokens from the data service to the auth service in commit a0f5a84. BACK-2554 --- data/service/api/v1/mocks/context.go | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/data/service/api/v1/mocks/context.go b/data/service/api/v1/mocks/context.go index d0ac5c33d2..86c804b906 100644 --- a/data/service/api/v1/mocks/context.go +++ b/data/service/api/v1/mocks/context.go @@ -10,7 +10,6 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data/service/context" - "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" servicecontext "github.com/tidepool-org/platform/service/context" @@ -23,13 +22,12 @@ type Context struct { T likeT // authDetails should be updated via the WithAuthDetails method. 
- authDetails *test.MockAuthDetails - RESTRequest *rest.Request - ResponseWriter rest.ResponseWriter - recorder *httptest.ResponseRecorder - MockAlertsRepository alerts.Repository - MockDeviceTokensRepository devicetokens.Repository - MockPermissionClient permission.Client + authDetails *test.MockAuthDetails + RESTRequest *rest.Request + ResponseWriter rest.ResponseWriter + recorder *httptest.ResponseRecorder + MockAlertsRepository alerts.Repository + MockPermissionClient permission.Client } func NewContext(t likeT, method, url string, body io.Reader) *Context { @@ -98,10 +96,6 @@ func (c *Context) AlertsRepository() alerts.Repository { return c.MockAlertsRepository } -func (c *Context) DeviceTokensRepository() devicetokens.Repository { - return c.MockDeviceTokensRepository -} - func (c *Context) PermissionClient() permission.Client { return c.MockPermissionClient } From b4115010f6134ae333eb99a3bdebd8d411c7fe5f Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 7 May 2024 10:51:10 -0600 Subject: [PATCH 07/54] adds a pusher client for sending APNs push notifications Basic steps are taken to allow for other push notification services to be easily added in the future. 
BACK-2554 --- go.mod | 1 + go.sum | 9 + push/push.go | 132 ++++++ push/push_suite_test.go | 11 + push/push_test.go | 147 +++++++ vendor/github.com/sideshow/apns2/.gitignore | 31 ++ vendor/github.com/sideshow/apns2/LICENSE | 22 + vendor/github.com/sideshow/apns2/README.md | 216 ++++++++++ vendor/github.com/sideshow/apns2/client.go | 238 +++++++++++ .../sideshow/apns2/client_manager.go | 162 +++++++ .../github.com/sideshow/apns2/notification.go | 148 +++++++ .../sideshow/apns2/payload/builder.go | 402 ++++++++++++++++++ vendor/github.com/sideshow/apns2/response.go | 156 +++++++ .../github.com/sideshow/apns2/token/token.go | 107 +++++ vendor/modules.txt | 5 + 15 files changed, 1787 insertions(+) create mode 100644 push/push.go create mode 100644 push/push_suite_test.go create mode 100644 push/push_test.go create mode 100644 vendor/github.com/sideshow/apns2/.gitignore create mode 100644 vendor/github.com/sideshow/apns2/LICENSE create mode 100644 vendor/github.com/sideshow/apns2/README.md create mode 100644 vendor/github.com/sideshow/apns2/client.go create mode 100644 vendor/github.com/sideshow/apns2/client_manager.go create mode 100644 vendor/github.com/sideshow/apns2/notification.go create mode 100644 vendor/github.com/sideshow/apns2/payload/builder.go create mode 100644 vendor/github.com/sideshow/apns2/response.go create mode 100644 vendor/github.com/sideshow/apns2/token/token.go diff --git a/go.mod b/go.mod index 646c4bb7d0..d73cc096dd 100644 --- a/go.mod +++ b/go.mod @@ -21,6 +21,7 @@ require ( github.com/onsi/gomega v1.33.1 github.com/prometheus/client_golang v1.19.1 github.com/rinchsan/device-check-go v1.3.0 + github.com/sideshow/apns2 v0.23.0 github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733 diff --git a/go.sum b/go.sum index 295990bf55..f984743dae 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ 
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/ant0ine/go-json-rest v3.3.2+incompatible h1:nBixrkLFiDNAW0hauKDLc8yJI6XfrQumWvytE1Hk14E= github.com/ant0ine/go-json-rest v3.3.2+incompatible/go.mod h1:q6aCt0GfU6LhpBsnZ/2U+mwe+0XB5WStbmwyoPfc+sk= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= @@ -53,6 +55,7 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -162,6 +165,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
+github.com/sideshow/apns2 v0.23.0 h1:lpkikaZ995GIcKk6AFsYzHyezCrsrfEDvUWcWkEGErY= +github.com/sideshow/apns2 v0.23.0/go.mod h1:7Fceu+sL0XscxrfLSkAoH6UtvKefq3Kq1n4W3ayQZqE= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -218,6 +223,7 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20170512130425-ab89591268e0/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -238,6 +244,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -259,6 +266,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -300,6 +308,7 @@ google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/push/push.go b/push/push.go new file mode 100644 index 0000000000..419cd395b3 --- /dev/null +++ b/push/push.go @@ -0,0 +1,132 @@ +// Package push provides clients for sending mobile device push notifications. 
+package push + +import ( + "context" + "encoding/hex" + "net/http" + "sync" + + "github.com/sideshow/apns2" + "github.com/sideshow/apns2/payload" + "github.com/sideshow/apns2/token" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" +) + +// Notification models a provider-independent push notification. +type Notification struct { + Message string +} + +// APNSPusher implements push notifications via Apple APNs. +type APNSPusher struct { + BundleID string + + client APNS2Client + clientMu sync.Mutex +} + +// NewAPNSPusher creates a Pusher for sending device notifications via Apple's +// APNs. +func NewAPNSPusher(client APNS2Client, bundleID string) *APNSPusher { + return &APNSPusher{ + BundleID: bundleID, + client: client, + } +} + +// NewAPNSPusherFromKeyData creates an APNSPusher for sending device +// notifications via Apple's APNs. +// +// The signingKey is the raw token signing key received from Apple (.p8 file +// containing PEM-encoded private key), along with its respective team id, key +// id, and application bundle id. 
+// +// https://developer.apple.com/documentation/usernotifications/sending-notification-requests-to-apns +func NewAPNSPusherFromKeyData(signingKey []byte, keyID, teamID, bundleID string) (*APNSPusher, error) { + authKey, err := token.AuthKeyFromBytes(signingKey) + if err != nil { + return nil, err + } + token := &token.Token{ + AuthKey: authKey, + KeyID: keyID, + TeamID: teamID, + } + client := &apns2Client{Client: apns2.NewTokenClient(token)} + return NewAPNSPusher(client, bundleID), nil +} + +func (p *APNSPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, note *Notification) error { + if deviceToken.Apple == nil { + return errors.New("Unable to push notification: APNSPusher can only use Apple device tokens but the Apple token is nil") + } + + hexToken := hex.EncodeToString(deviceToken.Apple.Token) + appleNote := p.buildAppleNotification(hexToken, note) + resp, err := p.safePush(ctx, deviceToken.Apple.Environment, appleNote) + if err != nil { + return errors.Wrap(err, "Unable to push notification") + } + if resp.StatusCode != http.StatusOK { + return errors.Newf("Unable to push notification: APNs returned non-200 status: %d, %s", resp.StatusCode, resp.Reason) + } + if logger := log.LoggerFromContext(ctx); logger != nil { + logger.WithFields(log.Fields{ + "apnsID": resp.ApnsID, + }).Info("notification pushed") + } + + return nil +} + +// safePush guards the environment setup and push method with a mutex. +// +// This prevents the environment from being changed out from under +// you. Unlikely, but better safe than sorry. 
+func (p *APNSPusher) safePush(ctx context.Context, env string, note *apns2.Notification) (*apns2.Response, error) { + p.clientMu.Lock() + defer p.clientMu.Unlock() + if env == devicetokens.AppleEnvProduction { + p.client.Production() + } else { + p.client.Development() + } + return p.client.PushWithContext(ctx, note) +} + +func (p *APNSPusher) buildAppleNotification(hexToken string, note *Notification) *apns2.Notification { + payload := payload.NewPayload(). + Alert(note.Message). + AlertBody(note.Message) + return &apns2.Notification{ + DeviceToken: hexToken, + Payload: payload, + Topic: p.BundleID, + } +} + +// APNS2Client abstracts the apns2 library for easier testing. +type APNS2Client interface { + Development() APNS2Client + Production() APNS2Client + PushWithContext(apns2.Context, *apns2.Notification) (*apns2.Response, error) +} + +// apns2Client adapts the apns2.Client to APNS2Client so it can be replaced for testing. +type apns2Client struct { + *apns2.Client +} + +func (c apns2Client) Development() APNS2Client { + d := c.Client.Development() + return &apns2Client{Client: d} +} + +func (c apns2Client) Production() APNS2Client { + p := c.Client.Production() + return &apns2Client{Client: p} +} diff --git a/push/push_suite_test.go b/push/push_suite_test.go new file mode 100644 index 0000000000..a5b73e9d49 --- /dev/null +++ b/push/push_suite_test.go @@ -0,0 +1,11 @@ +package push + +import ( + "testing" + + "github.com/tidepool-org/platform/test" +) + +func TestSuite(t *testing.T) { + test.Test(t) +} diff --git a/push/push_test.go b/push/push_test.go new file mode 100644 index 0000000000..5922f85e25 --- /dev/null +++ b/push/push_test.go @@ -0,0 +1,147 @@ +package push + +import ( + "context" + "fmt" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/sideshow/apns2" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + testlog "github.com/tidepool-org/platform/log/test" +) + +const ( + testBundleID = "test-bundle-id" +) + +var ( + testDeviceToken []byte = []byte("dGVzdGluZyAxIDIgMw==") +) + +type pushTestDeps struct { + Client *mockAPNS2Client + Token *devicetokens.DeviceToken + Notification *Notification +} + +func testDeps() (context.Context, *APNSPusher, *pushTestDeps) { + ctx := context.Background() + mockClient := &mockAPNS2Client{ + Response: &apns2.Response{ + StatusCode: http.StatusOK, + }, + } + pusher := NewAPNSPusher(mockClient, testBundleID) + deps := &pushTestDeps{ + Client: mockClient, + Token: &devicetokens.DeviceToken{ + Apple: &devicetokens.AppleDeviceToken{ + Token: testDeviceToken, + }, + }, + Notification: &Notification{}, + } + return ctx, pusher, deps +} + +var _ = Describe("APNSPusher", func() { + Describe("Push", func() { + It("requires an Apple token", func() { + ctx, pusher, deps := testDeps() + deps.Token.Apple = nil + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("can only use Apple device tokens"))) + }) + + Context("its environment", func() { + + for _, env := range []string{devicetokens.AppleEnvProduction, devicetokens.AppleEnvSandbox} { + It("is set via its token", func() { + ctx, pusher, deps := testDeps() + deps.Token.Apple.Environment = env + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(Succeed()) + // This is reaching into the implementation of + // APNS2Client, but there's no other way to test this. 
+ Expect(deps.Client.Env).To(Equal(env)) + }) + } + }) + + It("reports upstream errors", func() { + ctx, pusher, deps := testDeps() + deps.Client.Error = fmt.Errorf("test error") + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("test error"))) + }) + + Context("when a logger is available", func() { + It("logs", func() { + ctx, pusher, deps := testDeps() + testLogger := testlog.NewLogger() + ctx = log.NewContextWithLogger(ctx, testLogger) + deps.Client.Response = &apns2.Response{ + StatusCode: http.StatusOK, + ApnsID: "test-id", + } + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(Succeed()) + testLogger.AssertInfo("notification pushed", log.Fields{ + "apnsID": "test-id", + }) + }) + }) + + It("reports non-200 responses as errors", func() { + ctx, pusher, deps := testDeps() + deps.Client.Response = &apns2.Response{ + StatusCode: http.StatusBadRequest, + } + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("APNs returned non-200 status"))) + }) + }) +}) + +type mockAPNS2Client struct { + Response *apns2.Response + Error error + Env string +} + +func (c *mockAPNS2Client) Development() APNS2Client { + c.Env = devicetokens.AppleEnvSandbox + return c +} + +func (c *mockAPNS2Client) Production() APNS2Client { + c.Env = devicetokens.AppleEnvProduction + return c +} + +func (c *mockAPNS2Client) PushWithContext(_ apns2.Context, _ *apns2.Notification) (*apns2.Response, error) { + if c.Error != nil { + return nil, c.Error + } + if c.Response != nil { + return c.Response, nil + } + return nil, nil +} diff --git a/vendor/github.com/sideshow/apns2/.gitignore b/vendor/github.com/sideshow/apns2/.gitignore new file mode 100644 index 0000000000..5b77d5d22e --- /dev/null +++ b/vendor/github.com/sideshow/apns2/.gitignore @@ -0,0 +1,31 @@ +# Compiled Object files, Static and Dynamic 
libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +/*.p12 +/*.pem +/*.cer +/*.p8 + +.DS_Store \ No newline at end of file diff --git a/vendor/github.com/sideshow/apns2/LICENSE b/vendor/github.com/sideshow/apns2/LICENSE new file mode 100644 index 0000000000..59abbcf40e --- /dev/null +++ b/vendor/github.com/sideshow/apns2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Adam Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/github.com/sideshow/apns2/README.md b/vendor/github.com/sideshow/apns2/README.md new file mode 100644 index 0000000000..32e04190ce --- /dev/null +++ b/vendor/github.com/sideshow/apns2/README.md @@ -0,0 +1,216 @@ +# APNS/2 + +APNS/2 is a go package designed for simple, flexible and fast Apple Push Notifications on iOS, OSX and Safari using the new HTTP/2 Push provider API. + +[![Build Status](https://github.com/sideshow/apns2/actions/workflows/tests.yml/badge.svg)](https://github.com/sideshow/apns2/actions/workflows/tests.yml) [![Coverage Status](https://coveralls.io/repos/sideshow/apns2/badge.svg?branch=master&service=github)](https://coveralls.io/github/sideshow/apns2?branch=master) [![GoDoc](https://godoc.org/github.com/sideshow/apns2?status.svg)](https://godoc.org/github.com/sideshow/apns2) + +## Features + +- Uses new Apple APNs HTTP/2 connection +- Fast - See [notes on speed](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed) +- Works with go 1.7 and later +- Supports new Apple Token Based Authentication (JWT) +- Supports new iOS 10 features such as Collapse IDs, Subtitles and Mutable Notifications +- Supports new iOS 15 features interruptionLevel and relevanceScore +- Supports persistent connections to APNs +- Supports VoIP/PushKit notifications (iOS 8 and later) +- Modular & easy to use +- Tested and working in APNs production environment + +## Install + +- Make sure you have [Go](https://golang.org/doc/install) installed and have set your [GOPATH](https://golang.org/doc/code.html#GOPATH). 
+- Install apns2: + +```sh +go get -u github.com/sideshow/apns2 +``` + +If you are running the test suite you will also need to install testify: + +```sh +go get -u github.com/stretchr/testify +``` + +## Example + +```go +package main + +import ( + "log" + "fmt" + + "github.com/sideshow/apns2" + "github.com/sideshow/apns2/certificate" +) + +func main() { + + cert, err := certificate.FromP12File("../cert.p12", "") + if err != nil { + log.Fatal("Cert Error:", err) + } + + notification := &apns2.Notification{} + notification.DeviceToken = "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7" + notification.Topic = "com.sideshow.Apns2" + notification.Payload = []byte(`{"aps":{"alert":"Hello!"}}`) // See Payload section below + + // If you want to test push notifications for builds running directly from XCode (Development), use + // client := apns2.NewClient(cert).Development() + // For apps published to the app store or installed as an ad-hoc distribution use Production() + + client := apns2.NewClient(cert).Production() + res, err := client.Push(notification) + + if err != nil { + log.Fatal("Error:", err) + } + + fmt.Printf("%v %v %v\n", res.StatusCode, res.ApnsID, res.Reason) +} +``` + +## JWT Token Example + +Instead of using a `.p12` or `.pem` certificate as above, you can optionally use +APNs JWT _Provider Authentication Tokens_. First you will need a signing key (`.p8` file), Key ID and Team ID [from Apple](http://help.apple.com/xcode/mac/current/#/dev54d690a66). Once you have these details, you can create a new client: + +```go +authKey, err := token.AuthKeyFromFile("../AuthKey_XXX.p8") +if err != nil { + log.Fatal("token error:", err) +} + +token := &token.Token{ + AuthKey: authKey, + // KeyID from developer account (Certificates, Identifiers & Profiles -> Keys) + KeyID: "ABC123DEFG", + // TeamID from developer account (View Account -> Membership) + TeamID: "DEF123GHIJ", +} +... 
+ +client := apns2.NewTokenClient(token) +res, err := client.Push(notification) +``` + +- You can use one APNs signing key to authenticate tokens for multiple apps. +- A signing key works for both the development and production environments. +- A signing key doesn’t expire but can be revoked. + +## Notification + +At a minimum, a _Notification_ needs a _DeviceToken_, a _Topic_ and a _Payload_. + +```go +notification := &apns2.Notification{ + DeviceToken: "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7", + Topic: "com.sideshow.Apns2", + Payload: []byte(`{"aps":{"alert":"Hello!"}}`), +} +``` + +You can also set an optional _ApnsID_, _Expiration_ or _Priority_. + +```go +notification.ApnsID = "40636A2C-C093-493E-936A-2A4333C06DEA" +notification.Expiration = time.Now() +notification.Priority = apns2.PriorityLow +``` + +## Payload + +You can use raw bytes for the `notification.Payload` as above, or you can use the payload builder package which makes it easy to construct APNs payloads. + +```go +// {"aps":{"alert":"hello","badge":1},"key":"val"} + +payload := payload.NewPayload().Alert("hello").Badge(1).Custom("key", "val") + +notification.Payload = payload +client.Push(notification) +``` + +Refer to the [payload](https://godoc.org/github.com/sideshow/apns2/payload) docs for more info. + +## Response, Error handling + +APNS/2 draws the distinction between a valid response from Apple indicating whether or not the _Notification_ was sent or not, and an unrecoverable or unexpected _Error_; + +- An `Error` is returned if a non-recoverable error occurs, i.e. if there is a problem with the underlying _http.Client_ connection or _Certificate_, the payload was not sent, or a valid _Response_ was not received. +- A `Response` is returned if the payload was successfully sent to Apple and a documented response was received. 
This struct will contain more information about whether or not the push notification succeeded, its _apns-id_ and if applicable, more information around why it did not succeed. + +To check if a `Notification` was successfully sent; + +```go +res, err := client.Push(notification) +if err != nil { + log.Println("There was an error", err) + return +} + +if res.Sent() { + log.Println("Sent:", res.ApnsID) +} else { + fmt.Printf("Not Sent: %v %v %v\n", res.StatusCode, res.ApnsID, res.Reason) +} +``` + +## Context & Timeouts + +For better control over request cancellations and timeouts APNS/2 supports +contexts. Using a context can be helpful if you want to cancel all pushes when +the parent process is cancelled, or need finer grained control over individual +push timeouts. See the [Google post](https://blog.golang.org/context) for more +information on contexts. + +```go +ctx, cancel = context.WithTimeout(context.Background(), 10 * time.Second) +res, err := client.PushWithContext(ctx, notification) +defer cancel() +``` + +## Speed & Performance + +Also see the wiki page on [APNS HTTP 2 Push Speed](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed). + +For best performance, you should hold on to an `apns2.Client` instance and not re-create it every push. The underlying TLS connection itself can take a few seconds to connect and negotiate, so if you are setting up an `apns2.Client` and tearing it down every push, then this will greatly affect performance. (Apple suggest keeping the connection open all the time). + +You should also limit the amount of `apns2.Client` instances. The underlying transport has a http connection pool itself, so a single client instance will be enough for most users (One instance can potentially do 4,000+ pushes per second). If you need more than this then one instance per CPU core is a good starting point. + +Speed is greatly affected by the location of your server and the quality of your network connection. 
If you're just testing locally, behind a proxy or if your server is outside USA then you're not going to get great performance. With a good server located in AWS, you should be able to get [decent throughput](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed). + +## Command line tool + +APNS/2 has a command line tool that can be installed with `go get github.com/sideshow/apns2/apns2`. Usage: + +``` +apns2 --help +usage: apns2 --certificate-path=CERTIFICATE-PATH --topic=TOPIC [] + +Listens to STDIN to send notifications and writes APNS response code and reason to STDOUT. + +The expected format is: +Example: aff0c63d9eaa63ad161bafee732d5bc2c31f66d552054718ff19ce314371e5d0 {"aps": {"alert": "hi"}} +Flags: + --help Show context-sensitive help (also try --help-long and --help-man). + -c, --certificate-path=CERTIFICATE-PATH + Path to certificate file. + -t, --topic=TOPIC The topic of the remote notification, which is typically the bundle ID for your app + -m, --mode="production" APNS server to send notifications to. `production` or `development`. Defaults to `production` + --version Show application version. +``` + +## License + +The MIT License (MIT) + +Copyright (c) 2016 Adam Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sideshow/apns2/client.go b/vendor/github.com/sideshow/apns2/client.go new file mode 100644 index 0000000000..cd98dd4228 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/client.go @@ -0,0 +1,238 @@ +// Package apns2 is a go Apple Push Notification Service (APNs) provider that +// allows you to send remote notifications to your iOS, tvOS, and OS X +// apps, using the new APNs HTTP/2 network protocol. +package apns2 + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "io" + "net" + "net/http" + "strconv" + "time" + + "github.com/sideshow/apns2/token" + "golang.org/x/net/http2" +) + +// Apple HTTP/2 Development & Production urls +const ( + HostDevelopment = "https://api.sandbox.push.apple.com" + HostProduction = "https://api.push.apple.com" +) + +// DefaultHost is a mutable var for testing purposes +var DefaultHost = HostDevelopment + +var ( + // HTTPClientTimeout specifies a time limit for requests made by the + // HTTPClient. The timeout includes connection time, any redirects, + // and reading the response body. + HTTPClientTimeout = 60 * time.Second + + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. If + // zero, no health check is performed. + ReadIdleTimeout = 15 * time.Second + + // TCPKeepAlive specifies the keep-alive period for an active network + // connection. If zero, keep-alive probes are sent with a default value + // (currently 15 seconds) + TCPKeepAlive = 15 * time.Second + + // TLSDialTimeout is the maximum amount of time a dial will wait for a connect + // to complete. 
+ TLSDialTimeout = 20 * time.Second +) + +// DialTLS is the default dial function for creating TLS connections for +// non-proxied HTTPS requests. +var DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer := &net.Dialer{ + Timeout: TLSDialTimeout, + KeepAlive: TCPKeepAlive, + } + return tls.DialWithDialer(dialer, network, addr, cfg) +} + +// Client represents a connection with the APNs +type Client struct { + Host string + Certificate tls.Certificate + Token *token.Token + HTTPClient *http.Client +} + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. Context's methods may be called by multiple goroutines +// simultaneously. +type Context interface { + context.Context +} + +type connectionCloser interface { + CloseIdleConnections() +} + +// NewClient returns a new Client with an underlying http.Client configured with +// the correct APNs HTTP/2 transport settings. It does not connect to the APNs +// until the first Notification is sent via the Push method. +// +// As per the Apple APNs Provider API, you should keep a handle on this client +// so that you can keep your connections with APNs open across multiple +// notifications; don’t repeatedly open and close connections. APNs treats rapid +// connection and disconnection as a denial-of-service attack. +// +// If your use case involves multiple long-lived connections, consider using +// the ClientManager, which manages clients for you. 
+func NewClient(certificate tls.Certificate) *Client { + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{certificate}, + } + if len(certificate.Certificate) > 0 { + tlsConfig.BuildNameToCertificate() + } + transport := &http2.Transport{ + TLSClientConfig: tlsConfig, + DialTLS: DialTLS, + ReadIdleTimeout: ReadIdleTimeout, + } + return &Client{ + HTTPClient: &http.Client{ + Transport: transport, + Timeout: HTTPClientTimeout, + }, + Certificate: certificate, + Host: DefaultHost, + } +} + +// NewTokenClient returns a new Client with an underlying http.Client configured +// with the correct APNs HTTP/2 transport settings. It does not connect to the APNs +// until the first Notification is sent via the Push method. +// +// As per the Apple APNs Provider API, you should keep a handle on this client +// so that you can keep your connections with APNs open across multiple +// notifications; don’t repeatedly open and close connections. APNs treats rapid +// connection and disconnection as a denial-of-service attack. +func NewTokenClient(token *token.Token) *Client { + transport := &http2.Transport{ + DialTLS: DialTLS, + ReadIdleTimeout: ReadIdleTimeout, + } + return &Client{ + Token: token, + HTTPClient: &http.Client{ + Transport: transport, + Timeout: HTTPClientTimeout, + }, + Host: DefaultHost, + } +} + +// Development sets the Client to use the APNs development push endpoint. +func (c *Client) Development() *Client { + c.Host = HostDevelopment + return c +} + +// Production sets the Client to use the APNs production push endpoint. +func (c *Client) Production() *Client { + c.Host = HostProduction + return c +} + +// Push sends a Notification to the APNs gateway. If the underlying http.Client +// is not currently connected, this method will attempt to reconnect +// transparently before sending the notification. It will return a Response +// indicating whether the notification was accepted or rejected by the APNs +// gateway, or an error if something goes wrong. 
+// +// Use PushWithContext if you need better cancellation and timeout control. +func (c *Client) Push(n *Notification) (*Response, error) { + return c.PushWithContext(context.Background(), n) +} + +// PushWithContext sends a Notification to the APNs gateway. Context carries a +// deadline and a cancellation signal and allows you to close long running +// requests when the context timeout is exceeded. Context can be nil, for +// backwards compatibility. +// +// If the underlying http.Client is not currently connected, this method will +// attempt to reconnect transparently before sending the notification. It will +// return a Response indicating whether the notification was accepted or +// rejected by the APNs gateway, or an error if something goes wrong. +func (c *Client) PushWithContext(ctx Context, n *Notification) (*Response, error) { + payload, err := json.Marshal(n) + if err != nil { + return nil, err + } + + url := c.Host + "/3/device/" + n.DeviceToken + request, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + + if c.Token != nil { + c.setTokenHeader(request) + } + + setHeaders(request, n) + + response, err := c.HTTPClient.Do(request) + if err != nil { + return nil, err + } + defer response.Body.Close() + + r := &Response{} + r.StatusCode = response.StatusCode + r.ApnsID = response.Header.Get("apns-id") + + decoder := json.NewDecoder(response.Body) + if err := decoder.Decode(r); err != nil && err != io.EOF { + return &Response{}, err + } + return r, nil +} + +// CloseIdleConnections closes any underlying connections which were previously +// connected from previous requests but are now sitting idle. It will not +// interrupt any connections currently in use. 
+func (c *Client) CloseIdleConnections() { + c.HTTPClient.Transport.(connectionCloser).CloseIdleConnections() +} + +func (c *Client) setTokenHeader(r *http.Request) { + bearer := c.Token.GenerateIfExpired() + r.Header.Set("authorization", "bearer "+bearer) +} + +func setHeaders(r *http.Request, n *Notification) { + r.Header.Set("Content-Type", "application/json; charset=utf-8") + if n.Topic != "" { + r.Header.Set("apns-topic", n.Topic) + } + if n.ApnsID != "" { + r.Header.Set("apns-id", n.ApnsID) + } + if n.CollapseID != "" { + r.Header.Set("apns-collapse-id", n.CollapseID) + } + if n.Priority > 0 { + r.Header.Set("apns-priority", strconv.Itoa(n.Priority)) + } + if !n.Expiration.IsZero() { + r.Header.Set("apns-expiration", strconv.FormatInt(n.Expiration.Unix(), 10)) + } + if n.PushType != "" { + r.Header.Set("apns-push-type", string(n.PushType)) + } else { + r.Header.Set("apns-push-type", string(PushTypeAlert)) + } + +} diff --git a/vendor/github.com/sideshow/apns2/client_manager.go b/vendor/github.com/sideshow/apns2/client_manager.go new file mode 100644 index 0000000000..bb4bdf900d --- /dev/null +++ b/vendor/github.com/sideshow/apns2/client_manager.go @@ -0,0 +1,162 @@ +package apns2 + +import ( + "container/list" + "crypto/sha1" + "crypto/tls" + "sync" + "time" +) + +type managerItem struct { + key [sha1.Size]byte + client *Client + lastUsed time.Time +} + +// ClientManager is a way to manage multiple connections to the APNs. +type ClientManager struct { + // MaxSize is the maximum number of clients allowed in the manager. When + // this limit is reached, the least recently used client is evicted. Set + // zero for no limit. + MaxSize int + + // MaxAge is the maximum age of clients in the manager. Upon retrieval, if + // a client has remained unused in the manager for this duration or longer, + // it is evicted and nil is returned. Set zero to disable this + // functionality. 
+ MaxAge time.Duration + + // Factory is the function which constructs clients if not found in the + // manager. + Factory func(certificate tls.Certificate) *Client + + cache map[[sha1.Size]byte]*list.Element + ll *list.List + mu sync.Mutex + once sync.Once +} + +// NewClientManager returns a new ClientManager for prolonged, concurrent usage +// of multiple APNs clients. ClientManager is flexible enough to work best for +// your use case. When a client is not found in the manager, Get will return +// the result of calling Factory, which can be a Client or nil. +// +// Having multiple clients per certificate in the manager is not allowed. +// +// By default, MaxSize is 64, MaxAge is 10 minutes, and Factory always returns +// a Client with default options. +func NewClientManager() *ClientManager { + manager := &ClientManager{ + MaxSize: 64, + MaxAge: 10 * time.Minute, + Factory: NewClient, + } + + manager.initInternals() + + return manager +} + +// Add adds a Client to the manager. You can use this to individually configure +// Clients in the manager. +func (m *ClientManager) Add(client *Client) { + m.initInternals() + m.mu.Lock() + defer m.mu.Unlock() + + key := cacheKey(client.Certificate) + now := time.Now() + if ele, hit := m.cache[key]; hit { + item := ele.Value.(*managerItem) + item.client = client + item.lastUsed = now + m.ll.MoveToFront(ele) + return + } + ele := m.ll.PushFront(&managerItem{key, client, now}) + m.cache[key] = ele + if m.MaxSize != 0 && m.ll.Len() > m.MaxSize { + m.mu.Unlock() + m.removeOldest() + m.mu.Lock() + } +} + +// Get gets a Client from the manager. If a Client is not found in the manager +// or if a Client has remained in the manager longer than MaxAge, Get will call +// the ClientManager's Factory function, store the result in the manager if +// non-nil, and return it. 
+func (m *ClientManager) Get(certificate tls.Certificate) *Client { + m.initInternals() + m.mu.Lock() + defer m.mu.Unlock() + + key := cacheKey(certificate) + now := time.Now() + if ele, hit := m.cache[key]; hit { + item := ele.Value.(*managerItem) + if m.MaxAge != 0 && item.lastUsed.Before(now.Add(-m.MaxAge)) { + c := m.Factory(certificate) + if c == nil { + return nil + } + item.client = c + } + item.lastUsed = now + m.ll.MoveToFront(ele) + return item.client + } + + c := m.Factory(certificate) + if c == nil { + return nil + } + m.mu.Unlock() + m.Add(c) + m.mu.Lock() + return c +} + +// Len returns the current size of the ClientManager. +func (m *ClientManager) Len() int { + if m.cache == nil { + return 0 + } + m.mu.Lock() + defer m.mu.Unlock() + return m.ll.Len() +} + +func (m *ClientManager) initInternals() { + m.once.Do(func() { + m.cache = map[[sha1.Size]byte]*list.Element{} + m.ll = list.New() + }) +} + +func (m *ClientManager) removeOldest() { + m.mu.Lock() + ele := m.ll.Back() + m.mu.Unlock() + if ele != nil { + m.removeElement(ele) + } +} + +func (m *ClientManager) removeElement(e *list.Element) { + m.mu.Lock() + defer m.mu.Unlock() + m.ll.Remove(e) + delete(m.cache, e.Value.(*managerItem).key) +} + +func cacheKey(certificate tls.Certificate) [sha1.Size]byte { + var data []byte + + for _, cert := range certificate.Certificate { + data = append(data, cert...) + } + + return sha1.Sum(data) +} diff --git a/vendor/github.com/sideshow/apns2/notification.go b/vendor/github.com/sideshow/apns2/notification.go new file mode 100644 index 0000000000..69bf312de5 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/notification.go @@ -0,0 +1,148 @@ +package apns2 + +import ( + "encoding/json" + "time" +) + +// EPushType defines the value for the apns-push-type header +type EPushType string + +const ( + // PushTypeAlert is used for notifications that trigger a user interaction — + // for example, an alert, badge, or sound. 
If you set this push type, the + // topic field must use your app’s bundle ID as the topic. If the + // notification requires immediate action from the user, set notification + // priority to 10; otherwise use 5. The alert push type is required on + // watchOS 6 and later. It is recommended on macOS, iOS, tvOS, and iPadOS. + PushTypeAlert EPushType = "alert" + + // PushTypeBackground is used for notifications that deliver content in the + // background, and don’t trigger any user interactions. If you set this push + // type, the topic field must use your app’s bundle ID as the topic. Always + // use priority 5. Using priority 10 is an error. The background push type + // is required on watchOS 6 and later. It is recommended on macOS, iOS, + // tvOS, and iPadOS. + PushTypeBackground EPushType = "background" + + // PushTypeLocation is used for notifications that request a user’s + // location. If you set this push type, the topic field must use your app’s + // bundle ID with .location-query appended to the end. The location push + // type is recommended for iOS and iPadOS. It isn’t available on macOS, + // tvOS, and watchOS. If the location query requires an immediate response + // from the Location Push Service Extension, set notification apns-priority + // to 10; otherwise, use 5. The location push type supports only token-based + // authentication. + PushTypeLocation EPushType = "location" + + // PushTypeVOIP is used for notifications that provide information about an + // incoming Voice-over-IP (VoIP) call. If you set this push type, the topic + // field must use your app’s bundle ID with .voip appended to the end. If + // you’re using certificate-based authentication, you must also register the + // certificate for VoIP services. The voip push type is not available on + // watchOS. It is recommended on macOS, iOS, tvOS, and iPadOS. 
+ PushTypeVOIP EPushType = "voip" + + // PushTypeComplication is used for notifications that contain update + // information for a watchOS app’s complications. If you set this push type, + // the topic field must use your app’s bundle ID with .complication appended + // to the end. If you’re using certificate-based authentication, you must + // also register the certificate for WatchKit services. The complication + // push type is recommended for watchOS and iOS. It is not available on + // macOS, tvOS, and iPadOS. + PushTypeComplication EPushType = "complication" + + // PushTypeFileProvider is used to signal changes to a File Provider + // extension. If you set this push type, the topic field must use your app’s + // bundle ID with .pushkit.fileprovider appended to the end. The + // fileprovider push type is not available on watchOS. It is recommended on + // macOS, iOS, tvOS, and iPadOS. + PushTypeFileProvider EPushType = "fileprovider" + + // PushTypeMDM is used for notifications that tell managed devices to + // contact the MDM server. If you set this push type, you must use the topic + // from the UID attribute in the subject of your MDM push certificate. + PushTypeMDM EPushType = "mdm" +) + +const ( + // PriorityLow will tell APNs to send the push message at a time that takes + // into account power considerations for the device. Notifications with this + // priority might be grouped and delivered in bursts. They are throttled, + // and in some cases are not delivered. + PriorityLow = 5 + + // PriorityHigh will tell APNs to send the push message immediately. + // Notifications with this priority must trigger an alert, sound, or badge + // on the target device. It is an error to use this priority for a push + // notification that contains only the content-available key. + PriorityHigh = 10 +) + +// Notification represents the the data and metadata for a APNs Remote Notification. 
+type Notification struct { + + // An optional canonical UUID that identifies the notification. The + // canonical form is 32 lowercase hexadecimal digits, displayed in five + // groups separated by hyphens in the form 8-4-4-4-12. An example UUID is as + // follows: + // + // 123e4567-e89b-12d3-a456-42665544000 + // + // If you don't set this, a new UUID is created by APNs and returned in the + // response. + ApnsID string + + // A string which allows multiple notifications with the same collapse + // identifier to be displayed to the user as a single notification. The + // value should not exceed 64 bytes. + CollapseID string + + // A string containing hexadecimal bytes of the device token for the target + // device. + DeviceToken string + + // The topic of the remote notification, which is typically the bundle ID + // for your app. The certificate you create in the Apple Developer Member + // Center must include the capability for this topic. If your certificate + // includes multiple topics, you must specify a value for this header. If + // you omit this header and your APNs certificate does not specify multiple + // topics, the APNs server uses the certificate’s Subject as the default + // topic. + Topic string + + // An optional time at which the notification is no longer valid and can be + // discarded by APNs. If this value is in the past, APNs treats the + // notification as if it expires immediately and does not store the + // notification or attempt to redeliver it. If this value is left as the + // default (ie, Expiration.IsZero()) an expiration header will not added to + // the http request. + Expiration time.Time + + // The priority of the notification. Specify ether apns.PriorityHigh (10) or + // apns.PriorityLow (5) If you don't set this, the APNs server will set the + // priority to 10. + Priority int + + // A byte array containing the JSON-encoded payload of this push notification. 
+ // Refer to "The Remote Notification Payload" section in the Apple Local and + // Remote Notification Programming Guide for more info. + Payload interface{} + + // The pushtype of the push notification. If this values is left as the + // default an apns-push-type header with value 'alert' will be added to the + // http request. + PushType EPushType +} + +// MarshalJSON converts the notification payload to JSON. +func (n *Notification) MarshalJSON() ([]byte, error) { + switch payload := n.Payload.(type) { + case string: + return []byte(payload), nil + case []byte: + return payload, nil + default: + return json.Marshal(payload) + } +} diff --git a/vendor/github.com/sideshow/apns2/payload/builder.go b/vendor/github.com/sideshow/apns2/payload/builder.go new file mode 100644 index 0000000000..a2ff30da10 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/payload/builder.go @@ -0,0 +1,402 @@ +// Package payload is a helper package which contains a payload +// builder to make constructing notification payloads easier. +package payload + +import "encoding/json" + +// InterruptionLevel defines the value for the payload aps interruption-level +type EInterruptionLevel string + +const ( + // InterruptionLevelPassive is used to indicate that notification be delivered in a passive manner. + InterruptionLevelPassive EInterruptionLevel = "passive" + + // InterruptionLevelActive is used to indicate the importance and delivery timing of a notification. + InterruptionLevelActive EInterruptionLevel = "active" + + // InterruptionLevelTimeSensitive is used to indicate the importance and delivery timing of a notification. + InterruptionLevelTimeSensitive EInterruptionLevel = "time-sensitive" + + // InterruptionLevelCritical is used to indicate the importance and delivery timing of a notification. + // This interruption level requires an approved entitlement from Apple. 
+ // See: https://developer.apple.com/documentation/usernotifications/unnotificationinterruptionlevel/ + InterruptionLevelCritical EInterruptionLevel = "critical" +) + +// Payload represents a notification which holds the content that will be +// marshalled as JSON. +type Payload struct { + content map[string]interface{} +} + +type aps struct { + Alert interface{} `json:"alert,omitempty"` + Badge interface{} `json:"badge,omitempty"` + Category string `json:"category,omitempty"` + ContentAvailable int `json:"content-available,omitempty"` + InterruptionLevel EInterruptionLevel `json:"interruption-level,omitempty"` + MutableContent int `json:"mutable-content,omitempty"` + RelevanceScore interface{} `json:"relevance-score,omitempty"` + Sound interface{} `json:"sound,omitempty"` + ThreadID string `json:"thread-id,omitempty"` + URLArgs []string `json:"url-args,omitempty"` +} + +type alert struct { + Action string `json:"action,omitempty"` + ActionLocKey string `json:"action-loc-key,omitempty"` + Body string `json:"body,omitempty"` + LaunchImage string `json:"launch-image,omitempty"` + LocArgs []string `json:"loc-args,omitempty"` + LocKey string `json:"loc-key,omitempty"` + Title string `json:"title,omitempty"` + Subtitle string `json:"subtitle,omitempty"` + TitleLocArgs []string `json:"title-loc-args,omitempty"` + TitleLocKey string `json:"title-loc-key,omitempty"` + SummaryArg string `json:"summary-arg,omitempty"` + SummaryArgCount int `json:"summary-arg-count,omitempty"` +} + +type sound struct { + Critical int `json:"critical,omitempty"` + Name string `json:"name,omitempty"` + Volume float32 `json:"volume,omitempty"` +} + +// NewPayload returns a new Payload struct +func NewPayload() *Payload { + return &Payload{ + map[string]interface{}{ + "aps": &aps{}, + }, + } +} + +// Alert sets the aps alert on the payload. +// This will display a notification alert message to the user. 
+// +// {"aps":{"alert":alert}}` +func (p *Payload) Alert(alert interface{}) *Payload { + p.aps().Alert = alert + return p +} + +// Badge sets the aps badge on the payload. +// This will display a numeric badge on the app icon. +// +// {"aps":{"badge":b}} +func (p *Payload) Badge(b int) *Payload { + p.aps().Badge = b + return p +} + +// ZeroBadge sets the aps badge on the payload to 0. +// This will clear the badge on the app icon. +// +// {"aps":{"badge":0}} +func (p *Payload) ZeroBadge() *Payload { + p.aps().Badge = 0 + return p +} + +// UnsetBadge removes the badge attribute from the payload. +// This will leave the badge on the app icon unchanged. +// If you wish to clear the app icon badge, use ZeroBadge() instead. +// +// {"aps":{}} +func (p *Payload) UnsetBadge() *Payload { + p.aps().Badge = nil + return p +} + +// Sound sets the aps sound on the payload. +// This will play a sound from the app bundle, or the default sound otherwise. +// +// {"aps":{"sound":sound}} +func (p *Payload) Sound(sound interface{}) *Payload { + p.aps().Sound = sound + return p +} + +// ContentAvailable sets the aps content-available on the payload to 1. +// This will indicate to the app that there is new content available to download +// and launch the app in the background. +// +// {"aps":{"content-available":1}} +func (p *Payload) ContentAvailable() *Payload { + p.aps().ContentAvailable = 1 + return p +} + +// MutableContent sets the aps mutable-content on the payload to 1. +// This will indicate to the to the system to call your Notification Service +// extension to mutate or replace the notification's content. +// +// {"aps":{"mutable-content":1}} +func (p *Payload) MutableContent() *Payload { + p.aps().MutableContent = 1 + return p +} + +// Custom payload + +// Custom sets a custom key and value on the payload. +// This will add custom key/value data to the notification payload at root level. 
+// +// {"aps":{}, key:value} +func (p *Payload) Custom(key string, val interface{}) *Payload { + p.content[key] = val + return p +} + +// Alert dictionary + +// AlertTitle sets the aps alert title on the payload. +// This will display a short string describing the purpose of the notification. +// Apple Watch & Safari display this string as part of the notification interface. +// +// {"aps":{"alert":{"title":title}}} +func (p *Payload) AlertTitle(title string) *Payload { + p.aps().alert().Title = title + return p +} + +// AlertTitleLocKey sets the aps alert title localization key on the payload. +// This is the key to a title string in the Localizable.strings file for the +// current localization. See Localized Formatted Strings in Apple documentation +// for more information. +// +// {"aps":{"alert":{"title-loc-key":key}}} +func (p *Payload) AlertTitleLocKey(key string) *Payload { + p.aps().alert().TitleLocKey = key + return p +} + +// AlertTitleLocArgs sets the aps alert title localization args on the payload. +// These are the variable string values to appear in place of the format +// specifiers in title-loc-key. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"title-loc-args":args}}} +func (p *Payload) AlertTitleLocArgs(args []string) *Payload { + p.aps().alert().TitleLocArgs = args + return p +} + +// AlertSubtitle sets the aps alert subtitle on the payload. +// This will display a short string describing the purpose of the notification. +// Apple Watch & Safari display this string as part of the notification interface. +// +// {"aps":{"alert":{"subtitle":"subtitle"}}} +func (p *Payload) AlertSubtitle(subtitle string) *Payload { + p.aps().alert().Subtitle = subtitle + return p +} + +// AlertBody sets the aps alert body on the payload. +// This is the text of the alert message. 
+// +// {"aps":{"alert":{"body":body}}} +func (p *Payload) AlertBody(body string) *Payload { + p.aps().alert().Body = body + return p +} + +// AlertLaunchImage sets the aps launch image on the payload. +// This is the filename of an image file in the app bundle. The image is used +// as the launch image when users tap the action button or move the action +// slider. +// +// {"aps":{"alert":{"launch-image":image}}} +func (p *Payload) AlertLaunchImage(image string) *Payload { + p.aps().alert().LaunchImage = image + return p +} + +// AlertLocArgs sets the aps alert localization args on the payload. +// These are the variable string values to appear in place of the format +// specifiers in loc-key. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"loc-args":args}}} +func (p *Payload) AlertLocArgs(args []string) *Payload { + p.aps().alert().LocArgs = args + return p +} + +// AlertLocKey sets the aps alert localization key on the payload. +// This is the key to an alert-message string in the Localizable.strings file +// for the current localization. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"loc-key":key}}} +func (p *Payload) AlertLocKey(key string) *Payload { + p.aps().alert().LocKey = key + return p +} + +// AlertAction sets the aps alert action on the payload. +// This is the label of the action button, if the user sets the notifications +// to appear as alerts. This label should be succinct, such as “Details” or +// “Read more”. If omitted, the default value is “Show”. +// +// {"aps":{"alert":{"action":action}}} +func (p *Payload) AlertAction(action string) *Payload { + p.aps().alert().Action = action + return p +} + +// AlertActionLocKey sets the aps alert action localization key on the payload. 
+// This is the the string used as a key to get a localized string in the current +// localization to use for the notfication right button’s title instead of +// “View”. See Localized Formatted Strings in Apple documentation for more +// information. +// +// {"aps":{"alert":{"action-loc-key":key}}} +func (p *Payload) AlertActionLocKey(key string) *Payload { + p.aps().alert().ActionLocKey = key + return p +} + +// AlertSummaryArg sets the aps alert summary arg key on the payload. +// This is the string that is used as a key to fill in an argument +// at the bottom of a notification to provide more context, such as +// a name associated with the sender of the notification. +// +// {"aps":{"alert":{"summary-arg":key}}} +func (p *Payload) AlertSummaryArg(key string) *Payload { + p.aps().alert().SummaryArg = key + return p +} + +// AlertSummaryArgCount sets the aps alert summary arg count key on the payload. +// This integer sets a custom "weight" on the notification, effectively +// allowing a notification to be viewed internally as two. For example if +// a notification encompasses 3 messages, you can set it to 3. +// +// {"aps":{"alert":{"summary-arg-count":key}}} +func (p *Payload) AlertSummaryArgCount(key int) *Payload { + p.aps().alert().SummaryArgCount = key + return p +} + +// General + +// Category sets the aps category on the payload. +// This is a string value that represents the identifier property of the +// UIMutableUserNotificationCategory object you created to define custom actions. +// +// {"aps":{"category":category}} +func (p *Payload) Category(category string) *Payload { + p.aps().Category = category + return p +} + +// Mdm sets the mdm on the payload. +// This is for Apple Mobile Device Management (mdm) payloads. +// +// {"aps":{}:"mdm":mdm} +func (p *Payload) Mdm(mdm string) *Payload { + p.content["mdm"] = mdm + return p +} + +// ThreadID sets the aps thread id on the payload. 
+// This is for the purpose of updating the contents of a View Controller in a +// Notification Content app extension when a new notification arrives. If a +// new notification arrives whose thread-id value matches the thread-id of the +// notification already being displayed, the didReceiveNotification method +// is called. +// +// {"aps":{"thread-id":id}} +func (p *Payload) ThreadID(threadID string) *Payload { + p.aps().ThreadID = threadID + return p +} + +// URLArgs sets the aps category on the payload. +// This specifies an array of values that are paired with the placeholders +// inside the urlFormatString value of your website.json file. +// See Apple Notification Programming Guide for Websites. +// +// {"aps":{"url-args":urlArgs}} +func (p *Payload) URLArgs(urlArgs []string) *Payload { + p.aps().URLArgs = urlArgs + return p +} + +// SoundName sets the name value on the aps sound dictionary. +// This function makes the notification a critical alert, which should be pre-approved by Apple. +// See: https://developer.apple.com/contact/request/notifications-critical-alerts-entitlement/ +// +// {"aps":{"sound":{"critical":1,"name":name,"volume":1.0}}} +func (p *Payload) SoundName(name string) *Payload { + p.aps().sound().Name = name + return p +} + +// SoundVolume sets the volume value on the aps sound dictionary. +// This function makes the notification a critical alert, which should be pre-approved by Apple. +// See: https://developer.apple.com/contact/request/notifications-critical-alerts-entitlement/ +// +// {"aps":{"sound":{"critical":1,"name":"default","volume":volume}}} +func (p *Payload) SoundVolume(volume float32) *Payload { + p.aps().sound().Volume = volume + return p +} + +// InterruptionLevel defines the value for the payload aps interruption-level +// This is to indicate the importance and delivery timing of a notification. +// (Using InterruptionLevelCritical requires an approved entitlement from Apple.) 
+// See: https://developer.apple.com/documentation/usernotifications/unnotificationinterruptionlevel/ +// +// {"aps":{"interruption-level":passive}} +func (p *Payload) InterruptionLevel(interruptionLevel EInterruptionLevel) *Payload { + p.aps().InterruptionLevel = interruptionLevel + return p +} + +// The relevance score, a number between 0 and 1, +// that the system uses to sort the notifications from your app. +// The highest score gets featured in the notification summary. +// See https://developer.apple.com/documentation/usernotifications/unnotificationcontent/3821031-relevancescore. +// +// {"aps":{"relevance-score":0.1}} +func (p *Payload) RelevanceScore(b float32) *Payload { + p.aps().RelevanceScore = b + return p +} + +// Unsets the relevance score +// that the system uses to sort the notifications from your app. +// The highest score gets featured in the notification summary. +// See https://developer.apple.com/documentation/usernotifications/unnotificationcontent/3821031-relevancescore. 
+// +// {"aps":{"relevance-score":0.1}} +func (p *Payload) UnsetRelevanceScore() *Payload { + p.aps().RelevanceScore = nil + return p +} + +// MarshalJSON returns the JSON encoded version of the Payload +func (p *Payload) MarshalJSON() ([]byte, error) { + return json.Marshal(p.content) +} + +func (p *Payload) aps() *aps { + return p.content["aps"].(*aps) +} + +func (a *aps) alert() *alert { + if _, ok := a.Alert.(*alert); !ok { + a.Alert = &alert{} + } + return a.Alert.(*alert) +} + +func (a *aps) sound() *sound { + if _, ok := a.Sound.(*sound); !ok { + a.Sound = &sound{Critical: 1, Name: "default", Volume: 1.0} + } + return a.Sound.(*sound) +} diff --git a/vendor/github.com/sideshow/apns2/response.go b/vendor/github.com/sideshow/apns2/response.go new file mode 100644 index 0000000000..99d6345634 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/response.go @@ -0,0 +1,156 @@ +package apns2 + +import ( + "net/http" + "strconv" + "time" +) + +// StatusSent is a 200 response. +const StatusSent = http.StatusOK + +// The possible Reason error codes returned from APNs. From table 4 in the +// Handling Notification Responses from APNs article +const ( + // 400 The collapse identifier exceeds the maximum allowed size + ReasonBadCollapseID = "BadCollapseId" + + // 400 The specified device token was bad. Verify that the request contains a + // valid token and that the token matches the environment. + ReasonBadDeviceToken = "BadDeviceToken" + + // 400 The apns-expiration value is bad. + ReasonBadExpirationDate = "BadExpirationDate" + + // 400 The apns-id value is bad. + ReasonBadMessageID = "BadMessageId" + + // 400 The apns-priority value is bad. + ReasonBadPriority = "BadPriority" + + // 400 The apns-topic was invalid. + ReasonBadTopic = "BadTopic" + + // 400 The device token does not match the specified topic. + ReasonDeviceTokenNotForTopic = "DeviceTokenNotForTopic" + + // 400 One or more headers were repeated. 
+ ReasonDuplicateHeaders = "DuplicateHeaders" + + // 400 Idle time out. + ReasonIdleTimeout = "IdleTimeout" + + // 400 The apns-push-type value is invalid. + ReasonInvalidPushType = "InvalidPushType" + + // 400 The device token is not specified in the request :path. Verify that the + // :path header contains the device token. + ReasonMissingDeviceToken = "MissingDeviceToken" + + // 400 The apns-topic header of the request was not specified and was + // required. The apns-topic header is mandatory when the client is connected + // using a certificate that supports multiple topics. + ReasonMissingTopic = "MissingTopic" + + // 400 The message payload was empty. + ReasonPayloadEmpty = "PayloadEmpty" + + // 400 Pushing to this topic is not allowed. + ReasonTopicDisallowed = "TopicDisallowed" + + // 403 The certificate was bad. + ReasonBadCertificate = "BadCertificate" + + // 403 The client certificate was for the wrong environment. + ReasonBadCertificateEnvironment = "BadCertificateEnvironment" + + // 403 The provider token is stale and a new token should be generated. + ReasonExpiredProviderToken = "ExpiredProviderToken" + + // 403 The specified action is not allowed. + ReasonForbidden = "Forbidden" + + // 403 The provider token is not valid or the token signature could not be + // verified. + ReasonInvalidProviderToken = "InvalidProviderToken" + + // 403 No provider certificate was used to connect to APNs and Authorization + // header was missing or no provider token was specified. + ReasonMissingProviderToken = "MissingProviderToken" + + // 404 The request contained a bad :path value. + ReasonBadPath = "BadPath" + + // 405 The specified :method was not POST. + ReasonMethodNotAllowed = "MethodNotAllowed" + + // 410 The device token is inactive for the specified topic. + ReasonUnregistered = "Unregistered" + + // 413 The message payload was too large. 
See Creating the Remote Notification + // Payload in the Apple Local and Remote Notification Programming Guide for + // details on maximum payload size. + ReasonPayloadTooLarge = "PayloadTooLarge" + + // 429 The provider token is being updated too often. + ReasonTooManyProviderTokenUpdates = "TooManyProviderTokenUpdates" + + // 429 Too many requests were made consecutively to the same device token. + ReasonTooManyRequests = "TooManyRequests" + + // 500 An internal server error occurred. + ReasonInternalServerError = "InternalServerError" + + // 503 The service is unavailable. + ReasonServiceUnavailable = "ServiceUnavailable" + + // 503 The server is shutting down. + ReasonShutdown = "Shutdown" +) + +// Response represents a result from the APNs gateway indicating whether a +// notification was accepted or rejected and (if applicable) the metadata +// surrounding the rejection. +type Response struct { + + // The HTTP status code returned by APNs. + // A 200 value indicates that the notification was successfully sent. + // For a list of other possible status codes, see table 6-4 in the Apple Local + // and Remote Notification Programming Guide. + StatusCode int + + // The APNs error string indicating the reason for the notification failure (if + // any). The error code is specified as a string. For a list of possible + // values, see the Reason constants above. + // If the notification was accepted, this value will be "". + Reason string + + // The APNs ApnsID value from the Notification. If you didn't set an ApnsID on the + // Notification, this will be a new unique UUID which has been created by APNs. + ApnsID string + + // If the value of StatusCode is 410, this is the last time at which APNs + // confirmed that the device token was no longer valid for the topic. + Timestamp Time +} + +// Sent returns whether or not the notification was successfully sent. +// This is the same as checking if the StatusCode == 200. 
+func (c *Response) Sent() bool { + return c.StatusCode == StatusSent +} + +// Time represents a device uninstall time +type Time struct { + time.Time +} + +// UnmarshalJSON converts an epoch date into a Time struct. +func (t *Time) UnmarshalJSON(b []byte) error { + ts, err := strconv.ParseInt(string(b), 10, 64) + if err != nil { + return err + } + t.Time = time.Unix(ts/1000, 0) + return nil +} diff --git a/vendor/github.com/sideshow/apns2/token/token.go b/vendor/github.com/sideshow/apns2/token/token.go new file mode 100644 index 0000000000..26fec563dd --- /dev/null +++ b/vendor/github.com/sideshow/apns2/token/token.go @@ -0,0 +1,107 @@ +package token + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" + "io/ioutil" + "sync" + "time" + + "github.com/golang-jwt/jwt/v4" +) + +const ( + // TokenTimeout is the period of time in seconds that a token is valid for. + // If the timestamp for token issue is not within the last hour, APNs + // rejects subsequent push messages. This is set to under an hour so that + // we generate a new token before the existing one expires. + TokenTimeout = 3000 +) + +// Possible errors when parsing a .p8 file. +var ( + ErrAuthKeyNotPem = errors.New("token: AuthKey must be a valid .p8 PEM file") + ErrAuthKeyNotECDSA = errors.New("token: AuthKey must be of type ecdsa.PrivateKey") + ErrAuthKeyNil = errors.New("token: AuthKey was nil") +) + +// Token represents an Apple Provider Authentication Token (JSON Web Token). +type Token struct { + sync.Mutex + AuthKey *ecdsa.PrivateKey + KeyID string + TeamID string + IssuedAt int64 + Bearer string +} + +// AuthKeyFromFile loads a .p8 certificate from a local file and returns a +// *ecdsa.PrivateKey. 
+func AuthKeyFromFile(filename string) (*ecdsa.PrivateKey, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return AuthKeyFromBytes(bytes) +} + +// AuthKeyFromBytes loads a .p8 certificate from an in memory byte array and +// returns an *ecdsa.PrivateKey. +func AuthKeyFromBytes(bytes []byte) (*ecdsa.PrivateKey, error) { + block, _ := pem.Decode(bytes) + if block == nil { + return nil, ErrAuthKeyNotPem + } + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + if pk, ok := key.(*ecdsa.PrivateKey); ok { + return pk, nil + } + return nil, ErrAuthKeyNotECDSA +} + +// GenerateIfExpired checks to see if the token is about to expire and +// generates a new token. +func (t *Token) GenerateIfExpired() (bearer string) { + t.Lock() + defer t.Unlock() + if t.Expired() { + t.Generate() + } + return t.Bearer +} + +// Expired checks to see if the token has expired. +func (t *Token) Expired() bool { + return time.Now().Unix() >= (t.IssuedAt + TokenTimeout) +} + +// Generate creates a new token. 
+func (t *Token) Generate() (bool, error) { + if t.AuthKey == nil { + return false, ErrAuthKeyNil + } + issuedAt := time.Now().Unix() + jwtToken := &jwt.Token{ + Header: map[string]interface{}{ + "alg": "ES256", + "kid": t.KeyID, + }, + Claims: jwt.MapClaims{ + "iss": t.TeamID, + "iat": issuedAt, + }, + Method: jwt.SigningMethodES256, + } + bearer, err := jwtToken.SignedString(t.AuthKey) + if err != nil { + return false, err + } + t.IssuedAt = issuedAt + t.Bearer = bearer + return true, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 4f15786ed7..8d0052ea40 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -360,6 +360,11 @@ github.com/rinchsan/device-check-go # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/sideshow/apns2 v0.23.0 +## explicit; go 1.15 +github.com/sideshow/apns2 +github.com/sideshow/apns2/payload +github.com/sideshow/apns2/token # github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 ## explicit; go 1.22 github.com/tidepool-org/clinic/client From ac0a3a930edceef60d1380b0f03d9e4572471673 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 26 Jun 2024 09:48:04 -0600 Subject: [PATCH 08/54] adapts sarama.Logger to implement log.Logger So that sarama log messages better follow our standards, and will be emitted as JSON when log.Logger is configured for that. Before this change, the sarama logs were printed in plain-text without any of the benefits of the platform log.Logger. 
BACK-2554 --- data/service/service/standard.go | 21 ++++++++++++------- log/sarama.go | 35 ++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 log/sarama.go diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 80911f4b20..20456e91e0 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -2,15 +2,12 @@ package service import ( "context" - "log" - "os" - - "github.com/tidepool-org/platform/clinics" "github.com/IBM/sarama" eventsCommon "github.com/tidepool-org/go-common/events" "github.com/tidepool-org/platform/application" + "github.com/tidepool-org/platform/clinics" dataDeduplicatorDeduplicator "github.com/tidepool-org/platform/data/deduplicator/deduplicator" dataDeduplicatorFactory "github.com/tidepool-org/platform/data/deduplicator/factory" dataEvents "github.com/tidepool-org/platform/data/events" @@ -22,7 +19,7 @@ import ( dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/events" - logInternal "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/log" metricClient "github.com/tidepool-org/platform/metric/client" "github.com/tidepool-org/platform/permission" permissionClient "github.com/tidepool-org/platform/permission/client" @@ -87,6 +84,9 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeDataSourceClient(); err != nil { return err } + if err := s.initializeSaramaLogger(); err != nil { + return err + } if err := s.initializeUserEventsHandler(); err != nil { return err } @@ -406,9 +406,8 @@ func (s *Standard) initializeServer() error { func (s *Standard) initializeUserEventsHandler() error { s.Logger().Debug("Initializing user events handler") - sarama.Logger = log.New(os.Stdout, "SARAMA ", log.LstdFlags|log.Lshortfile) - ctx := logInternal.NewContextWithLogger(context.Background(), 
s.Logger()) + ctx := log.NewContextWithLogger(context.Background(), s.Logger()) handler := dataEvents.NewUserDataDeletionHandler(ctx, s.dataStore, s.dataSourceStructuredStore) handlers := []eventsCommon.EventHandler{handler} runner := events.NewRunner(handlers) @@ -419,3 +418,11 @@ func (s *Standard) initializeUserEventsHandler() error { return nil } + +func (s *Standard) initializeSaramaLogger() error { + // Multiple properties of Standard use the sarama package. This is + // intended to be the one place that the sarama Logger is initialized, + // before any of the properties that need it are run. + sarama.Logger = log.NewSarama(s.Logger()) + return nil +} diff --git a/log/sarama.go b/log/sarama.go new file mode 100644 index 0000000000..d09576c5a6 --- /dev/null +++ b/log/sarama.go @@ -0,0 +1,35 @@ +package log + +import ( + "fmt" + "strings" + + "github.com/IBM/sarama" +) + +// NewSarama returns a [Logger] adapted to implement [sarama.StdLogger]. +func NewSarama(l Logger) sarama.StdLogger { + return &SaramaLogger{Logger: l.WithField("SARAMA", "1")} +} + +// SaramaLogger wraps a [Logger] to implement [sarama.StdLogger]. +// +// Sarama doesn't support the concept of logging levels, so all messages will +// use the info level. +type SaramaLogger struct { + Logger +} + +func (l *SaramaLogger) Print(args ...interface{}) { + l.Logger.Info(fmt.Sprint(args...)) +} + +func (l *SaramaLogger) Printf(format string, args ...interface{}) { + // Sarama log messages sent via this method include a newline, which + // doesn't fit with Logger's style, so remove it. + l.Logger.Infof(strings.TrimSuffix(format, "\n"), args...) 
+} + +func (l *SaramaLogger) Println(args ...interface{}) { + l.Logger.Info(fmt.Sprint(args...)) +} From 3207677938ee506f025a7b7080cfecec9d49e3f6 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 8 May 2024 14:34:37 -0600 Subject: [PATCH 09/54] adapts go-common's asyncevents.SaramaEventsConsumer for alerts The existing FaultTolerantConsumer isn't used because it's retry semantics are hard-wired and aren't compatible with what care partner alerting's needs. Note: A proper implementation of AlertsEventsConsumer to consume events is yet to be written. It will follow shortly. BACK-2554 --- data/events/events.go | 139 +++++++++++ data/events/events_suite_test.go | 34 +++ data/events/events_test.go | 163 +++++++++++++ go.mod | 2 +- go.sum | 4 +- log/gocommon_adapter.go | 54 +++++ .../go-common/asyncevents/sarama.go | 224 ++++++++++++++++++ .../tidepool-org/go-common/events/config.go | 1 + vendor/modules.txt | 3 +- 9 files changed, 620 insertions(+), 4 deletions(-) create mode 100644 data/events/events_suite_test.go create mode 100644 data/events/events_test.go create mode 100644 log/gocommon_adapter.go create mode 100644 vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go diff --git a/data/events/events.go b/data/events/events.go index 3e41a0630d..10f9a664f9 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -2,7 +2,14 @@ package events import ( "context" + "fmt" + "log/slog" + "sync" + "time" + "github.com/IBM/sarama" + + "github.com/tidepool-org/go-common/asyncevents" ev "github.com/tidepool-org/go-common/events" dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" @@ -58,3 +65,135 @@ func (u *userDeletionEventsHandler) HandleDeleteUserEvent(payload ev.DeleteUserE } return nil } + +// AlertsEventRetryDelayMaximum is the maximum delay between consumption +// retries. +const AlertsEventRetryDelayMaximum = time.Minute + +// AlertsEventRetries is the maximum consumption attempts before giving up. 
+const AlertsEventRetries = 1000
+
+// AlertsEventConsumptionTimeout is the maximum time to process an alerts event.
+const AlertsEventConsumptionTimeout = 30 * time.Second
+
+// SaramaRunner interfaces between events.Runner and go-common's
+// asyncevents.SaramaEventsConsumer.
+type SaramaRunner struct {
+	EventsRunner SaramaEventsRunner
+	Config       SaramaRunnerConfig
+	cancelCtx    context.CancelFunc
+	cancelMu     sync.Mutex
+}
+
+// SaramaEventsRunner is implemented by go-common's
+// asyncevents.SaramaEventsConsumer.
+type SaramaEventsRunner interface {
+	Run(ctx context.Context) error
+}
+
+// SaramaRunnerConfig collects values needed to initialize a SaramaRunner.
+//
+// This provides isolation for the SaramaRunner from ConfigReporter,
+// envconfig, or any of the other options in platform for reading config
+// values.
+type SaramaRunnerConfig struct {
+	Brokers         []string
+	GroupID         string
+	Logger          log.Logger
+	Topics          []string
+	MessageConsumer asyncevents.SaramaMessageConsumer
+
+	Sarama *sarama.Config
+}
+
+func (r *SaramaRunner) Initialize() error {
+	group, err := sarama.NewConsumerGroup(r.Config.Brokers, r.Config.GroupID, r.Config.Sarama)
+	if err != nil {
+		return errors.Wrap(err, "Unable to build sarama consumer group")
+	}
+	handler := asyncevents.NewSaramaConsumerGroupHandler(&asyncevents.NTimesRetryingConsumer{
+		Consumer: r.Config.MessageConsumer,
+		Delay:    CappedExponentialBinaryDelay(AlertsEventRetryDelayMaximum),
+		Times:    AlertsEventRetries,
+		Logger:   r.logger,
+	}, AlertsEventConsumptionTimeout)
+	r.EventsRunner = asyncevents.NewSaramaEventsConsumer(group, handler, r.Config.Topics...)
+	return nil
+}
+
+func (r *SaramaRunner) logger(ctx context.Context) asyncevents.Logger {
+	// Prefer a logger from the context.
+ if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil { + return &log.GoCommonAdapter{Logger: ctxLogger} + } + if r.Config.Logger != nil { + return &log.GoCommonAdapter{Logger: r.Config.Logger} + } + // No known log.Logger could be found, default to slog. + return slog.Default() +} + +// Run adapts platform's event.Runner to work with go-common's +// asyncevents.SaramaEventsConsumer. +func (r *SaramaRunner) Run() error { + if r.EventsRunner == nil { + return errors.New("Unable to run SaramaRunner, EventsRunner is nil") + } + + r.cancelMu.Lock() + ctx, err := func() (context.Context, error) { + defer r.cancelMu.Unlock() + if r.cancelCtx != nil { + return nil, errors.New("Unable to Run SaramaRunner, it's already initialized") + } + var ctx context.Context + ctx, r.cancelCtx = context.WithCancel(context.Background()) + return ctx, nil + }() + if err != nil { + return err + } + if err := r.EventsRunner.Run(ctx); err != nil { + return errors.Wrap(err, "Unable to Run SaramaRunner") + } + return nil +} + +// Terminate adapts platform's event.Runner to work with go-common's +// asyncevents.SaramaEventsConsumer. +func (r *SaramaRunner) Terminate() error { + r.cancelMu.Lock() + defer r.cancelMu.Unlock() + if r.cancelCtx == nil { + return errors.New("Unable to Terminate SaramaRunner, it's not running") + } + r.cancelCtx() + return nil +} + +// CappedExponentialBinaryDelay builds delay functions that use exponential +// binary backoff with a maximum duration. +func CappedExponentialBinaryDelay(cap time.Duration) func(int) time.Duration { + return func(tries int) time.Duration { + b := asyncevents.DelayExponentialBinary(tries) + if b > cap { + return cap + } + return b + } +} + +// TODO: implement me!! 
+type AlertsEventsConsumer struct { + Consumer asyncevents.SaramaMessageConsumer +} + +func (c *AlertsEventsConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) error { + err := c.Consumer.Consume(ctx, session, message) + if err != nil { + session.MarkMessage(message, fmt.Sprintf("I have given up after error: %s", err)) + return err + } + return nil +} diff --git a/data/events/events_suite_test.go b/data/events/events_suite_test.go new file mode 100644 index 0000000000..4bab08b129 --- /dev/null +++ b/data/events/events_suite_test.go @@ -0,0 +1,34 @@ +package events + +import ( + "log/slog" + "os" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/tidepool-org/platform/test" +) + +func TestSuite(t *testing.T) { + test.Test(t) +} + +var _ = BeforeSuite(func() { + slog.SetDefault(devNullSlogLogger(GinkgoT())) +}) + +// Cleaner is part of testing.T and FullGinkgoTInterface +type Cleaner interface { + Cleanup(func()) +} + +func devNullSlogLogger(c Cleaner) *slog.Logger { + f, err := os.Open(os.DevNull) + Expect(err).To(Succeed()) + c.Cleanup(func() { + Expect(f.Close()).To(Succeed()) + }) + return slog.New(slog.NewTextHandler(f, nil)) +} diff --git a/data/events/events_test.go b/data/events/events_test.go new file mode 100644 index 0000000000..492a059376 --- /dev/null +++ b/data/events/events_test.go @@ -0,0 +1,163 @@ +package events + +import ( + "context" + "log/slog" + "sync" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("SaramaRunner", func() { + Context("has a lifecycle", func() { + newTestRunner := func() *SaramaRunner { + return &SaramaRunner{ + Config: SaramaRunnerConfig{}, + EventsRunner: &mockEventsRunner{}, + } + } + It("starts with Run() and stops with Terminate()", func() { + r := newTestRunner() + var runErr error + var errMu sync.Mutex + launched := make(chan struct{}, 1) + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + runErr = r.Run() + launched <- struct{}{} + }() + }() + <-launched + time.Sleep(time.Millisecond) + errMu.Lock() + defer errMu.Unlock() + + Expect(r.Terminate()).To(Succeed()) + Eventually(runErr).WithTimeout(10 * time.Millisecond).Should(Succeed()) + }) + + Describe("Run()", func() { + var errMu sync.Mutex + + It("can be started only once", func() { + r := newTestRunner() + var firstRunErr, secondRunErr error + launched := make(chan struct{}, 2) + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + firstRunErr = r.Run() + launched <- struct{}{} + }() + }() + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + secondRunErr = r.Run() + launched <- struct{}{} + }() + + }() + <-launched + <-launched + errMu.Lock() + defer errMu.Unlock() + + // The above doesn't _guarantee_ that Run has been called twice, + // but... it should work. 
+ + Expect(r.Terminate()).To(Succeed()) + if firstRunErr != nil { + Expect(firstRunErr).To(MatchError(ContainSubstring("it's already initialized"))) + Expect(secondRunErr).To(Succeed()) + } else { + Expect(firstRunErr).To(Succeed()) + Expect(secondRunErr).To(MatchError(ContainSubstring("it's already initialized"))) + } + }) + + It("can't be Terminate()'d before it's Run()", func() { + r := newTestRunner() + Expect(r.Terminate()).To(MatchError(ContainSubstring("it's not running"))) + }) + }) + }) + + Describe("logger", func() { + It("prefers a context's logger", func() { + testLogger := test.NewLogger() + ctxLogger := test.NewLogger() + r := &SaramaRunner{ + Config: SaramaRunnerConfig{Logger: testLogger}, + } + + ctx := log.NewContextWithLogger(context.Background(), ctxLogger) + got := r.logger(ctx) + + goCommonLogger, ok := got.(*log.GoCommonAdapter) + Expect(ok).To(BeTrue()) + Expect(goCommonLogger.Logger).To(Equal(ctxLogger)) + }) + + Context("without a context logger", func() { + It("uses the configured logger", func() { + testLogger := test.NewLogger() + r := &SaramaRunner{ + Config: SaramaRunnerConfig{ + Logger: testLogger, + }, + } + + got := r.logger(context.Background()) + + goCommonLogger, ok := got.(*log.GoCommonAdapter) + Expect(ok).To(BeTrue()) + Expect(goCommonLogger.Logger).To(Equal(testLogger)) + }) + + Context("or any configured logger", func() { + It("doesn't panic", func() { + r := &SaramaRunner{Config: SaramaRunnerConfig{}} + ctx := context.Background() + got := r.logger(ctx) + + Expect(func() { + got.Log(ctx, slog.LevelInfo, "testing") + }).ToNot(Panic()) + }) + }) + }) + }) + + DescribeTable("CappedExponentialBinaryDelay", + func(cap time.Duration, input int, output time.Duration) { + f := CappedExponentialBinaryDelay(cap) + Expect(f(input)).To(Equal(output)) + }, + Entry("cap: 1m; tries: 0", time.Minute, 0, time.Second), + Entry("cap: 1m; tries: 1", time.Minute, 1, 2*time.Second), + Entry("cap: 1m; tries: 2", time.Minute, 2, 4*time.Second), + 
Entry("cap: 1m; tries: 3", time.Minute, 3, 8*time.Second), + Entry("cap: 1m; tries: 4", time.Minute, 4, 16*time.Second), + Entry("cap: 1m; tries: 5", time.Minute, 5, 32*time.Second), + Entry("cap: 1m; tries: 6", time.Minute, 6, time.Minute), + Entry("cap: 1m; tries: 20", time.Minute, 20, time.Minute), + ) +}) + +type mockEventsRunner struct { + Err error +} + +func (r *mockEventsRunner) Run(ctx context.Context) error { + return r.Err +} diff --git a/go.mod b/go.mod index d73cc096dd..759f978210 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/sideshow/apns2 v0.23.0 github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 - github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733 + github.com/tidepool-org/go-common v0.12.2 github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace github.com/urfave/cli v1.22.15 go.mongodb.org/mongo-driver v1.16.0 diff --git a/go.sum b/go.sum index f984743dae..44733686bc 100644 --- a/go.sum +++ b/go.sum @@ -185,8 +185,8 @@ github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2 h1:fTIg github.com/tidepool-org/clinic/client v0.0.0-20240926112325-657da308fce2/go.mod h1:7BpAdFdGJNB3aw/xvCz5XnWjSWRoUtWIX4xcMc4Bsko= github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 h1:1kiZtHhs++yXayRD/Mh/3POLwtmxV99YR2bSCle1Q74= github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5/go.mod h1:xuQ8k0mLR1ZyEmwe/m0v2BuXctqQuCZeR43urSQpTUM= -github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733 h1:WCOSrazmNv7KdjIJafWyHkLkHNp2SsoLAm6OXp2rAco= -github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733/go.mod h1:BeqsQcDwfSsmnmc+/N/EOT8h3m8/YtqrLNykk5kGkv4= +github.com/tidepool-org/go-common v0.12.2 h1:3mse3wJtq5irbgdCz3LeEfs8XE9oDX9kzDcHuWNW/jw= +github.com/tidepool-org/go-common v0.12.2/go.mod 
h1:BeqsQcDwfSsmnmc+/N/EOT8h3m8/YtqrLNykk5kGkv4= github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace h1:L0UiCj2eL/NOpLa19Tf5IgoK6feILmdA+zK3nCTIhqU= github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace/go.mod h1:gon+x+jAh8DZZ2hD23fBWqrYwOizVSwIBbxEsuXCbZ4= github.com/ugorji/go v1.2.4/go.mod h1:EuaSCk8iZMdIspsu6HXH7X2UGKw1ezO4wCfGszGmmo4= diff --git a/log/gocommon_adapter.go b/log/gocommon_adapter.go new file mode 100644 index 0000000000..14a35cbfaa --- /dev/null +++ b/log/gocommon_adapter.go @@ -0,0 +1,54 @@ +package log + +import ( + "context" + "fmt" + "log/slog" +) + +// GoCommonAdapter implements gocommon's asyncevents.Logger interface. +// +// It adapts a Logger for the purpose. +type GoCommonAdapter struct { + Logger Logger +} + +func (a *GoCommonAdapter) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + logger := a.Logger + if fields := a.fieldsFromArgs(args); len(fields) > 0 { + logger = logger.WithFields(fields) + } + logger.Log(SlogLevelToLevel[level], msg) +} + +// fieldsFromArgs builds a Fields following the same rules as slog.Log. +// +// As Fields is a map instead of a slice, !BADKEY becomes !BADKEY[x] where +// x is the index counter of the value. See the godoc for slog.Log for +// details. 
+func (a *GoCommonAdapter) fieldsFromArgs(args []any) Fields { + fields := Fields{} + for i := 0; i < len(args); i++ { + switch v := args[i].(type) { + case slog.Attr: + fields[v.Key] = v.Value + case string: + if i+1 < len(args) { + fields[v] = args[i+1] + i++ + } else { + fields[fmt.Sprintf("!BADKEY[%d]", i)] = v + } + default: + fields[fmt.Sprintf("!BADKEY[%d]", i)] = v + } + } + return fields +} + +var SlogLevelToLevel = map[slog.Level]Level{ + slog.LevelDebug: DebugLevel, + slog.LevelInfo: InfoLevel, + slog.LevelWarn: WarnLevel, + slog.LevelError: ErrorLevel, +} diff --git a/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go b/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go new file mode 100644 index 0000000000..83ad151e31 --- /dev/null +++ b/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go @@ -0,0 +1,224 @@ +package asyncevents + +import ( + "context" + "errors" + "fmt" + "log/slog" + "math" + "time" + + "github.com/IBM/sarama" +) + +// SaramaEventsConsumer consumes Kafka messages for asynchronous event +// handling. +type SaramaEventsConsumer struct { + Handler sarama.ConsumerGroupHandler + ConsumerGroup sarama.ConsumerGroup + Topics []string +} + +func NewSaramaEventsConsumer(consumerGroup sarama.ConsumerGroup, + handler sarama.ConsumerGroupHandler, topics ...string) *SaramaEventsConsumer { + + return &SaramaEventsConsumer{ + ConsumerGroup: consumerGroup, + Handler: handler, + Topics: topics, + } +} + +// Run the consumer, to begin consuming Kafka messages. +// +// Run is stopped by its context being canceled. When its context is canceled, +// it returns nil. +func (p *SaramaEventsConsumer) Run(ctx context.Context) (err error) { + for { + err := p.ConsumerGroup.Consume(ctx, p.Topics, p.Handler) + if err != nil { + return err + } + if ctxErr := ctx.Err(); ctxErr != nil { + return nil + } + } +} + +// SaramaConsumerGroupHandler implements sarama.ConsumerGroupHandler. 
+type SaramaConsumerGroupHandler struct { + Consumer SaramaMessageConsumer + ConsumerTimeout time.Duration + Logger Logger +} + +// NewSaramaConsumerGroupHandler builds a consumer group handler. +// +// A timeout of 0 will use DefaultMessageConsumptionTimeout. +func NewSaramaConsumerGroupHandler(logger Logger, consumer SaramaMessageConsumer, + timeout time.Duration) *SaramaConsumerGroupHandler { + + if timeout == 0 { + timeout = DefaultMessageConsumptionTimeout + } + if logger == nil { + logger = slog.Default() + } + return &SaramaConsumerGroupHandler{ + Consumer: consumer, + ConsumerTimeout: timeout, + Logger: logger, + } +} + +const ( + // DefaultMessageConsumptionTimeout is the default time to allow + // SaramaMessageConsumer.Consume to work before canceling. + DefaultMessageConsumptionTimeout = 30 * time.Second +) + +// Setup implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } + +// Cleanup implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } + +// ConsumeClaim implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, + claim sarama.ConsumerGroupClaim) error { + + done := session.Context().Done() + for { + select { + case <-done: + return nil + case message, more := <-claim.Messages(): + if !more { + return nil + } + err := func() error { + ctx, cancel := context.WithTimeout(session.Context(), h.ConsumerTimeout) + defer cancel() + return h.Consumer.Consume(ctx, session, message) + }() + switch { + case errors.Is(err, context.DeadlineExceeded): + h.Logger.Log(session.Context(), slog.LevelDebug, err.Error()) + case !errors.Is(err, nil): + return err + } + } + } +} + +// Close implements sarama.ConsumerGroupHandler. 
+func (h *SaramaConsumerGroupHandler) Close() error { return nil } + +// SaramaMessageConsumer processes Kafka messages. +type SaramaMessageConsumer interface { + // Consume should process a message. + // + // Consume is responsible for marking the message consumed, unless the + // context is canceled, in which case the caller should retry, or mark the + // message as appropriate. + Consume(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error +} + +var ErrRetriesLimitExceeded = errors.New("retry limit exceeded") + +// NTimesRetryingConsumer enhances a SaramaMessageConsumer with a finite +// number of immediate retries. +// +// The delay between each retry can be controlled via the Delay property. If +// no Delay property is specified, a delay based on the Fibonacci sequence is +// used. +// +// Logger is intentionally minimal. The slog.Log function is used by default. +type NTimesRetryingConsumer struct { + Times int + Consumer SaramaMessageConsumer + Delay func(tries int) time.Duration + Logger Logger +} + +// Logger is an intentionally minimal interface for basic logging. +// +// It matches the signature of slog.Log. 
+type Logger interface { + Log(ctx context.Context, level slog.Level, msg string, args ...any) +} + +func (c *NTimesRetryingConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) (err error) { + + var joinedErrors error + var tries int = 0 + var delay time.Duration = 0 + if c.Delay == nil { + c.Delay = DelayFibonacci + } + if c.Logger == nil { + c.Logger = slog.Default() + } + done := ctx.Done() + for tries < c.Times { + select { + case <-done: + return nil + case <-time.After(delay): + err := c.Consumer.Consume(ctx, session, message) + if errors.Is(err, nil) || errors.Is(err, context.Canceled) { + return nil + } + delay = c.Delay(tries) + c.Logger.Log(ctx, slog.LevelInfo, "failure consuming Kafka message, will retry", + slog.Attr{Key: "tries", Value: slog.IntValue(tries)}, + slog.Attr{Key: "times", Value: slog.IntValue(c.Times)}, + slog.Attr{Key: "delay", Value: slog.DurationValue(delay)}, + slog.Attr{Key: "err", Value: slog.AnyValue(err)}, + ) + joinedErrors = errors.Join(joinedErrors, err) + tries++ + } + } + + return errors.Join(joinedErrors, c.retryLimitError()) +} + +func (c *NTimesRetryingConsumer) retryLimitError() error { + return fmt.Errorf("%w (%d)", ErrRetriesLimitExceeded, c.Times) +} + +// DelayNone is a function returning a constant "no delay" of 0 seconds. +var DelayNone = func(_ int) time.Duration { return DelayConstant(0) } + +// DelayConstant is a function returning a constant number of seconds. +func DelayConstant(n int) time.Duration { return time.Duration(n) * time.Second } + +// DelayExponentialBinary returns a binary exponential delay. +// +// The delay is 2**tries seconds. +func DelayExponentialBinary(tries int) time.Duration { + return time.Second * time.Duration(math.Pow(2, float64(tries))) +} + +// DelayFibonacci returns a delay based on the Fibonacci sequence. 
+func DelayFibonacci(tries int) time.Duration { + return time.Second * time.Duration(Fib(tries)) +} + +// Fib returns the nth number in the Fibonacci sequence. +func Fib(n int) int { + if n == 0 { + return 0 + } else if n < 3 { + return 1 + } + + n1, n2 := 1, 1 + for i := 3; i <= n; i++ { + n1, n2 = n1+n2, n1 + } + + return n1 +} diff --git a/vendor/github.com/tidepool-org/go-common/events/config.go b/vendor/github.com/tidepool-org/go-common/events/config.go index a07d70ed6e..5deff14d06 100644 --- a/vendor/github.com/tidepool-org/go-common/events/config.go +++ b/vendor/github.com/tidepool-org/go-common/events/config.go @@ -2,6 +2,7 @@ package events import ( "errors" + "github.com/IBM/sarama" "github.com/kelseyhightower/envconfig" ) diff --git a/vendor/modules.txt b/vendor/modules.txt index 8d0052ea40..cc790ca23c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -371,8 +371,9 @@ github.com/tidepool-org/clinic/client # github.com/tidepool-org/devices/api v0.0.0-20240806072455-2b18f22c9cf5 ## explicit; go 1.22 github.com/tidepool-org/devices/api -# github.com/tidepool-org/go-common v0.12.2-0.20250129210214-bd36b59b9733 +# github.com/tidepool-org/go-common v0.12.2 ## explicit; go 1.22 +github.com/tidepool-org/go-common/asyncevents github.com/tidepool-org/go-common/clients github.com/tidepool-org/go-common/clients/disc github.com/tidepool-org/go-common/clients/hakken From 062e3653d4cf0079cbc311e6efed18c6bb197723 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 2 Jul 2024 13:14:29 -0600 Subject: [PATCH 10/54] allow invites to set an upload id The upload id is necessary to ensure that only the proper device data uploads are evaluated for care partner alert conditions. 
BACK-2554 --- data/service/api/v1/alerts.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index 70941b9e20..a0aa2a354e 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -115,8 +115,13 @@ func UpsertAlert(dCtx service.Context) { return } - a := &alerts.Alerts{} - if err := request.DecodeRequestBody(r.Request, a); err != nil { + incomingCfg := &alerts.Config{} + var bodyReceiver interface{} = &incomingCfg.Alerts + if authDetails.IsService() && authDetails.UserID() == "" { + // Accept upload id only from services. + bodyReceiver = incomingCfg + } + if err := request.DecodeRequestBody(r.Request, bodyReceiver); err != nil { dCtx.RespondWithError(platform.ErrorJSONMalformed()) return } @@ -127,7 +132,12 @@ func UpsertAlert(dCtx service.Context) { return } - cfg := &alerts.Config{UserID: path.UserID, FollowedUserID: path.FollowedUserID, Alerts: *a} + cfg := &alerts.Config{ + UserID: path.UserID, + FollowedUserID: path.FollowedUserID, + UploadID: incomingCfg.UploadID, + Alerts: incomingCfg.Alerts, + } if err := repo.Upsert(ctx, cfg); err != nil { dCtx.RespondWithError(platform.ErrorInternalServerFailure()) lgr.WithError(err).Error("upserting alerts config") From ef03a862ee2003f41d712208982d552520632e3b Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 8 Jul 2024 13:36:56 -0600 Subject: [PATCH 11/54] integrates an APNs pusher into data service If the necessary configuration isn't found, then push notifications will instead be logged. 
BACK-2554 --- alerts/config.go | 8 +++++++ data/service/service/standard.go | 38 +++++++++++++++++++++++++++++++ push/logpush.go | 39 ++++++++++++++++++++++++++++++++ push/push.go | 28 +++++++++++++++++++++++ 4 files changed, 113 insertions(+) create mode 100644 push/logpush.go diff --git a/alerts/config.go b/alerts/config.go index b6f8334656..9437c6a5ff 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -269,3 +269,11 @@ type Repository interface { EnsureIndexes() error } + +// Note gathers information necessary for sending an alert notification. +type Note struct { + // Message communicates the alert to the recipient. + Message string + RecipientUserID string + FollowedUserID string +} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 20456e91e0..c08104f24d 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -4,6 +4,7 @@ import ( "context" "github.com/IBM/sarama" + "github.com/kelseyhightower/envconfig" eventsCommon "github.com/tidepool-org/go-common/events" "github.com/tidepool-org/platform/application" @@ -17,6 +18,7 @@ import ( dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" dataSourceStoreStructuredMongo "github.com/tidepool-org/platform/data/source/store/structured/mongo" dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/events" "github.com/tidepool-org/platform/log" @@ -24,6 +26,7 @@ import ( "github.com/tidepool-org/platform/permission" permissionClient "github.com/tidepool-org/platform/permission/client" "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/push" "github.com/tidepool-org/platform/service/server" "github.com/tidepool-org/platform/service/service" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" @@ -41,6 +44,7 @@ type Standard 
struct { dataClient *Client clinicsClient *clinics.Client dataSourceClient *dataSourceServiceClient.Client + pusher Pusher userEventsHandler events.Runner api *api.Standard server *server.Standard @@ -87,6 +91,9 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeSaramaLogger(); err != nil { return err } + if err := s.initializePusher(); err != nil { + return err + } if err := s.initializeUserEventsHandler(); err != nil { return err } @@ -426,3 +433,34 @@ func (s *Standard) initializeSaramaLogger() error { sarama.Logger = log.NewSarama(s.Logger()) return nil } + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} + +func (s *Standard) initializePusher() error { + var err error + + apns2Config := &struct { + SigningKey []byte `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_SIGNING_KEY"` + KeyID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_KEY_ID"` + BundleID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_BUNDLE_ID"` + TeamID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_TEAM_ID"` + }{} + if err := envconfig.Process("", apns2Config); err != nil { + return errors.Wrap(err, "Unable to process APNs pusher config") + } + + var pusher Pusher + pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, + apns2Config.TeamID, apns2Config.BundleID) + if err != nil { + s.Logger().WithError(err).Warn("falling back to logging of push notifications") + pusher = push.NewLogPusher(s.Logger()) + } + s.pusher = pusher + + return nil +} diff --git a/push/logpush.go b/push/logpush.go new file mode 100644 index 0000000000..a313806a87 --- /dev/null +++ b/push/logpush.go @@ -0,0 +1,39 @@ +package push + +import ( + "context" + "os" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + logjson 
"github.com/tidepool-org/platform/log/json" + lognull "github.com/tidepool-org/platform/log/null" +) + +// LogPusher logs notifications instead of sending push notifications. +// +// Useful for dev or testing situations. +type LogPusher struct { + log.Logger +} + +// NewLogPusher uses a [log.Logger] instead of pushing via APNs. +func NewLogPusher(l log.Logger) *LogPusher { + if l == nil { + var err error + l, err = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + l = lognull.NewLogger() + } + } + return &LogPusher{Logger: l} +} + +// Push implements [service.Pusher]. +func (p *LogPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, note *Notification) error { + p.Logger.WithFields(log.Fields{ + "deviceToken": deviceToken, + "note": note, + }).Info("logging push notification") + return nil +} diff --git a/push/push.go b/push/push.go index 419cd395b3..bca2d45988 100644 --- a/push/push.go +++ b/push/push.go @@ -11,6 +11,7 @@ import ( "github.com/sideshow/apns2/payload" "github.com/sideshow/apns2/token" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" @@ -21,6 +22,17 @@ type Notification struct { Message string } +// String implements fmt.Stringer. +func (n Notification) String() string { + return n.Message +} + +func FromNote(note *alerts.Note) *Notification { + return &Notification{ + Message: note.Message, + } +} + // APNSPusher implements push notifications via Apple APNs. 
type APNSPusher struct { BundleID string @@ -47,6 +59,22 @@ func NewAPNSPusher(client APNS2Client, bundleID string) *APNSPusher { // // https://developer.apple.com/documentation/usernotifications/sending-notification-requests-to-apns func NewAPNSPusherFromKeyData(signingKey []byte, keyID, teamID, bundleID string) (*APNSPusher, error) { + if len(signingKey) == 0 { + return nil, errors.New("Unable to build APNSPusher: APNs signing key is blank") + } + + if bundleID == "" { + return nil, errors.New("Unable to build APNSPusher: bundleID is blank") + } + + if keyID == "" { + return nil, errors.New("Unable to build APNSPusher: keyID is blank") + } + + if teamID == "" { + return nil, errors.New("Unable to build APNSPusher: teamID is blank") + } + authKey, err := token.AuthKeyFromBytes(signingKey) if err != nil { return nil, err From 2e8ee0e72233c90c797fb7c6b9704bd6a25d6a98 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 8 Jul 2024 13:53:41 -0600 Subject: [PATCH 12/54] adds Evaluate methods to alerts.Config These methods return Note objects that can be sent as push notifications. NotLooping evaluation will be handled in a later commit. 
BACK-2554 --- alerts/config.go | 270 ++++++++++++++++- alerts/config_test.go | 682 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 931 insertions(+), 21 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index 9437c6a5ff..f26387c1b8 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -6,10 +6,15 @@ import ( "bytes" "context" "encoding/json" + "slices" "time" "github.com/tidepool-org/platform/data" - "github.com/tidepool-org/platform/data/blood/glucose" + nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/structure" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/user" @@ -50,6 +55,45 @@ func (c Config) Validate(validator structure.Validator) { c.Alerts.Validate(validator) } +// Evaluate alerts in the context of the provided data. +// +// While this method, or the methods it calls, can fail, there's no point in returning an +// error. Instead errors are logged before continuing. This is to ensure that any possible alert +// that should be triggered, will be triggered. +func (c Config) Evaluate(ctx context.Context, gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Note { + n := c.Alerts.Evaluate(ctx, gd, dd) + if n != nil { + n.FollowedUserID = c.FollowedUserID + n.RecipientUserID = c.UserID + } + if lgr := log.LoggerFromContext(ctx); lgr != nil { + lgr.WithField("note", n).Info("evaluated alert") + } + + return n +} + +// LongestDelay of the delays set on enabled alerts. 
+func (a Alerts) LongestDelay() time.Duration { + delays := []time.Duration{} + if a.Low != nil && a.Low.Enabled { + delays = append(delays, a.Low.Delay.Duration()) + } + if a.High != nil && a.High.Enabled { + delays = append(delays, a.High.Delay.Duration()) + } + if a.NotLooping != nil && a.NotLooping.Enabled { + delays = append(delays, a.NotLooping.Delay.Duration()) + } + if a.NoCommunication != nil && a.NoCommunication.Enabled { + delays = append(delays, a.NoCommunication.Delay.Duration()) + } + if len(delays) == 0 { + return 0 + } + return slices.Max(delays) +} + func (a Alerts) Validate(validator structure.Validator) { if a.UrgentLow != nil { a.UrgentLow.Validate(validator) @@ -68,6 +112,41 @@ func (a Alerts) Validate(validator structure.Validator) { } } +// Evaluate a user's data to determine if notifications are indicated. +// +// Evaluations are performed according to priority. The process is +// "short-circuited" at the first indicated notification. +func (a Alerts) Evaluate(ctx context.Context, + gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Note { + + if a.NoCommunication != nil && a.NoCommunication.Enabled { + if n := a.NoCommunication.Evaluate(ctx, gd); n != nil { + return n + } + } + if a.UrgentLow != nil && a.UrgentLow.Enabled { + if n := a.UrgentLow.Evaluate(ctx, gd); n != nil { + return n + } + } + if a.Low != nil && a.Low.Enabled { + if n := a.Low.Evaluate(ctx, gd); n != nil { + return n + } + } + if a.High != nil && a.High.Enabled { + if n := a.High.Evaluate(ctx, gd); n != nil { + return n + } + } + if a.NotLooping != nil && a.NotLooping.Enabled { + if n := a.NotLooping.Evaluate(ctx, dd); n != nil { + return n + } + } + return nil +} + // Base describes the minimum specifics of a desired alert. type Base struct { // Enabled controls whether notifications should be sent for this alert. 
@@ -81,6 +160,13 @@ func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) } +func (b Base) Evaluate(ctx context.Context, data []*glucose.Glucose) *Note { + if lgr := log.LoggerFromContext(ctx); lgr != nil { + lgr.Warn("alerts.Base.Evaluate called, this shouldn't happen!") + } + return nil +} + type Activity struct { // Triggered records the last time this alert was triggered. Triggered time.Time `json:"triggered" bson:"triggered"` @@ -132,6 +218,46 @@ func (a UrgentLowAlert) Validate(validator structure.Validator) { a.Threshold.Validate(validator) } +// Evaluate urgent low condition. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { + lgr := log.LoggerFromContext(ctx) + if len(data) == 0 { + lgr.Debug("no data to evaluate for urgent low") + return nil + } + datum := data[0] + okDatum, okThreshold, err := validateGlucoseAlertDatum(datum, a.Threshold) + if err != nil { + lgr.WithError(err).Warn("Unable to evaluate urgent low") + return nil + } + defer func() { logGlucoseAlertEvaluation(lgr, "urgent low", note, okDatum, okThreshold) }() + active := okDatum < okThreshold + if !active { + if a.IsActive() { + a.Resolved = time.Now() + } + return nil + } + if !a.IsActive() { + a.Triggered = time.Now() + } + return &Note{Message: genGlucoseThresholdMessage("below urgent low")} +} + +func validateGlucoseAlertDatum(datum *glucose.Glucose, t Threshold) (float64, float64, error) { + if datum.Blood.Units == nil || datum.Blood.Value == nil || datum.Blood.Time == nil { + return 0, 0, errors.Newf("Unable to evaluate datum: Units, Value, or Time is nil") + } + threshold := nontypesglucose.NormalizeValueForUnits(&t.Value, datum.Blood.Units) + if threshold == nil { + return 0, 0, errors.Newf("Unable to normalize threshold units: normalized to nil") + } + return *datum.Blood.Value, *threshold, nil +} + // NotLoopingAlert extends Base 
with a delay. type NotLoopingAlert struct { Base `bson:",inline"` @@ -144,6 +270,16 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 2*time.Hour) } +// Evaluate if the device is looping. +func (a NotLoopingAlert) Evaluate(ctx context.Context, decisions []*dosingdecision.DosingDecision) (note *Note) { + // TODO will be implemented in the near future. + return nil +} + +// DosingDecisionReasonLoop is specified in a [dosingdecision.DosingDecision] to indicate that +// the decision is part of a loop adjustment (as opposed to bolus or something else). +const DosingDecisionReasonLoop string = "loop" + // NoCommunicationAlert extends Base with a delay. type NoCommunicationAlert struct { Base `bson:",inline"` @@ -156,6 +292,26 @@ func (a NoCommunicationAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 6*time.Hour) } +// Evaluate if CGM data is being received by Tidepool. +// +// Assumes data is pre-sorted by Time in descending order. +func (a NoCommunicationAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) *Note { + var newest time.Time + for _, d := range data { + if d != nil && d.Time != nil && !(*d.Time).IsZero() { + newest = *d.Time + break + } + } + if time.Since(newest) > a.Delay.Duration() { + return &Note{Message: NoCommunicationMessage} + } + + return nil +} + +const NoCommunicationMessage = "Tidepool is unable to communicate with a user's device" + // LowAlert extends Base with threshold and a delay. type LowAlert struct { Base `bson:",inline"` @@ -178,6 +334,51 @@ func (a LowAlert) Validate(validator structure.Validator) { validator.Duration("repeat", &repeatDur).Using(validateRepeat) } +// Evaluate the given data to determine if an alert should be sent. +// +// Assumes data is pre-sorted in descending order by Time. 
+func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { + lgr := log.LoggerFromContext(ctx) + if len(data) == 0 { + lgr.Debug("no data to evaluate for low") + return nil + } + var eventBegan time.Time + var okDatum, okThreshold float64 + var err error + defer func() { logGlucoseAlertEvaluation(lgr, "low", note, okDatum, okThreshold) }() + for _, datum := range data { + okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) + if err != nil { + lgr.WithError(err).Debug("Skipping low alert datum evaluation") + continue + } + active := okDatum < okThreshold + if !active { + break + } + if (*datum.Time).Before(eventBegan) || eventBegan.IsZero() { + eventBegan = *datum.Time + } + } + if eventBegan.IsZero() { + if a.IsActive() { + a.Resolved = time.Now() + } + return nil + } + if !a.IsActive() { + if time.Since(eventBegan) > a.Delay.Duration() { + a.Triggered = time.Now() + } + } + return &Note{Message: genGlucoseThresholdMessage("below low")} +} + +func genGlucoseThresholdMessage(alertType string) string { + return "Glucose reading " + alertType + " threshold" +} + // HighAlert extends Base with a threshold and a delay. type HighAlert struct { Base `bson:",inline"` @@ -200,6 +401,57 @@ func (a HighAlert) Validate(validator structure.Validator) { validator.Duration("repeat", &repeatDur).Using(validateRepeat) } +// Evaluate the given data to determine if an alert should be sent. +// +// Assumes data is pre-sorted in descending order by Time. 
+func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { + lgr := log.LoggerFromContext(ctx) + if len(data) == 0 { + lgr.Debug("no data to evaluate for high") + return nil + } + var eventBegan time.Time + var okDatum, okThreshold float64 + var err error + defer func() { logGlucoseAlertEvaluation(lgr, "high", note, okDatum, okThreshold) }() + for _, datum := range data { + okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) + if err != nil { + lgr.WithError(err).Debug("Skipping high alert datum evaluation") + continue + } + active := okDatum > okThreshold + if !active { + break + } + if (*datum.Time).Before(eventBegan) || eventBegan.IsZero() { + eventBegan = *datum.Time + } + } + if eventBegan.IsZero() { + if a.IsActive() { + a.Resolved = time.Now() + } + return nil + } + if !a.IsActive() { + if time.Since(eventBegan) > a.Delay.Duration() { + a.Triggered = time.Now() + } + } + return &Note{Message: genGlucoseThresholdMessage("above high")} +} + +// logGlucoseAlertEvaluation is called during each glucose-based evaluation for record-keeping. +func logGlucoseAlertEvaluation(lgr log.Logger, alertType string, note *Note, value, threshold float64) { + fields := log.Fields{ + "isAlerting?": note != nil, + "threshold": threshold, + "value": value, + } + lgr.WithFields(fields).Info(alertType) +} + // DurationMinutes reads a JSON integer and converts it to a time.Duration. // // Values are specified in minutes. @@ -227,7 +479,7 @@ func (m DurationMinutes) Duration() time.Duration { return time.Duration(m) } -// ValueWithUnits binds a value to its units. +// ValueWithUnits binds a value with its units. // // Other types can extend it to parse and validate the Units. 
type ValueWithUnits struct { @@ -240,20 +492,20 @@ type Threshold ValueWithUnits // Validate implements structure.Validatable func (t Threshold) Validate(v structure.Validator) { - v.String("units", &t.Units).OneOf(glucose.MgdL, glucose.MmolL) + v.String("units", &t.Units).OneOf(nontypesglucose.MgdL, nontypesglucose.MmolL) // This is a sanity check. Client software will likely further constrain these values. The // broadness of these values allows clients to change their own min and max values // independently, and it sidesteps rounding and conversion conflicts between the backend and // clients. var max, min float64 switch t.Units { - case glucose.MgdL, glucose.Mgdl: - max = glucose.MgdLMaximum - min = glucose.MgdLMinimum + case nontypesglucose.MgdL, nontypesglucose.Mgdl: + max = nontypesglucose.MgdLMaximum + min = nontypesglucose.MgdLMinimum v.Float64("value", &t.Value).InRange(min, max) - case glucose.MmolL, glucose.Mmoll: - max = glucose.MmolLMaximum - min = glucose.MmolLMinimum + case nontypesglucose.MmolL, nontypesglucose.Mmoll: + max = nontypesglucose.MmolLMaximum + min = nontypesglucose.MmolLMinimum v.Float64("value", &t.Value).InRange(min, max) default: v.WithReference("value").ReportError(validator.ErrorValueNotValid()) diff --git a/alerts/config_test.go b/alerts/config_test.go index df38650710..068614d5b6 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -11,8 +11,13 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/tidepool-org/platform/data/blood/glucose" + nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/log" logTest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/pointer" "github.com/tidepool-org/platform/request" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/test" @@ -78,15 +83,15 @@ var _ = Describe("Config", func() { Expect(conf.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(conf.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) Expect(conf.High.Threshold.Value).To(Equal(10.0)) - Expect(conf.High.Threshold.Units).To(Equal(glucose.MmolL)) + Expect(conf.High.Threshold.Units).To(Equal(nontypesglucose.MmolL)) Expect(conf.Low.Enabled).To(Equal(true)) Expect(conf.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(conf.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) Expect(conf.Low.Threshold.Value).To(Equal(80.0)) - Expect(conf.Low.Threshold.Units).To(Equal(glucose.MgdL)) + Expect(conf.Low.Threshold.Units).To(Equal(nontypesglucose.MgdL)) Expect(conf.UrgentLow.Enabled).To(Equal(false)) Expect(conf.UrgentLow.Threshold.Value).To(Equal(47.5)) - Expect(conf.UrgentLow.Threshold.Units).To(Equal(glucose.MgdL)) + Expect(conf.UrgentLow.Threshold.Units).To(Equal(nontypesglucose.MgdL)) Expect(conf.NotLooping.Enabled).To(Equal(true)) Expect(conf.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) Expect(conf.NoCommunication.Enabled).To(Equal(true)) @@ -127,6 +132,44 @@ var _ = Describe("Config", func() { }) }) + Describe("Evaluate", func() { + Context("when a note is returned", func() { + It("injects the userIDs", func() { + ctx := contextWithTestLogger() + mockGlucoseData := []*glucose.Glucose{ + { + 
Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(0.0), + }, + }, + } + conf := Config{ + UserID: mockUserID1, + FollowedUserID: mockUserID2, + Alerts: Alerts{ + UrgentLow: &UrgentLowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10, + Units: nontypesglucose.MmolL, + }, + }, + }, + } + + note := conf.Evaluate(ctx, mockGlucoseData, nil) + + Expect(note).ToNot(BeNil()) + Expect(note.RecipientUserID).To(Equal(mockUserID1)) + Expect(note.FollowedUserID).To(Equal(mockUserID2)) + }) + }) + }) + Context("Base", func() { Context("Activity", func() { Context("IsActive()", func() { @@ -175,6 +218,18 @@ var _ = Describe("Config", func() { }) }) + var testGlucoseDatum = func(v float64) *glucose.Glucose { + return &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(v), + }, + } + } + Context("UrgentLowAlert", func() { Context("Threshold", func() { It("accepts values between 0 and 1000 mg/dL", func() { @@ -199,6 +254,138 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value -1 is not between 0 and 1000")) }) }) + + Context("Evaluate", func() { + testUrgentLow := func() *UrgentLowAlert { + return &UrgentLowAlert{ + Threshold: Threshold{ + Value: 4.0, + Units: nontypesglucose.MmolL, + }, + } + } + + It("handles being passed empty data", func() { + ctx := contextWithTestLogger() + var note *Note + + alert := testUrgentLow() + + Expect(func() { + note = alert.Evaluate(ctx, []*glucose.Glucose{}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + Expect(func() { + note = alert.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + + It("logs evaluation results", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testGlucoseDatum(1.1)} + + alert := testUrgentLow() + + 
Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logTest.Logger) + lgr.AssertLog(log.InfoLevel, "urgent low", log.Fields{ + "threshold": 4.0, + "value": 1.1, + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testUrgentLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testUrgentLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + was := alert.Resolved + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + + alert := testUrgentLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) + + It("validates glucose data", func() { + ctx := contextWithTestLogger() + var note *Note + + Expect(func() { + note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + }).ToNot(Panic()) + Expect(note).ToNot(BeNil()) + + 
badUnits := testGlucoseDatum(1) + badUnits.Units = nil + Expect(func() { + note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badValue := testGlucoseDatum(1) + badValue.Value = nil + Expect(func() { + note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badTime := testGlucoseDatum(1) + badTime.Time = nil + Expect(func() { + note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + }) + }) }) Context("LowAlert", func() { @@ -258,6 +445,137 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value 6h1m0s is not between 0s and 6h0m0s")) }) }) + + Context("Evaluate", func() { + testLow := func() *LowAlert { + return &LowAlert{ + Threshold: Threshold{ + Value: 4.0, + Units: nontypesglucose.MmolL, + }, + } + } + + It("handles being passed empty data", func() { + ctx := contextWithTestLogger() + var note *Note + + alert := testLow() + + Expect(func() { + note = alert.Evaluate(ctx, []*glucose.Glucose{}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + Expect(func() { + note = alert.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + + It("logs evaluation results", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testGlucoseDatum(1.1)} + + alert := testLow() + + Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logTest.Logger) + lgr.AssertLog(log.InfoLevel, "low", log.Fields{ + "threshold": 4.0, + "value": 1.1, + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + 
Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + was := alert.Resolved + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + + alert := testLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) + + It("validates glucose data", func() { + ctx := contextWithTestLogger() + var note *Note + + Expect(func() { + note = testLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + }).ToNot(Panic()) + Expect(note).ToNot(BeNil()) + + badUnits := testGlucoseDatum(1) + badUnits.Units = nil + Expect(func() { + note = testLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badValue := testGlucoseDatum(1) + badValue.Value = nil + Expect(func() { + note = testLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badTime := testGlucoseDatum(1) + badTime.Time = nil + Expect(func() { + note = testLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + }) }) Context("HighAlert", func() { @@ 
-310,6 +628,137 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value 6h1m0s is not between 0s and 6h0m0s")) }) }) + + Context("Evaluate", func() { + testHigh := func() *HighAlert { + return &HighAlert{ + Threshold: Threshold{ + Value: 20.0, + Units: nontypesglucose.MmolL, + }, + } + } + + It("handles being passed empty data", func() { + ctx := contextWithTestLogger() + var note *Note + + alert := testHigh() + + Expect(func() { + note = alert.Evaluate(ctx, []*glucose.Glucose{}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + Expect(func() { + note = alert.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + + It("logs evaluation results", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testGlucoseDatum(21.1)} + + alert := testHigh() + + Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logTest.Logger) + lgr.AssertLog(log.InfoLevel, "high", log.Fields{ + "threshold": 20.0, + "value": 21.1, + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testHigh() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testHigh() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + was := 
alert.Resolved + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + + alert := testHigh() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) + + It("validates glucose data", func() { + ctx := contextWithTestLogger() + var note *Note + + Expect(func() { + note = testHigh().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21)}) + }).ToNot(Panic()) + Expect(note).ToNot(BeNil()) + + badUnits := testGlucoseDatum(1) + badUnits.Units = nil + Expect(func() { + note = testHigh().Evaluate(ctx, []*glucose.Glucose{badUnits}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badValue := testGlucoseDatum(1) + badValue.Value = nil + Expect(func() { + note = testHigh().Evaluate(ctx, []*glucose.Glucose{badValue}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badTime := testGlucoseDatum(1) + badTime.Time = nil + Expect(func() { + note = testHigh().Evaluate(ctx, []*glucose.Glucose{badTime}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + }) }) Context("NoCommunicationAlert", func() { @@ -367,7 +816,7 @@ var _ = Describe("Config", func() { Context("repeat", func() { var defaultAlert = LowAlert{ - Threshold: Threshold{Value: 11, Units: glucose.MmolL}, + Threshold: Threshold{Value: 11, Units: nontypesglucose.MmolL}, } It("accepts values of 0 (indicating disabled)", func() { @@ -448,7 +897,7 @@ var _ = Describe("Config", func() { "value": 47.5 } } -}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) +}`, mockUserID1, mockUserID2, mockUploadID, nontypesglucose.MgdL) cfg := &Config{} err := request.DecodeObject(context.Background(), nil, buf, cfg) 
Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) @@ -466,13 +915,217 @@ var _ = Describe("Config", func() { "value": 1 } } -}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) +}`, mockUserID1, mockUserID2, mockUploadID, nontypesglucose.MgdL) cfg := &Config{} err := request.DecodeObject(context.Background(), nil, buf, cfg) Expect(err).To(MatchError("json is malformed")) }) }) +var ( + testNoCommunicationAlert = func() *NoCommunicationAlert { + return &NoCommunicationAlert{ + Base: Base{Enabled: true}, + } + } + testLowAlert = func() *LowAlert { + return &LowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 4, + Units: nontypesglucose.MmolL, + }, + } + } + testHighAlert = func() *HighAlert { + return &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10, + Units: nontypesglucose.MmolL, + }, + } + } + testUrgentLowAlert = func() *UrgentLowAlert { + return &UrgentLowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 3, + Units: nontypesglucose.MmolL, + }, + } + } + testNotLoopingAlert = func() *NotLoopingAlert { + return &NotLoopingAlert{ + Base: Base{Enabled: true}, + } + } + testNoCommunicationDatum = &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(11.0), + }, + } + testHighDatum = &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(11.0), + }, + } + testLowDatum = &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(3.9), + }, + } + testUrgentLowDatum = &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), 
+ Value: pointer.FromAny(2.9), + }, + } +) + +var _ = Describe("Alerts", func() { + Describe("LongestDelay", func() { + It("does what it says", func() { + noComm := testNoCommunicationAlert() + noComm.Delay = DurationMinutes(10 * time.Minute) + low := testLowAlert() + low.Delay = DurationMinutes(5 * time.Minute) + high := testHighAlert() + high.Delay = DurationMinutes(5 * time.Minute) + notLooping := testNotLoopingAlert() + notLooping.Delay = DurationMinutes(5 * time.Minute) + + a := Alerts{ + NoCommunication: noComm, + Low: low, + High: high, + NotLooping: notLooping, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(10 * time.Minute)) + }) + + It("ignores disabled alerts", func() { + noComm := testNoCommunicationAlert() + noComm.Delay = DurationMinutes(10 * time.Minute) + noComm.Enabled = false + low := testLowAlert() + low.Delay = DurationMinutes(7 * time.Minute) + high := testHighAlert() + high.Delay = DurationMinutes(5 * time.Minute) + notLooping := testNotLoopingAlert() + notLooping.Delay = DurationMinutes(5 * time.Minute) + + a := Alerts{ + NoCommunication: noComm, + Low: low, + High: high, + NotLooping: notLooping, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(7 * time.Minute)) + }) + + It("returns a Zero Duration when no alerts are set", func() { + a := Alerts{ + NoCommunication: nil, + Low: nil, + High: nil, + NotLooping: nil, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(time.Duration(0))) + }) + }) + + Describe("Evaluate", func() { + Context("when not communicating", func() { + It("returns only NoCommunication alerts", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testNoCommunicationDatum} + data[0].Value = pointer.FromAny(0.0) + a := Alerts{ + NoCommunication: testNoCommunicationAlert(), + UrgentLow: testUrgentLowAlert(), + Low: testLowAlert(), + High: testHighAlert(), + } + + note := a.Evaluate(ctx, data, nil) + + Expect(note).To(HaveField("Message", 
ContainSubstring(NoCommunicationMessage))) + }) + }) + + It("logs decisions", func() { + Skip("TODO logAlertEvaluation") + }) + + It("detects low data", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testLowDatum} + a := Alerts{ + Low: testLowAlert(), + } + + note := a.Evaluate(ctx, data, nil) + + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring("below low threshold")) + }) + + It("detects high data", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testHighDatum} + a := Alerts{ + High: testHighAlert(), + } + + note := a.Evaluate(ctx, data, nil) + + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring("above high threshold")) + }) + + Context("with both low and urgent low alerts detected", func() { + It("prefers urgent low", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testUrgentLowDatum} + a := Alerts{ + Low: testLowAlert(), + UrgentLow: testUrgentLowAlert(), + } + + note := a.Evaluate(ctx, data, nil) + + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring("below urgent low threshold")) + }) + }) + }) +}) + var _ = Describe("DurationMinutes", func() { It("parses 42", func() { d := DurationMinutes(0) @@ -508,20 +1161,20 @@ var _ = Describe("DurationMinutes", func() { var _ = Describe("Threshold", func() { It("accepts mg/dL", func() { - buf := buff(`{"units":"%s","value":42}`, glucose.MgdL) + buf := buff(`{"units":"%s","value":42}`, nontypesglucose.MgdL) threshold := &Threshold{} err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(glucose.MgdL)) + Expect(threshold.Units).To(Equal(nontypesglucose.MgdL)) }) It("accepts mmol/L", func() { - buf := buff(`{"units":"%s","value":42}`, glucose.MmolL) + buf := buff(`{"units":"%s","value":42}`, nontypesglucose.MmolL) threshold := &Threshold{} err := 
request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(glucose.MmolL)) + Expect(threshold.Units).To(Equal(nontypesglucose.MmolL)) }) It("rejects lb/gal", func() { buf := buff(`{"units":"%s","value":42}`, "lb/gal") @@ -534,7 +1187,7 @@ var _ = Describe("Threshold", func() { Expect(err).Should(HaveOccurred()) }) It("is case-sensitive with respect to Units", func() { - badUnits := strings.ToUpper(glucose.MmolL) + badUnits := strings.ToUpper(nontypesglucose.MmolL) buf := buff(`{"units":"%s","value":42}`, badUnits) err := request.DecodeObject(context.Background(), nil, buf, &Threshold{}) Expect(err).Should(HaveOccurred()) @@ -546,3 +1199,8 @@ var _ = Describe("Threshold", func() { func buff(format string, args ...interface{}) *bytes.Buffer { return bytes.NewBufferString(fmt.Sprintf(format, args...)) } + +func contextWithTestLogger() context.Context { + lgr := logTest.NewLogger() + return log.NewContextWithLogger(context.Background(), lgr) +} From 7b512db099b7e6b4b7234638f33d71d35a745c3f Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 8 Jul 2024 14:05:43 -0600 Subject: [PATCH 13/54] adds the alerts events consumer to the data service It uses the new asyncevents from go-common, as alerts processing requires different retry semantics than the existing solution. The Pusher interface is moved out of data/service into data/events to avoid a circular dependency. 
BACK-2554 --- data/events/alerts.go | 358 +++++++++++++++++ data/events/alerts_test.go | 639 +++++++++++++++++++++++++++++++ data/service/service/standard.go | 86 ++++- 3 files changed, 1074 insertions(+), 9 deletions(-) create mode 100644 data/events/alerts.go create mode 100644 data/events/alerts_test.go diff --git a/data/events/alerts.go b/data/events/alerts.go new file mode 100644 index 0000000000..a1dcccaef4 --- /dev/null +++ b/data/events/alerts.go @@ -0,0 +1,358 @@ +package events + +import ( + "cmp" + "context" + "os" + "slices" + "strings" + "time" + + "github.com/IBM/sarama" + "go.mongodb.org/mongo-driver/bson" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/data/store" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" + lognull "github.com/tidepool-org/platform/log/null" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/push" +) + +type Consumer struct { + Alerts AlertsClient + Data store.DataRepository + DeviceTokens auth.DeviceTokensClient + Evaluator AlertsEvaluator + Permissions permission.Client + Pusher Pusher + Tokens alerts.TokenProvider + + Logger log.Logger +} + +// DosingDecision removes a stutter to improve readability. +type DosingDecision = dosingdecision.DosingDecision + +// Glucose removes a stutter to improve readability. 
+type Glucose = glucose.Glucose + +func (c *Consumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + if msg == nil { + c.logger(ctx).Info("UNEXPECTED: nil message; ignoring") + return nil + } + + switch { + case strings.HasSuffix(msg.Topic, ".data.alerts"): + return c.consumeAlertsConfigs(ctx, session, msg) + case strings.HasSuffix(msg.Topic, ".data.deviceData.alerts"): + return c.consumeDeviceData(ctx, session, msg) + default: + c.logger(ctx).WithField("topic", msg.Topic). + Infof("UNEXPECTED: topic; ignoring") + } + + return nil +} + +func (c *Consumer) consumeAlertsConfigs(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { + + cfg := &alerts.Config{} + if err := unmarshalMessageValue(msg.Value, cfg); err != nil { + return err + } + lgr := c.logger(ctx) + lgr.WithField("cfg", cfg).Info("consuming an alerts config message") + + ctxLog := c.logger(ctx).WithField("followedUserID", cfg.FollowedUserID) + ctx = log.NewContextWithLogger(ctx, ctxLog) + + notes, err := c.Evaluator.Evaluate(ctx, cfg.FollowedUserID) + if err != nil { + format := "Unable to evalaute alerts configs triggered event for user %s" + return errors.Wrapf(err, format, cfg.UserID) + } + ctxLog.WithField("notes", notes).Debug("notes generated from alerts config") + + c.pushNotes(ctx, notes) + + session.MarkMessage(msg, "") + lgr.WithField("message", msg).Debug("marked") + return nil +} + +func (c *Consumer) consumeDeviceData(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + datum := &Glucose{} + if err := unmarshalMessageValue(msg.Value, datum); err != nil { + return err + } + lgr := c.logger(ctx) + lgr.WithField("data", datum).Info("consuming a device data message") + + if datum.UserID == nil { + return errors.New("Unable to retrieve alerts configs: userID is nil") + } + ctx = log.NewContextWithLogger(ctx, 
lgr.WithField("followedUserID", *datum.UserID)) + notes, err := c.Evaluator.Evaluate(ctx, *datum.UserID) + if err != nil { + format := "Unable to evalaute device data triggered event for user %s" + return errors.Wrapf(err, format, *datum.UserID) + } + for idx, note := range notes { + lgr.WithField("idx", idx).WithField("note", note).Debug("notes") + } + + c.pushNotes(ctx, notes) + + session.MarkMessage(msg, "") + lgr.WithField("message", msg).Debug("marked") + return nil +} + +func (c *Consumer) pushNotes(ctx context.Context, notes []*alerts.Note) { + lgr := c.logger(ctx) + + // Notes could be pushed into a Kafka topic to have a more durable retry, + // but that can be added later. + for _, note := range notes { + lgr := lgr.WithField("recipientUserID", note.RecipientUserID) + tokens, err := c.DeviceTokens.GetDeviceTokens(ctx, note.RecipientUserID) + if err != nil { + lgr.WithError(err).Info("Unable to retrieve device tokens") + } + if len(tokens) == 0 { + lgr.Debug("no device tokens found, won't push any notifications") + } + pushNote := push.FromNote(note) + for _, token := range tokens { + if err := c.Pusher.Push(ctx, token, pushNote); err != nil { + lgr.WithError(err).Info("Unable to push notification") + } + } + } +} + +// logger produces a log.Logger. +// +// It tries a number of options before falling back to a null Logger. +func (c *Consumer) logger(ctx context.Context) log.Logger { + // A context's Logger is preferred, as it has the most... context. 
+ if ctxLgr := log.LoggerFromContext(ctx); ctxLgr != nil { + return ctxLgr + } + if c.Logger != nil { + return c.Logger + } + fallback, err := logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + fallback = lognull.NewLogger() + } + return fallback +} + +type AlertsEvaluator interface { + Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) +} + +func NewAlertsEvaluator(alerts AlertsClient, data store.DataRepository, + perms permission.Client, tokens alerts.TokenProvider) *evaluator { + + return &evaluator{ + Alerts: alerts, + Data: data, + Permissions: perms, + Tokens: tokens, + } +} + +// evaluator implements AlertsEvaluator. +type evaluator struct { + Alerts AlertsClient + Data store.DataRepository + Permissions permission.Client + Tokens alerts.TokenProvider +} + +// logger produces a log.Logger. +// +// It tries a number of options before falling back to a null Logger. +func (e *evaluator) logger(ctx context.Context) log.Logger { + // A context's Logger is preferred, as it has the most... context. + if ctxLgr := log.LoggerFromContext(ctx); ctxLgr != nil { + return ctxLgr + } + fallback, err := logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + fallback = lognull.NewLogger() + } + return fallback +} + +// Evaluate followers' alerts.Configs to generate alert notifications. 
+func (e *evaluator) Evaluate(ctx context.Context, followedUserID string) ( + []*alerts.Note, error) { + + alertsConfigs, err := e.gatherAlertsConfigs(ctx, followedUserID) + if err != nil { + return nil, err + } + e.logger(ctx).Debugf("%d alerts configs found", len(alertsConfigs)) + + alertsConfigsByUploadID := e.mapAlertsConfigsByUploadID(alertsConfigs) + + notes := []*alerts.Note{} + for uploadID, cfgs := range alertsConfigsByUploadID { + resp, err := e.gatherData(ctx, followedUserID, uploadID, cfgs) + if err != nil { + return nil, err + } + notes = slices.Concat(notes, e.generateNotes(ctx, cfgs, resp)) + } + + return notes, nil +} + +func (e *evaluator) mapAlertsConfigsByUploadID(cfgs []*alerts.Config) map[string][]*alerts.Config { + mapped := map[string][]*alerts.Config{} + for _, cfg := range cfgs { + if _, found := mapped[cfg.UploadID]; !found { + mapped[cfg.UploadID] = []*alerts.Config{} + } + mapped[cfg.UploadID] = append(mapped[cfg.UploadID], cfg) + } + return mapped +} + +func (e *evaluator) gatherAlertsConfigs(ctx context.Context, + followedUserID string) ([]*alerts.Config, error) { + + alertsConfigs, err := e.Alerts.List(ctx, followedUserID) + if err != nil { + return nil, err + } + e.logger(ctx).Debugf("after List, %d alerts configs", len(alertsConfigs)) + alertsConfigs = slices.DeleteFunc(alertsConfigs, e.authDenied(ctx)) + e.logger(ctx).Debugf("after perms check, %d alerts configs", len(alertsConfigs)) + return alertsConfigs, nil +} + +// authDenied builds functions that enable slices.DeleteFunc to remove +// unauthorized users' alerts.Configs. +// +// Via a closure it's able to inject information from the Context and the +// evaluator itself into the resulting function. 
+func (e *evaluator) authDenied(ctx context.Context) func(ac *alerts.Config) bool { + lgr := e.logger(ctx) + return func(ac *alerts.Config) bool { + if ac == nil { + return true + } + lgr = lgr.WithFields(log.Fields{ + "userID": ac.UserID, + "followedUserID": ac.FollowedUserID, + }) + token, err := e.Tokens.ServerSessionToken() + if err != nil { + lgr.WithError(err).Warn("Unable to confirm permissions; skipping") + return false + } + ctx = auth.NewContextWithServerSessionToken(ctx, token) + perms, err := e.Permissions.GetUserPermissions(ctx, ac.UserID, ac.FollowedUserID) + if err != nil { + lgr.WithError(err).Warn("Unable to confirm permissions; skipping") + return true + } + if _, found := perms[permission.Follow]; !found { + lgr.Debug("permission denied: skipping") + return true + } + return false + } +} + +func (e *evaluator) gatherData(ctx context.Context, followedUserID, uploadID string, + alertsConfigs []*alerts.Config) (*store.AlertableResponse, error) { + + if len(alertsConfigs) == 0 { + return nil, nil + } + + longestDelay := slices.MaxFunc(alertsConfigs, func(i, j *alerts.Config) int { + return cmp.Compare(i.LongestDelay(), j.LongestDelay()) + }).LongestDelay() + longestDelay = max(5*time.Minute, longestDelay) + e.logger(ctx).WithField("longestDelay", longestDelay).Debug("here it is") + params := store.AlertableParams{ + UserID: followedUserID, + UploadID: uploadID, + Start: time.Now().Add(-longestDelay), + } + resp, err := e.Data.GetAlertableData(ctx, params) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *evaluator) generateNotes(ctx context.Context, + alertsConfigs []*alerts.Config, resp *store.AlertableResponse) []*alerts.Note { + + if len(alertsConfigs) == 0 { + return nil + } + + lgr := e.logger(ctx) + notes := []*alerts.Note{} + for _, alertsConfig := range alertsConfigs { + l := lgr.WithFields(log.Fields{ + "userID": alertsConfig.UserID, + "followedUserID": alertsConfig.FollowedUserID, + "uploadID": 
alertsConfig.UploadID, + }) + c := log.NewContextWithLogger(ctx, l) + note := alertsConfig.Evaluate(c, resp.Glucose, resp.DosingDecisions) + if note != nil { + notes = append(notes, note) + continue + } + } + + return notes +} + +func unmarshalMessageValue[A any](b []byte, payload *A) error { + wrapper := &struct { + FullDocument A `json:"fullDocument"` + }{} + if err := bson.UnmarshalExtJSON(b, false, wrapper); err != nil { + return errors.Wrap(err, "Unable to unmarshal ExtJSON") + } + *payload = wrapper.FullDocument + return nil +} + +type AlertsClient interface { + Delete(context.Context, *alerts.Config) error + Get(context.Context, *alerts.Config) (*alerts.Config, error) + List(_ context.Context, userID string) ([]*alerts.Config, error) + Upsert(context.Context, *alerts.Config) error +} + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go new file mode 100644 index 0000000000..64ed0f8bca --- /dev/null +++ b/data/events/alerts_test.go @@ -0,0 +1,639 @@ +package events + +import ( + "context" + "time" + + "github.com/IBM/sarama" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + + "github.com/tidepool-org/platform/alerts" + nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/store" + storetest "github.com/tidepool-org/platform/data/store/test" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" + "github.com/tidepool-org/platform/push" +) + +const ( + testUserID = "test-user-id" + testFollowedUserID = "test-followed-user-id" + testUserNoPermsID = "test-user-no-perms" + testUploadID = "test-upload-id" +) + +var ( + testMongoUrgentLowResponse = &store.AlertableResponse{ + Glucose: []*glucose.Glucose{ + newTestStaticDatumMmolL(1.0)}, + } +) + +var _ = Describe("Consumer", func() { + + Describe("Consume", func() { + It("ignores nil messages", func() { + ctx, _ := addLogger(context.Background()) + c := &Consumer{} + + Expect(c.Consume(ctx, nil, nil)).To(Succeed()) + }) + + It("processes alerts config events", func() { + cfg := &alerts.Config{ + UserID: testUserID, + FollowedUserID: testFollowedUserID, + Alerts: alerts.Alerts{ + Low: &alerts.LowAlert{ + Base: alerts.Base{ + Enabled: true}, + Threshold: alerts.Threshold{ + Value: 101.1, + Units: "mg/dL", + }, + }, + }, + } + kafkaMsg := newAlertsMockConsumerMessage(".data.alerts", cfg) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Session.MarkCalls).To(Equal(1)) + }) + + It("processes device data events", func() { + blood := 
&glucose.Glucose{ + Blood: blood.Blood{ + Units: pointer.FromAny("mmol/L"), + Value: pointer.FromAny(7.2), + Base: types.Base{ + UserID: pointer.FromAny(testFollowedUserID), + }, + }, + } + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Session.MarkCalls).To(Equal(1)) + }) + + }) + + Describe("Evaluator", func() { + Describe("Evaluate", func() { + It("checks that alerts config owners have permission", func() { + testLogger := logtest.NewLogger() + ctx := log.NewContextWithLogger(context.Background(), testLogger) + + eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) + deps.Permissions.Allow(testUserID, permission.Follow, testFollowedUserID) + deps.Permissions.DenyAll(testUserNoPermsID, testFollowedUserID) + deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserNoPermsID)) + deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserID)) + + notes, err := eval.Evaluate(ctx, testFollowedUserID) + + Expect(err).To(Succeed()) + Expect(notes).To(ConsistOf(HaveField("RecipientUserID", testUserID))) + }) + + It("uses the longest delay", func() { + + }) + }) + + }) + + // Describe("evaluateUrgentLow", func() { + // It("can't function without datum units", func() { + // ctx, _ := addLogger(context.Background()) + // alert := newTestUrgentLowAlert() + // datum := newTestStaticDatumMmolL(11) + // datum.Blood.Units = nil + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) + + // Expect(err).To(MatchError("Unable to evaluate datum: Units, Value, or Time is nil")) + // }) + + // It("can't function without datum value", func() { + // ctx, _ := addLogger(context.Background()) + // 
alert := newTestUrgentLowAlert() + // datum := newTestStaticDatumMmolL(11) + // datum.Blood.Value = nil + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) + + // Expect(err).To(MatchError("Unable to evaluate datum: Units, Value, or Time is nil")) + // }) + + // It("can't function without datum time", func() { + // ctx, _ := addLogger(context.Background()) + // alert := newTestUrgentLowAlert() + // datum := newTestStaticDatumMmolL(11) + // datum.Blood.Time = nil + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) + // Expect(err).To(MatchError("Unable to evaluate datum: Units, Value, or Time is nil")) + // }) + + // It("is marked resolved", func() { + // ctx, _ := addLogger(context.Background()) + // datum := newTestStaticDatumMmolL(11) + // alert := newTestUrgentLowAlert() + // alert.Threshold.Value = *datum.Blood.Value - 1 + // userID := "test-user-id" + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) + // Expect(err).To(Succeed()) + // Expect(updated).To(BeTrue()) + // Expect(alert.Resolved).To(BeTemporally("~", time.Now(), time.Second)) + // }) + + // It("is marked both notified and triggered", func() { + // ctx, _ := addLogger(context.Background()) + // datum := newTestStaticDatumMmolL(11) + // alert := newTestUrgentLowAlert() + // alert.Threshold.Value = *datum.Blood.Value + 1 + // userID := "test-user-id" + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) + // Expect(err).To(Succeed()) + // Expect(updated).To(BeTrue()) + // Expect(alert.Sent).To(BeTemporally("~", 
time.Now(), time.Second)) + // Expect(alert.Triggered).To(BeTemporally("~", time.Now(), time.Second)) + // }) + + // It("sends notifications regardless of previous notification time", func() { + // ctx, _ := addLogger(context.Background()) + // datum := newTestStaticDatumMmolL(11) + // alert := newTestUrgentLowAlert() + // lastTime := time.Now().Add(-10 * time.Second) + // alert.Activity.Sent = lastTime + // alert.Threshold.Value = *datum.Blood.Value + 1 + // userID := "test-user-id" + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) + // Expect(err).To(Succeed()) + // Expect(updated).To(BeTrue()) + // Expect(alert.Sent).To(BeTemporally("~", time.Now(), time.Second)) + // }) + // }) +}) + +type consumerTestDeps struct { + Alerts *mockAlertsConfigClient + Context context.Context + Cursor *mongo.Cursor + Evaluator *mockStaticEvaluator + Logger log.Logger + Permissions *mockPermissionsClient + Repo *storetest.DataRepository + Session *mockConsumerGroupSession + Tokens alerts.TokenProvider +} + +func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { + GinkgoHelper() + ctx, logger := addLogger(context.Background()) + alertsClient := newMockAlertsConfigClient([]*alerts.Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + Alerts: alerts.Alerts{}, + }, + }, nil) + dataRepo := storetest.NewDataRepository() + dataRepo.GetLastUpdatedForUserOutputs = []storetest.GetLastUpdatedForUserOutput{} + augmentedDocs := augmentMockMongoDocs(docs) + cur := newMockMongoCursor(augmentedDocs) + dataRepo.GetDataRangeOutputs = []storetest.GetDataRangeOutput{ + {Error: nil, Cursor: cur}, + } + tokens := &mockAlertsTokenProvider{Token: "test-token"} + permissions := newMockPermissionsClient() + evaluator := newMockStaticEvaluator() + + return &Consumer{ + Alerts: alertsClient, + Evaluator: evaluator, + Tokens: tokens, + Data: 
dataRepo, + Permissions: permissions, + }, &consumerTestDeps{ + Alerts: alertsClient, + Context: ctx, + Cursor: cur, + Evaluator: evaluator, + Repo: dataRepo, + Session: &mockConsumerGroupSession{}, + Logger: logger, + Tokens: tokens, + Permissions: permissions, + } +} + +func newEvaluatorTestDeps(responses []*store.AlertableResponse) (*evaluator, *evaluatorTestDeps) { + alertsClient := newMockAlertsConfigClient(nil, nil) + dataRepo := storetest.NewDataRepository() + dataRepo.GetLastUpdatedForUserOutputs = []storetest.GetLastUpdatedForUserOutput{} + for _, r := range responses { + out := storetest.GetAlertableDataOutput{Response: r} + dataRepo.GetAlertableDataOutputs = append(dataRepo.GetAlertableDataOutputs, out) + } + permissions := newMockPermissionsClient() + tokens := newMockTokensProvider() + return &evaluator{ + Alerts: alertsClient, + Data: dataRepo, + Permissions: permissions, + Tokens: tokens, + }, &evaluatorTestDeps{ + Alerts: alertsClient, + Permissions: permissions, + } +} + +type evaluatorTestDeps struct { + Alerts *mockAlertsConfigClient + Permissions *mockPermissionsClient +} + +// mockEvaluator implements Evaluator. 
+type mockEvaluator struct { + Evaluations map[string][]mockEvaluatorResponse + EvaluateCalls map[string]int +} + +type mockEvaluatorResponse struct { + Notes []*alerts.Note + Error error +} + +func newMockEvaluator() *mockEvaluator { + return &mockEvaluator{ + Evaluations: map[string][]mockEvaluatorResponse{}, + EvaluateCalls: map[string]int{}, + } +} + +func (e *mockEvaluator) Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) { + if _, found := e.Evaluations[followedUserID]; !found { + return nil, nil + } + resp := e.Evaluations[followedUserID][0] + if len(e.Evaluations[followedUserID]) > 1 { + e.Evaluations[followedUserID] = e.Evaluations[followedUserID][1:] + } + e.EvaluateCalls[followedUserID] += 1 + if resp.Error != nil { + return nil, resp.Error + } + return resp.Notes, nil +} + +func (e *mockEvaluator) EvaluateCallsTotal() int { + total := 0 + for _, val := range e.EvaluateCalls { + total += val + } + return total +} + +// mockStaticEvaluator wraps mock evaluator with a static response. +// +// Useful when testing Consumer behavior, when the behavior of the Evaluator +// isn't relevant to the Consumer test. 
+type mockStaticEvaluator struct { + *mockEvaluator +} + +func newMockStaticEvaluator() *mockStaticEvaluator { + return &mockStaticEvaluator{newMockEvaluator()} +} + +func (e *mockStaticEvaluator) Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) { + e.EvaluateCalls[followedUserID] += 1 + return nil, nil +} + +func newAlertsMockConsumerMessage(topic string, v any) *sarama.ConsumerMessage { + GinkgoHelper() + doc := &struct { + FullDocument any `json:"fullDocument" bson:"fullDocument"` + }{FullDocument: v} + vBytes, err := bson.MarshalExtJSON(doc, false, false) + Expect(err).To(Succeed()) + return &sarama.ConsumerMessage{ + Value: vBytes, + Topic: topic, + } +} + +func addLogger(ctx context.Context) (context.Context, log.Logger) { + GinkgoHelper() + if ctx == nil { + ctx = context.Background() + } + + lgr := newTestLogger() + return log.NewContextWithLogger(ctx, lgr), lgr +} + +func newTestLogger() log.Logger { + GinkgoHelper() + lgr := logtest.NewLogger() + return lgr +} + +func augmentMockMongoDocs(inDocs []interface{}) []interface{} { + defaultDoc := bson.M{ + "_userId": testFollowedUserID, + "_active": true, + "type": "upload", + "time": time.Now(), + } + outDocs := []interface{}{} + for _, inDoc := range inDocs { + newDoc := defaultDoc + switch v := (inDoc).(type) { + case map[string]interface{}: + for key, val := range v { + newDoc[key] = val + } + outDocs = append(outDocs, newDoc) + default: + outDocs = append(outDocs, inDoc) + } + } + return outDocs +} + +func newMockMongoCursor(docs []interface{}) *mongo.Cursor { + GinkgoHelper() + cur, err := mongo.NewCursorFromDocuments(docs, nil, nil) + Expect(err).To(Succeed()) + return cur +} + +func newTestStaticDatumMmolL(value float64) *glucose.Glucose { + return &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromTime(time.Now()), + }, + Units: pointer.FromString(nontypesglucose.MmolL), + Value: pointer.FromFloat64(value), + }, + } +} + +func 
newTestUrgentLowAlert() *alerts.UrgentLowAlert { + return &alerts.UrgentLowAlert{ + Base: alerts.Base{ + Enabled: true, + Activity: alerts.Activity{}, + }, + Threshold: alerts.Threshold{ + Units: nontypesglucose.MmolL, + }, + } +} + +type mockDeviceTokensClient struct { + Error error + Tokens []*devicetokens.DeviceToken +} + +func newMockDeviceTokensClient() *mockDeviceTokensClient { + return &mockDeviceTokensClient{ + Tokens: []*devicetokens.DeviceToken{}, + } +} + +// // testingT is a subset of testing.TB +// type testingT interface { +// Errorf(format string, args ...any) +// Fatalf(format string, args ...any) +// } + +func (m *mockDeviceTokensClient) GetDeviceTokens(ctx context.Context, + userID string) ([]*devicetokens.DeviceToken, error) { + + if m.Error != nil { + return nil, m.Error + } + return m.Tokens, nil +} + +type mockPusher struct { + Pushes []string +} + +func newMockPusher() *mockPusher { + return &mockPusher{ + Pushes: []string{}, + } +} + +func (p *mockPusher) Push(ctx context.Context, + deviceToken *devicetokens.DeviceToken, notification *push.Notification) error { + p.Pushes = append(p.Pushes, notification.Message) + return nil +} + +type mockAlertsConfigClient struct { + Error error + Configs []*alerts.Config +} + +func newMockAlertsConfigClient(c []*alerts.Config, err error) *mockAlertsConfigClient { + if c == nil { + c = []*alerts.Config{} + } + return &mockAlertsConfigClient{ + Configs: c, + Error: err, + } +} + +func (c *mockAlertsConfigClient) Delete(_ context.Context, _ *alerts.Config) error { + return c.Error +} + +func (c *mockAlertsConfigClient) Get(_ context.Context, _ *alerts.Config) (*alerts.Config, error) { + if c.Error != nil { + return nil, c.Error + } else if len(c.Configs) > 0 { + return c.Configs[0], nil + } + return nil, nil +} + +func (c *mockAlertsConfigClient) List(_ context.Context, userID string) ([]*alerts.Config, error) { + if c.Error != nil { + return nil, c.Error + } else if len(c.Configs) > 0 { + return c.Configs, 
nil + } + return nil, nil +} + +func (c *mockAlertsConfigClient) Upsert(_ context.Context, _ *alerts.Config) error { + return c.Error +} + +type mockConsumerGroupSession struct { + MarkCalls int +} + +func (s *mockConsumerGroupSession) Claims() map[string][]int32 { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MemberID() string { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) GenerationID() int32 { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) Commit() { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { + s.MarkCalls++ +} + +func (s *mockConsumerGroupSession) Context() context.Context { + panic("not implemented") // TODO: Implement +} + +type mockAlertsTokenProvider struct { + Token string + Error error +} + +func (p *mockAlertsTokenProvider) ServerSessionToken() (string, error) { + if p.Error != nil { + return "", p.Error + } + return p.Token, nil +} + +type mockPermissionsClient struct { + Error error + Perms map[string]permission.Permissions +} + +func newMockPermissionsClient() *mockPermissionsClient { + return &mockPermissionsClient{ + Perms: map[string]permission.Permissions{}, + } +} + +func (c *mockPermissionsClient) Key(requesterUserID, targetUserID string) string { + return requesterUserID + targetUserID +} + +func (c *mockPermissionsClient) Allow(requestUserID, perm, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + if _, found := c.Perms[key]; !found { + c.Perms[key] = permission.Permissions{} + } + 
c.Perms[key][perm] = permission.Permission{} +} + +func (c *mockPermissionsClient) DenyAll(requestUserID, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + delete(c.Perms, key) +} + +func (c *mockPermissionsClient) GetUserPermissions(ctx context.Context, requestUserID string, targetUserID string) (permission.Permissions, error) { + if c.Error != nil { + return nil, c.Error + } + if p, ok := c.Perms[c.Key(requestUserID, targetUserID)]; ok { + return p, nil + } else { + return nil, errors.New("test error NOT FOUND") + } +} + +type mockTokensProvider struct{} + +func newMockTokensProvider() *mockTokensProvider { + return &mockTokensProvider{} +} + +func (p *mockTokensProvider) ServerSessionToken() (string, error) { + return "test-server-session-token", nil +} + +func testAlertsConfigUrgentLow(userID string) *alerts.Config { + return &alerts.Config{ + UserID: userID, + FollowedUserID: testFollowedUserID, + UploadID: testUploadID, + Alerts: alerts.Alerts{ + UrgentLow: &alerts.UrgentLowAlert{ + Base: alerts.Base{ + Enabled: true, + Activity: alerts.Activity{}, + }, + Threshold: alerts.Threshold{ + Value: 10.0, + Units: nontypesglucose.MgdL, + }, + }, + }, + } +} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index c08104f24d..6aaa6fe3d3 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -2,6 +2,7 @@ package service import ( "context" + "strings" "github.com/IBM/sarama" "github.com/kelseyhightower/envconfig" @@ -18,7 +19,6 @@ import ( dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" dataSourceStoreStructuredMongo "github.com/tidepool-org/platform/data/source/store/structured/mongo" dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" - "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/events" "github.com/tidepool-org/platform/log" @@ -44,8 +44,9 @@ type 
Standard struct { dataClient *Client clinicsClient *clinics.Client dataSourceClient *dataSourceServiceClient.Client - pusher Pusher + pusher dataEvents.Pusher userEventsHandler events.Runner + alertsEventsHandler events.Runner api *api.Standard server *server.Standard } @@ -97,6 +98,9 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeUserEventsHandler(); err != nil { return err } + if err := s.initializeAlertsEventsHandler(); err != nil { + return err + } if err := s.initializeAPI(); err != nil { return err } @@ -117,6 +121,13 @@ func (s *Standard) Terminate() { } s.userEventsHandler = nil } + if s.alertsEventsHandler != nil { + s.Logger().Info("Terminating the alertsEventsHandler") + if err := s.alertsEventsHandler.Terminate(); err != nil { + s.Logger().Errorf("Error while terminating the alertsEventsHandler: %v", err) + } + s.alertsEventsHandler = nil + } s.api = nil s.dataClient = nil if s.syncTaskStore != nil { @@ -147,6 +158,9 @@ func (s *Standard) Run() error { go func() { errs <- s.userEventsHandler.Run() }() + go func() { + errs <- s.alertsEventsHandler.Run() + }() go func() { errs <- s.server.Serve() }() @@ -434,12 +448,6 @@ func (s *Standard) initializeSaramaLogger() error { return nil } -// Pusher is a service-agnostic interface for sending push notifications. -type Pusher interface { - // Push a notification to a device. 
- Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error -} - func (s *Standard) initializePusher() error { var err error @@ -453,7 +461,7 @@ func (s *Standard) initializePusher() error { return errors.Wrap(err, "Unable to process APNs pusher config") } - var pusher Pusher + var pusher dataEvents.Pusher pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, apns2Config.TeamID, apns2Config.BundleID) if err != nil { @@ -464,3 +472,63 @@ func (s *Standard) initializePusher() error { return nil } + +func (s *Standard) initializeAlertsEventsHandler() error { + s.Logger().Debug("Initializing alerts events handler") + + commonConfig := eventsCommon.NewConfig() + if err := commonConfig.LoadFromEnv(); err != nil { + return err + } + + // In addition to the CloudEventsConfig, additional specific config values + // are needed. + config := &struct { + KafkaAlertsTopics []string `envconfig:"KAFKA_ALERTS_TOPICS" default:"alerts,deviceData.alerts"` + KafkaAlertsGroupID string `envconfig:"KAFKA_ALERTS_CONSUMER_GROUP" required:"true"` + }{} + if err := envconfig.Process("", config); err != nil { + return errors.Wrap(err, "Unable to process envconfig") + } + + // Some kafka topics use a `-` as a prefix. But MongoDB CDC topics are created with + // `.`. This code is using CDC topics, so ensuring that a `.` is used for alerts events + // lines everything up as expected. 
+ topicPrefix := strings.ReplaceAll(commonConfig.KafkaTopicPrefix, "-", ".") + prefixedTopics := make([]string, 0, len(config.KafkaAlertsTopics)) + for _, topic := range config.KafkaAlertsTopics { + prefixedTopics = append(prefixedTopics, topicPrefix+topic) + } + + alerts := s.dataStore.NewAlertsRepository() + dataRepo := s.dataStore.NewDataRepository() + s.Logger().WithField("permissionClient", s.permissionClient).Debug("yo!") + ec := &dataEvents.Consumer{ + Alerts: alerts, + Data: dataRepo, + DeviceTokens: s.AuthClient(), + Evaluator: dataEvents.NewAlertsEvaluator(alerts, dataRepo, s.permissionClient, s.AuthClient()), + Permissions: s.permissionClient, + Pusher: s.pusher, + Tokens: s.AuthClient(), + Logger: s.Logger(), + } + + runnerCfg := dataEvents.SaramaRunnerConfig{ + Brokers: commonConfig.KafkaBrokers, + GroupID: config.KafkaAlertsGroupID, + Logger: s.Logger(), + Topics: prefixedTopics, + Sarama: commonConfig.SaramaConfig, + MessageConsumer: &dataEvents.AlertsEventsConsumer{ + Consumer: ec, + }, + } + runner := &dataEvents.SaramaRunner{Config: runnerCfg} + if err := runner.Initialize(); err != nil { + return errors.Wrap(err, "Unable to initialize alerts events handler runner") + } + s.alertsEventsHandler = runner + + return nil +} From 35824382ec571faab3a0279d8a90269f0e2b0d1c Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Thu, 11 Jul 2024 15:15:06 -0600 Subject: [PATCH 14/54] remove some debugging logs No longer needed --- data/events/alerts.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/data/events/alerts.go b/data/events/alerts.go index a1dcccaef4..819db3a1af 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -208,7 +208,6 @@ func (e *evaluator) Evaluate(ctx context.Context, followedUserID string) ( if err != nil { return nil, err } - e.logger(ctx).Debugf("%d alerts configs found", len(alertsConfigs)) alertsConfigsByUploadID := e.mapAlertsConfigsByUploadID(alertsConfigs) @@ -242,9 +241,7 @@ func (e *evaluator) 
gatherAlertsConfigs(ctx context.Context, if err != nil { return nil, err } - e.logger(ctx).Debugf("after List, %d alerts configs", len(alertsConfigs)) alertsConfigs = slices.DeleteFunc(alertsConfigs, e.authDenied(ctx)) - e.logger(ctx).Debugf("after perms check, %d alerts configs", len(alertsConfigs)) return alertsConfigs, nil } @@ -293,7 +290,6 @@ func (e *evaluator) gatherData(ctx context.Context, followedUserID, uploadID str return cmp.Compare(i.LongestDelay(), j.LongestDelay()) }).LongestDelay() longestDelay = max(5*time.Minute, longestDelay) - e.logger(ctx).WithField("longestDelay", longestDelay).Debug("here it is") params := store.AlertableParams{ UserID: followedUserID, UploadID: uploadID, From bbe380e1d23979c433a4f4b4edfeba573fa28597 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 12 Jul 2024 10:39:25 -0600 Subject: [PATCH 15/54] small fixes from code review --- auth/store/mongo/device_tokens_repository.go | 2 +- data/events/events.go | 1 - data/service/service/standard.go | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/auth/store/mongo/device_tokens_repository.go b/auth/store/mongo/device_tokens_repository.go index d338c27ea3..8cc60fffbb 100644 --- a/auth/store/mongo/device_tokens_repository.go +++ b/auth/store/mongo/device_tokens_repository.go @@ -23,7 +23,7 @@ func (r *deviceTokenRepo) GetAllByUserID(ctx context.Context, userID string) ([] return nil, err } defer cursor.Close(ctx) - docs := make([]*devicetokens.Document, 0, cursor.RemainingBatchLength()) + var docs []*devicetokens.Document if err := cursor.All(ctx, &docs); err != nil { return nil, err } diff --git a/data/events/events.go b/data/events/events.go index 10f9a664f9..60deb640d9 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -183,7 +183,6 @@ func CappedExponentialBinaryDelay(cap time.Duration) func(int) time.Duration { } } -// TODO: implement me!! 
type AlertsEventsConsumer struct { Consumer asyncevents.SaramaMessageConsumer } diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 6aaa6fe3d3..9e7ec5d3c6 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -502,7 +502,6 @@ func (s *Standard) initializeAlertsEventsHandler() error { alerts := s.dataStore.NewAlertsRepository() dataRepo := s.dataStore.NewDataRepository() - s.Logger().WithField("permissionClient", s.permissionClient).Debug("yo!") ec := &dataEvents.Consumer{ Alerts: alerts, Data: dataRepo, From fb6208c25bdb7c4186f7e608890794ffa00e0dac Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 12 Jul 2024 11:00:46 -0600 Subject: [PATCH 16/54] rename Note => Notification In response to request during code review. --- alerts/config.go | 54 +++++++++-------- alerts/config_test.go | 116 ++++++++++++++++++------------------- data/events/alerts.go | 28 ++++----- data/events/alerts_test.go | 14 +++-- push/logpush.go | 8 ++- push/push.go | 24 ++++---- 6 files changed, 129 insertions(+), 115 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index f26387c1b8..20be69333b 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -60,17 +60,17 @@ func (c Config) Validate(validator structure.Validator) { // While this method, or the methods it calls, can fail, there's no point in returning an // error. Instead errors are logged before continuing. This is to ensure that any possible alert // that should be triggered, will be triggered. 
-func (c Config) Evaluate(ctx context.Context, gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Note { - n := c.Alerts.Evaluate(ctx, gd, dd) - if n != nil { - n.FollowedUserID = c.FollowedUserID - n.RecipientUserID = c.UserID +func (c Config) Evaluate(ctx context.Context, gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Notification { + notification := c.Alerts.Evaluate(ctx, gd, dd) + if notification != nil { + notification.FollowedUserID = c.FollowedUserID + notification.RecipientUserID = c.UserID } if lgr := log.LoggerFromContext(ctx); lgr != nil { - lgr.WithField("note", n).Info("evaluated alert") + lgr.WithField("notification", notification).Info("evaluated alert") } - return n + return notification } // LongestDelay of the delays set on enabled alerts. @@ -117,7 +117,7 @@ func (a Alerts) Validate(validator structure.Validator) { // Evaluations are performed according to priority. The process is // "short-circuited" at the first indicated notification. func (a Alerts) Evaluate(ctx context.Context, - gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Note { + gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Notification { if a.NoCommunication != nil && a.NoCommunication.Enabled { if n := a.NoCommunication.Evaluate(ctx, gd); n != nil { @@ -160,7 +160,7 @@ func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) } -func (b Base) Evaluate(ctx context.Context, data []*glucose.Glucose) *Note { +func (b Base) Evaluate(ctx context.Context, data []*glucose.Glucose) *Notification { if lgr := log.LoggerFromContext(ctx); lgr != nil { lgr.Warn("alerts.Base.Evaluate called, this shouldn't happen!") } @@ -221,7 +221,7 @@ func (a UrgentLowAlert) Validate(validator structure.Validator) { // Evaluate urgent low condition. // // Assumes data is pre-sorted in descending order by Time. 
-func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { +func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (notification *Notification) { lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for urgent low") @@ -233,7 +233,9 @@ func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) lgr.WithError(err).Warn("Unable to evaluate urgent low") return nil } - defer func() { logGlucoseAlertEvaluation(lgr, "urgent low", note, okDatum, okThreshold) }() + defer func() { + logGlucoseAlertEvaluation(lgr, "urgent low", notification, okDatum, okThreshold) + }() active := okDatum < okThreshold if !active { if a.IsActive() { @@ -244,7 +246,7 @@ func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) if !a.IsActive() { a.Triggered = time.Now() } - return &Note{Message: genGlucoseThresholdMessage("below urgent low")} + return &Notification{Message: genGlucoseThresholdMessage("below urgent low")} } func validateGlucoseAlertDatum(datum *glucose.Glucose, t Threshold) (float64, float64, error) { @@ -271,7 +273,7 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { } // Evaluate if the device is looping. -func (a NotLoopingAlert) Evaluate(ctx context.Context, decisions []*dosingdecision.DosingDecision) (note *Note) { +func (a NotLoopingAlert) Evaluate(ctx context.Context, decisions []*dosingdecision.DosingDecision) (notifcation *Notification) { // TODO will be implemented in the near future. return nil } @@ -295,7 +297,7 @@ func (a NoCommunicationAlert) Validate(validator structure.Validator) { // Evaluate if CGM data is being received by Tidepool. // // Assumes data is pre-sorted by Time in descending order. 
-func (a NoCommunicationAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) *Note { +func (a NoCommunicationAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) *Notification { var newest time.Time for _, d := range data { if d != nil && d.Time != nil && !(*d.Time).IsZero() { @@ -304,7 +306,7 @@ func (a NoCommunicationAlert) Evaluate(ctx context.Context, data []*glucose.Gluc } } if time.Since(newest) > a.Delay.Duration() { - return &Note{Message: NoCommunicationMessage} + return &Notification{Message: NoCommunicationMessage} } return nil @@ -337,7 +339,7 @@ func (a LowAlert) Validate(validator structure.Validator) { // Evaluate the given data to determine if an alert should be sent. // // Assumes data is pre-sorted in descending order by Time. -func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { +func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (notification *Notification) { lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for low") @@ -346,7 +348,7 @@ func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note var eventBegan time.Time var okDatum, okThreshold float64 var err error - defer func() { logGlucoseAlertEvaluation(lgr, "low", note, okDatum, okThreshold) }() + defer func() { logGlucoseAlertEvaluation(lgr, "low", notification, okDatum, okThreshold) }() for _, datum := range data { okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) if err != nil { @@ -372,7 +374,7 @@ func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note a.Triggered = time.Now() } } - return &Note{Message: genGlucoseThresholdMessage("below low")} + return &Notification{Message: genGlucoseThresholdMessage("below low")} } func genGlucoseThresholdMessage(alertType string) string { @@ -404,7 +406,7 @@ func (a HighAlert) Validate(validator structure.Validator) { // Evaluate the given data to determine if an alert 
should be sent. // // Assumes data is pre-sorted in descending order by Time. -func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { +func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (notification *Notification) { lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for high") @@ -413,7 +415,7 @@ func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note var eventBegan time.Time var okDatum, okThreshold float64 var err error - defer func() { logGlucoseAlertEvaluation(lgr, "high", note, okDatum, okThreshold) }() + defer func() { logGlucoseAlertEvaluation(lgr, "high", notification, okDatum, okThreshold) }() for _, datum := range data { okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) if err != nil { @@ -439,13 +441,15 @@ func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note a.Triggered = time.Now() } } - return &Note{Message: genGlucoseThresholdMessage("above high")} + return &Notification{Message: genGlucoseThresholdMessage("above high")} } // logGlucoseAlertEvaluation is called during each glucose-based evaluation for record-keeping. -func logGlucoseAlertEvaluation(lgr log.Logger, alertType string, note *Note, value, threshold float64) { +func logGlucoseAlertEvaluation(lgr log.Logger, alertType string, notification *Notification, + value, threshold float64) { + fields := log.Fields{ - "isAlerting?": note != nil, + "isAlerting?": notification != nil, "threshold": threshold, "value": value, } @@ -522,8 +526,8 @@ type Repository interface { EnsureIndexes() error } -// Note gathers information necessary for sending an alert notification. -type Note struct { +// Notification gathers information necessary for sending an alert notification. +type Notification struct { // Message communicates the alert to the recipient. 
Message string RecipientUserID string diff --git a/alerts/config_test.go b/alerts/config_test.go index 068614d5b6..6ded948bb2 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -133,7 +133,7 @@ var _ = Describe("Config", func() { }) Describe("Evaluate", func() { - Context("when a note is returned", func() { + Context("when a notification is returned", func() { It("injects the userIDs", func() { ctx := contextWithTestLogger() mockGlucoseData := []*glucose.Glucose{ @@ -161,11 +161,11 @@ var _ = Describe("Config", func() { }, } - note := conf.Evaluate(ctx, mockGlucoseData, nil) + notification := conf.Evaluate(ctx, mockGlucoseData, nil) - Expect(note).ToNot(BeNil()) - Expect(note.RecipientUserID).To(Equal(mockUserID1)) - Expect(note.FollowedUserID).To(Equal(mockUserID2)) + Expect(notification).ToNot(BeNil()) + Expect(notification.RecipientUserID).To(Equal(mockUserID1)) + Expect(notification.FollowedUserID).To(Equal(mockUserID2)) }) }) }) @@ -267,18 +267,18 @@ var _ = Describe("Config", func() { It("handles being passed empty data", func() { ctx := contextWithTestLogger() - var note *Note + var notification *Notification alert := testUrgentLow() Expect(func() { - note = alert.Evaluate(ctx, []*glucose.Glucose{}) + notification = alert.Evaluate(ctx, []*glucose.Glucose{}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) Expect(func() { - note = alert.Evaluate(ctx, nil) + notification = alert.Evaluate(ctx, nil) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) }) It("logs evaluation results", func() { @@ -356,33 +356,33 @@ var _ = Describe("Config", func() { It("validates glucose data", func() { ctx := contextWithTestLogger() - var note *Note + var notification *Notification Expect(func() { - note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + notification = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) }).ToNot(Panic()) - 
Expect(note).ToNot(BeNil()) + Expect(notification).ToNot(BeNil()) badUnits := testGlucoseDatum(1) badUnits.Units = nil Expect(func() { - note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + notification = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + notification = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badValue}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + notification = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badTime}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) }) }) @@ -458,18 +458,18 @@ var _ = Describe("Config", func() { It("handles being passed empty data", func() { ctx := contextWithTestLogger() - var note *Note + var notification *Notification alert := testLow() Expect(func() { - note = alert.Evaluate(ctx, []*glucose.Glucose{}) + notification = alert.Evaluate(ctx, []*glucose.Glucose{}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) Expect(func() { - note = alert.Evaluate(ctx, nil) + notification = alert.Evaluate(ctx, nil) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) }) It("logs evaluation results", func() { @@ -547,33 +547,33 @@ var _ = Describe("Config", func() { It("validates glucose data", func() { ctx := contextWithTestLogger() - var note *Note + var notification *Notification Expect(func() { - note = testLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + notification = testLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) }).ToNot(Panic()) - Expect(note).ToNot(BeNil()) + 
Expect(notification).ToNot(BeNil()) badUnits := testGlucoseDatum(1) badUnits.Units = nil Expect(func() { - note = testLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + notification = testLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - note = testLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + notification = testLow().Evaluate(ctx, []*glucose.Glucose{badValue}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - note = testLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + notification = testLow().Evaluate(ctx, []*glucose.Glucose{badTime}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) }) }) }) @@ -641,18 +641,18 @@ var _ = Describe("Config", func() { It("handles being passed empty data", func() { ctx := contextWithTestLogger() - var note *Note + var notification *Notification alert := testHigh() Expect(func() { - note = alert.Evaluate(ctx, []*glucose.Glucose{}) + notification = alert.Evaluate(ctx, []*glucose.Glucose{}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) Expect(func() { - note = alert.Evaluate(ctx, nil) + notification = alert.Evaluate(ctx, nil) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) }) It("logs evaluation results", func() { @@ -730,33 +730,33 @@ var _ = Describe("Config", func() { It("validates glucose data", func() { ctx := contextWithTestLogger() - var note *Note + var notification *Notification Expect(func() { - note = testHigh().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21)}) + notification = testHigh().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21)}) }).ToNot(Panic()) - Expect(note).ToNot(BeNil()) + Expect(notification).ToNot(BeNil()) badUnits := testGlucoseDatum(1) 
badUnits.Units = nil Expect(func() { - note = testHigh().Evaluate(ctx, []*glucose.Glucose{badUnits}) + notification = testHigh().Evaluate(ctx, []*glucose.Glucose{badUnits}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - note = testHigh().Evaluate(ctx, []*glucose.Glucose{badValue}) + notification = testHigh().Evaluate(ctx, []*glucose.Glucose{badValue}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - note = testHigh().Evaluate(ctx, []*glucose.Glucose{badTime}) + notification = testHigh().Evaluate(ctx, []*glucose.Glucose{badTime}) }).ToNot(Panic()) - Expect(note).To(BeNil()) + Expect(notification).To(BeNil()) }) }) }) @@ -1072,9 +1072,9 @@ var _ = Describe("Alerts", func() { High: testHighAlert(), } - note := a.Evaluate(ctx, data, nil) + notification := a.Evaluate(ctx, data, nil) - Expect(note).To(HaveField("Message", ContainSubstring(NoCommunicationMessage))) + Expect(notification).To(HaveField("Message", ContainSubstring(NoCommunicationMessage))) }) }) @@ -1089,10 +1089,10 @@ var _ = Describe("Alerts", func() { Low: testLowAlert(), } - note := a.Evaluate(ctx, data, nil) + notification := a.Evaluate(ctx, data, nil) - Expect(note).ToNot(BeNil()) - Expect(note.Message).To(ContainSubstring("below low threshold")) + Expect(notification).ToNot(BeNil()) + Expect(notification.Message).To(ContainSubstring("below low threshold")) }) It("detects high data", func() { @@ -1102,10 +1102,10 @@ var _ = Describe("Alerts", func() { High: testHighAlert(), } - note := a.Evaluate(ctx, data, nil) + notification := a.Evaluate(ctx, data, nil) - Expect(note).ToNot(BeNil()) - Expect(note.Message).To(ContainSubstring("above high threshold")) + Expect(notification).ToNot(BeNil()) + Expect(notification.Message).To(ContainSubstring("above high threshold")) }) Context("with both low and urgent 
low alerts detected", func() { @@ -1117,10 +1117,10 @@ var _ = Describe("Alerts", func() { UrgentLow: testUrgentLowAlert(), } - note := a.Evaluate(ctx, data, nil) + notification := a.Evaluate(ctx, data, nil) - Expect(note).ToNot(BeNil()) - Expect(note.Message).To(ContainSubstring("below urgent low threshold")) + Expect(notification).ToNot(BeNil()) + Expect(notification.Message).To(ContainSubstring("below urgent low threshold")) }) }) }) diff --git a/data/events/alerts.go b/data/events/alerts.go index 819db3a1af..1cea469d91 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -121,21 +121,21 @@ func (c *Consumer) consumeDeviceData(ctx context.Context, return nil } -func (c *Consumer) pushNotes(ctx context.Context, notes []*alerts.Note) { +func (c *Consumer) pushNotes(ctx context.Context, notifications []*alerts.Notification) { lgr := c.logger(ctx) // Notes could be pushed into a Kafka topic to have a more durable retry, // but that can be added later. - for _, note := range notes { - lgr := lgr.WithField("recipientUserID", note.RecipientUserID) - tokens, err := c.DeviceTokens.GetDeviceTokens(ctx, note.RecipientUserID) + for _, notification := range notifications { + lgr := lgr.WithField("recipientUserID", notification.RecipientUserID) + tokens, err := c.DeviceTokens.GetDeviceTokens(ctx, notification.RecipientUserID) if err != nil { lgr.WithError(err).Info("Unable to retrieve device tokens") } if len(tokens) == 0 { lgr.Debug("no device tokens found, won't push any notifications") } - pushNote := push.FromNote(note) + pushNote := push.FromAlertsNotification(notification) for _, token := range tokens { if err := c.Pusher.Push(ctx, token, pushNote); err != nil { lgr.WithError(err).Info("Unable to push notification") @@ -163,7 +163,7 @@ func (c *Consumer) logger(ctx context.Context) log.Logger { } type AlertsEvaluator interface { - Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) + Evaluate(ctx context.Context, followedUserID 
string) ([]*alerts.Notification, error) } func NewAlertsEvaluator(alerts AlertsClient, data store.DataRepository, @@ -202,7 +202,7 @@ func (e *evaluator) logger(ctx context.Context) log.Logger { // Evaluate followers' alerts.Configs to generate alert notifications. func (e *evaluator) Evaluate(ctx context.Context, followedUserID string) ( - []*alerts.Note, error) { + []*alerts.Notification, error) { alertsConfigs, err := e.gatherAlertsConfigs(ctx, followedUserID) if err != nil { @@ -211,16 +211,16 @@ func (e *evaluator) Evaluate(ctx context.Context, followedUserID string) ( alertsConfigsByUploadID := e.mapAlertsConfigsByUploadID(alertsConfigs) - notes := []*alerts.Note{} + notifications := []*alerts.Notification{} for uploadID, cfgs := range alertsConfigsByUploadID { resp, err := e.gatherData(ctx, followedUserID, uploadID, cfgs) if err != nil { return nil, err } - notes = slices.Concat(notes, e.generateNotes(ctx, cfgs, resp)) + notifications = slices.Concat(notifications, e.generateNotes(ctx, cfgs, resp)) } - return notes, nil + return notifications, nil } func (e *evaluator) mapAlertsConfigsByUploadID(cfgs []*alerts.Config) map[string][]*alerts.Config { @@ -304,14 +304,14 @@ func (e *evaluator) gatherData(ctx context.Context, followedUserID, uploadID str } func (e *evaluator) generateNotes(ctx context.Context, - alertsConfigs []*alerts.Config, resp *store.AlertableResponse) []*alerts.Note { + alertsConfigs []*alerts.Config, resp *store.AlertableResponse) []*alerts.Notification { if len(alertsConfigs) == 0 { return nil } lgr := e.logger(ctx) - notes := []*alerts.Note{} + notifications := []*alerts.Notification{} for _, alertsConfig := range alertsConfigs { l := lgr.WithFields(log.Fields{ "userID": alertsConfig.UserID, @@ -321,12 +321,12 @@ func (e *evaluator) generateNotes(ctx context.Context, c := log.NewContextWithLogger(ctx, l) note := alertsConfig.Evaluate(c, resp.Glucose, resp.DosingDecisions) if note != nil { - notes = append(notes, note) + notifications = 
append(notifications, note) continue } } - return notes + return notifications } func unmarshalMessageValue[A any](b []byte, payload *A) error { diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index 64ed0f8bca..626c80a027 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -304,8 +304,8 @@ type mockEvaluator struct { } type mockEvaluatorResponse struct { - Notes []*alerts.Note - Error error + Notifications []*alerts.Notification + Error error } func newMockEvaluator() *mockEvaluator { @@ -315,7 +315,9 @@ func newMockEvaluator() *mockEvaluator { } } -func (e *mockEvaluator) Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) { +func (e *mockEvaluator) Evaluate(ctx context.Context, followedUserID string) ( + []*alerts.Notification, error) { + if _, found := e.Evaluations[followedUserID]; !found { return nil, nil } @@ -327,7 +329,7 @@ func (e *mockEvaluator) Evaluate(ctx context.Context, followedUserID string) ([] if resp.Error != nil { return nil, resp.Error } - return resp.Notes, nil + return resp.Notifications, nil } func (e *mockEvaluator) EvaluateCallsTotal() int { @@ -350,7 +352,9 @@ func newMockStaticEvaluator() *mockStaticEvaluator { return &mockStaticEvaluator{newMockEvaluator()} } -func (e *mockStaticEvaluator) Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) { +func (e *mockStaticEvaluator) Evaluate(ctx context.Context, followedUserID string) ( + []*alerts.Notification, error) { + e.EvaluateCalls[followedUserID] += 1 return nil, nil } diff --git a/push/logpush.go b/push/logpush.go index a313806a87..41e772260d 100644 --- a/push/logpush.go +++ b/push/logpush.go @@ -30,10 +30,12 @@ func NewLogPusher(l log.Logger) *LogPusher { } // Push implements [service.Pusher]. 
-func (p *LogPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, note *Notification) error { +func (p *LogPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, + notification *Notification) error { + p.Logger.WithFields(log.Fields{ - "deviceToken": deviceToken, - "note": note, + "deviceToken": deviceToken, + "notification": notification, }).Info("logging push notification") return nil } diff --git a/push/push.go b/push/push.go index bca2d45988..92d5a28eaa 100644 --- a/push/push.go +++ b/push/push.go @@ -27,9 +27,9 @@ func (n Notification) String() string { return n.Message } -func FromNote(note *alerts.Note) *Notification { +func FromAlertsNotification(notification *alerts.Notification) *Notification { return &Notification{ - Message: note.Message, + Message: notification.Message, } } @@ -88,14 +88,16 @@ func NewAPNSPusherFromKeyData(signingKey []byte, keyID, teamID, bundleID string) return NewAPNSPusher(client, bundleID), nil } -func (p *APNSPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, note *Notification) error { +func (p *APNSPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, + notification *Notification) error { + if deviceToken.Apple == nil { return errors.New("Unable to push notification: APNSPusher can only use Apple device tokens but the Apple token is nil") } hexToken := hex.EncodeToString(deviceToken.Apple.Token) - appleNote := p.buildAppleNotification(hexToken, note) - resp, err := p.safePush(ctx, deviceToken.Apple.Environment, appleNote) + appleNotification := p.buildAppleNotification(hexToken, notification) + resp, err := p.safePush(ctx, deviceToken.Apple.Environment, appleNotification) if err != nil { return errors.Wrap(err, "Unable to push notification") } @@ -115,7 +117,9 @@ func (p *APNSPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceT // // This prevents the environment from being changed out from under // you. 
Unlikely, but better safe than sorry. -func (p *APNSPusher) safePush(ctx context.Context, env string, note *apns2.Notification) (*apns2.Response, error) { +func (p *APNSPusher) safePush(ctx context.Context, env string, notification *apns2.Notification) ( + *apns2.Response, error) { + p.clientMu.Lock() defer p.clientMu.Unlock() if env == devicetokens.AppleEnvProduction { @@ -123,13 +127,13 @@ func (p *APNSPusher) safePush(ctx context.Context, env string, note *apns2.Notif } else { p.client.Development() } - return p.client.PushWithContext(ctx, note) + return p.client.PushWithContext(ctx, notification) } -func (p *APNSPusher) buildAppleNotification(hexToken string, note *Notification) *apns2.Notification { +func (p *APNSPusher) buildAppleNotification(hexToken string, notification *Notification) *apns2.Notification { payload := payload.NewPayload(). - Alert(note.Message). - AlertBody(note.Message) + Alert(notification.Message). + AlertBody(notification.Message) return &apns2.Notification{ DeviceToken: hexToken, Payload: payload, From c48da7e304621e6451746a9e0692d1f99fce0aee Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 19 Jul 2024 08:46:39 -0600 Subject: [PATCH 17/54] one mock of DeviceTokenRepository is enough As caught by Todd in code review. 
BACK-2554 --- auth/service/api/v1/devicetokens_test.go | 11 ++--- auth/service/service/client_test.go | 55 +++++----------------- auth/store/test/device_token_repository.go | 9 +++- 3 files changed, 23 insertions(+), 52 deletions(-) diff --git a/auth/service/api/v1/devicetokens_test.go b/auth/service/api/v1/devicetokens_test.go index ef61df746e..223208b617 100644 --- a/auth/service/api/v1/devicetokens_test.go +++ b/auth/service/api/v1/devicetokens_test.go @@ -93,15 +93,14 @@ var _ = Describe("Device tokens endpoints", func() { It("may return multiple documents", func() { repo := &storetest.DeviceTokenRepository{ - Documents: []*devicetokens.Document{ - { - DeviceToken: devicetokens.DeviceToken{}, - }, - { - DeviceToken: devicetokens.DeviceToken{}, + Tokens: map[string][]*devicetokens.DeviceToken{ + test.TestUserID1: { + &devicetokens.DeviceToken{}, + &devicetokens.DeviceToken{}, }, }, } + raw := rtr.Service.AuthStore().(*storetest.Store) raw.NewDeviceTokenRepositoryImpl = repo res := test.NewMockRestResponseWriter() diff --git a/auth/service/service/client_test.go b/auth/service/service/client_test.go index 1714e947a3..26792ca30e 100644 --- a/auth/service/service/client_test.go +++ b/auth/service/service/client_test.go @@ -14,6 +14,7 @@ import ( "github.com/tidepool-org/platform/auth/client" "github.com/tidepool-org/platform/auth/service/service" "github.com/tidepool-org/platform/auth/store" + storetest "github.com/tidepool-org/platform/auth/store/test" platformclient "github.com/tidepool-org/platform/client" "github.com/tidepool-org/platform/devicetokens" logtest "github.com/tidepool-org/platform/log/test" @@ -47,14 +48,14 @@ var _ = Describe("Client", func() { name := "test auth client" logger := logtest.NewLogger() if authStore == nil { + repo := storetest.NewDeviceTokenRepository() + repo.Tokens = map[string][]*devicetokens.DeviceToken{ + testUserID: { + testDeviceToken1, + }} + authStore = &mockAuthStore{ - DeviceTokenRepository: &mockDeviceTokenRepository{ - 
Tokens: map[string][]*devicetokens.DeviceToken{ - testUserID: { - testDeviceToken1, - }, - }, - }, + DeviceTokenRepository: repo, } } providerFactory := &mockProviderFactory{} @@ -81,10 +82,10 @@ var _ = Describe("Client", func() { ctx := context.Background() server := NewServer() defer server.Close() + repo := storetest.NewDeviceTokenRepository() + repo.Error = fmt.Errorf("test error") authStore := &mockAuthStore{ - DeviceTokenRepository: &mockDeviceTokenRepository{ - Error: fmt.Errorf("test error"), - }, + DeviceTokenRepository: repo, } serviceClient := newTestServiceClient(server.URL(), authStore) @@ -120,37 +121,3 @@ type mockProviderFactory struct{} func (f *mockProviderFactory) Get(typ string, name string) (provider.Provider, error) { return nil, nil } - -type mockDeviceTokenRepository struct { - Error error - Tokens map[string][]*devicetokens.DeviceToken -} - -func (r *mockDeviceTokenRepository) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { - if r.Error != nil { - return nil, r.Error - } - - if tokens, ok := r.Tokens[userID]; ok { - docs := make([]*devicetokens.Document, 0, len(tokens)) - for _, token := range tokens { - docs = append(docs, &devicetokens.Document{DeviceToken: *token}) - } - return docs, nil - } - return nil, nil -} - -func (r *mockDeviceTokenRepository) Upsert(ctx context.Context, doc *devicetokens.Document) error { - if r.Error != nil { - return r.Error - } - return nil -} - -func (r *mockDeviceTokenRepository) EnsureIndexes() error { - if r.Error != nil { - return r.Error - } - return nil -} diff --git a/auth/store/test/device_token_repository.go b/auth/store/test/device_token_repository.go index dbb40d9200..a15f913af1 100644 --- a/auth/store/test/device_token_repository.go +++ b/auth/store/test/device_token_repository.go @@ -10,6 +10,7 @@ import ( type DeviceTokenRepository struct { *authTest.DeviceTokenAccessor Documents []*devicetokens.Document + Tokens map[string][]*devicetokens.DeviceToken 
Error error } @@ -27,8 +28,12 @@ func (r *DeviceTokenRepository) GetAllByUserID(ctx context.Context, userID strin if r.Error != nil { return nil, r.Error } - if len(r.Documents) > 0 { - return r.Documents, nil + if tokens, ok := r.Tokens[userID]; ok { + docs := make([]*devicetokens.Document, 0, len(tokens)) + for _, token := range tokens { + docs = append(docs, &devicetokens.Document{DeviceToken: *token}) + } + return docs, nil } return nil, nil } From 8b22dd4e536bd1c388d6f1fcf913f1158f1b653b Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 24 Jul 2024 13:16:08 -0600 Subject: [PATCH 18/54] add a topic cascading retry mechanism for care partner alerts When a care partner alert encounters an error, the message is moved to a separate topic that will cause it to be retried after a delay. Any number of these topics can be configured. BACK-2499 --- alerts/client.go | 2 +- data/events/alerts.go | 4 +- data/events/events.go | 334 +++++++++-- data/events/events_test.go | 559 ++++++++++++++++-- data/service/service/standard.go | 8 +- vendor/github.com/IBM/sarama/mocks/README.md | 13 + .../IBM/sarama/mocks/async_producer.go | 272 +++++++++ .../github.com/IBM/sarama/mocks/consumer.go | 441 ++++++++++++++ vendor/github.com/IBM/sarama/mocks/mocks.go | 110 ++++ .../IBM/sarama/mocks/sync_producer.go | 264 +++++++++ vendor/modules.txt | 1 + 11 files changed, 1925 insertions(+), 83 deletions(-) create mode 100644 vendor/github.com/IBM/sarama/mocks/README.md create mode 100644 vendor/github.com/IBM/sarama/mocks/async_producer.go create mode 100644 vendor/github.com/IBM/sarama/mocks/consumer.go create mode 100644 vendor/github.com/IBM/sarama/mocks/mocks.go create mode 100644 vendor/github.com/IBM/sarama/mocks/sync_producer.go diff --git a/alerts/client.go b/alerts/client.go index 4709ba87dc..3198c489af 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -43,7 +43,7 @@ type PlatformClient interface { requestBody interface{}, responseBody interface{}, inspectors 
...request.ResponseInspector) error } -// TokenProvider retrieves session tokens for calling the alerts API. +// TokenProvider retrieves session tokens needed for calling the alerts API. // // client.External is one implementation type TokenProvider interface { diff --git a/data/events/alerts.go b/data/events/alerts.go index 1cea469d91..ede74be49c 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -52,9 +52,9 @@ func (c *Consumer) Consume(ctx context.Context, } switch { - case strings.HasSuffix(msg.Topic, ".data.alerts"): + case strings.Contains(msg.Topic, ".data.alerts"): return c.consumeAlertsConfigs(ctx, session, msg) - case strings.HasSuffix(msg.Topic, ".data.deviceData.alerts"): + case strings.Contains(msg.Topic, ".data.deviceData.alerts"): return c.consumeDeviceData(ctx, session, msg) default: c.logger(ctx).WithField("topic", msg.Topic). diff --git a/data/events/events.go b/data/events/events.go index 60deb640d9..cdd511750b 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -3,7 +3,7 @@ package events import ( "context" "fmt" - "log/slog" + "os" "sync" "time" @@ -17,6 +17,7 @@ import ( summaryStore "github.com/tidepool-org/platform/data/summary/store" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" ) type userDeletionEventsHandler struct { @@ -76,17 +77,25 @@ const AlertsEventRetries = 1000 // AlertsEventConsumptionTimeout is the maximum time to process an alerts event. const AlertsEventConsumptionTimeout = 30 * time.Second -// SaramaRunner interfaces between events.Runner and go-common's -// asyncevents.SaramaEventsConsumer. +// SaramaRunner interfaces between [events.Runner] and go-common's +// [asyncevents.SaramaEventsConsumer]. +// +// This means providing Initialize(), Run(), and Terminate() to satisfy events.Runner, while +// under the hood calling SaramaEventConsumer's Run(), and canceling its Context as +// appropriate. 
type SaramaRunner struct { - EventsRunner SaramaEventsRunner - Config SaramaRunnerConfig + eventsRunner SaramaEventsRunner cancelCtx context.CancelFunc cancelMu sync.Mutex } -// SaramaEventsRunner is implemented by go-common's -// asyncevents.SaramaEventsRunner. +func NewSaramaRunner(eventsRunner SaramaEventsRunner) *SaramaRunner { + return &SaramaRunner{ + eventsRunner: eventsRunner, + } +} + +// SaramaEventsRunner is implemented by go-common's [asyncevents.SaramaEventsRunner]. type SaramaEventsRunner interface { Run(ctx context.Context) error } @@ -99,45 +108,19 @@ type SaramaEventsRunner interface { type SaramaRunnerConfig struct { Brokers []string GroupID string - Logger log.Logger Topics []string MessageConsumer asyncevents.SaramaMessageConsumer Sarama *sarama.Config } -func (r *SaramaRunner) Initialize() error { - group, err := sarama.NewConsumerGroup(r.Config.Brokers, r.Config.GroupID, r.Config.Sarama) - if err != nil { - return errors.Wrap(err, "Unable to build sarama consumer group") - } - handler := asyncevents.NewSaramaConsumerGroupHandler(&asyncevents.NTimesRetryingConsumer{ - Consumer: r.Config.MessageConsumer, - Delay: CappedExponentialBinaryDelay(AlertsEventRetryDelayMaximum), - Times: AlertsEventRetries, - Logger: r.logger, - }, AlertsEventConsumptionTimeout) - r.EventsRunner = asyncevents.NewSaramaEventsConsumer(group, handler, r.Config.Topics...) - return nil -} - -func (r *SaramaRunner) logger(ctx context.Context) asyncevents.Logger { - // Prefer a logger from the context. - if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil { - return &log.GoCommonAdapter{Logger: ctxLogger} - } - if r.Config.Logger != nil { - return &log.GoCommonAdapter{Logger: r.Config.Logger} - } - // No known log.Logger could be found, default to slog. - return slog.Default() -} +func (r *SaramaRunner) Initialize() error { return nil } // Run adapts platform's event.Runner to work with go-common's // asyncevents.SaramaEventsConsumer. 
func (r *SaramaRunner) Run() error { - if r.EventsRunner == nil { - return errors.New("Unable to run SaramaRunner, EventsRunner is nil") + if r.eventsRunner == nil { + return errors.New("Unable to run SaramaRunner, eventsRunner is nil") } r.cancelMu.Lock() @@ -153,7 +136,7 @@ func (r *SaramaRunner) Run() error { if err != nil { return err } - if err := r.EventsRunner.Run(ctx); err != nil { + if err := r.eventsRunner.Run(ctx); err != nil { return errors.Wrap(err, "Unable to Run SaramaRunner") } return nil @@ -196,3 +179,280 @@ func (c *AlertsEventsConsumer) Consume(ctx context.Context, } return nil } + +// CascadingSaramaEventsRunner manages multiple sarama consumer groups to execute a +// topic-cascading retry process. +// +// The topic names are generated from Config.Topics combined with Delays. If given a single +// topic "updates", and delays: 0s, 1s, and 5s, then the following topics will be consumed: +// updates, updates-retry-1s, updates-retry-5s. The consumer of the updates-retry-5s topic +// will write failed messages to updates-dead. +// +// The inspiration for this system was drawn from +// https://www.uber.com/blog/reliable-reprocessing/ +type CascadingSaramaEventsRunner struct { + Config SaramaRunnerConfig + Delays []time.Duration + Logger log.Logger + SaramaBuilders SaramaBuilders +} + +func NewCascadingSaramaEventsRunner(config SaramaRunnerConfig, logger log.Logger, + delays []time.Duration) *CascadingSaramaEventsRunner { + + return &CascadingSaramaEventsRunner{ + Config: config, + Delays: delays, + Logger: logger, + SaramaBuilders: DefaultSaramaBuilders{}, + } +} + +// LimitedAsyncProducer restricts the [sarama.AsyncProducer] interface to ensure that its +// recipient isn't able to call Close(), thereby opening the potential for a panic when +// writing to a closed channel. 
+type LimitedAsyncProducer interface { + AbortTxn() error + BeginTxn() error + CommitTxn() error + Input() chan<- *sarama.ProducerMessage +} + +func (r *CascadingSaramaEventsRunner) Run(ctx context.Context) error { + if len(r.Config.Topics) == 0 { + return errors.New("no topics") + } + if len(r.Delays) == 0 { + return errors.New("no delays") + } + + producersCtx, cancel := context.WithCancel(ctx) + defer cancel() + var wg sync.WaitGroup + errs := make(chan error, len(r.Config.Topics)*len(r.Delays)) + defer func() { + r.logger(ctx).Debug("CascadingSaramaEventsRunner: waiting for consumers") + wg.Wait() + r.logger(ctx).Debug("CascadingSaramaEventsRunner: all consumers returned") + close(errs) + }() + + for _, topic := range r.Config.Topics { + for idx, delay := range r.Delays { + producerCfg := r.producerConfig(idx, delay) + // The producer is built here rather than in buildConsumer() to control when + // producer is closed. Were the producer to be closed before consumer.Run() + // returns, it would be possible for consumer to write to the producer's + // Inputs() channel, which if closed, would cause a panic. + producer, err := r.SaramaBuilders.NewAsyncProducer(r.Config.Brokers, producerCfg) + if err != nil { + return errors.Wrapf(err, "Unable to build async producer: %s", r.Config.GroupID) + } + + consumer, err := r.buildConsumer(producersCtx, idx, producer, delay, topic) + if err != nil { + return err + } + + wg.Add(1) + go func(topic string) { + defer func() { wg.Done(); producer.Close() }() + if err := consumer.Run(producersCtx); err != nil { + errs <- fmt.Errorf("topics[%q]: %s", topic, err) + } + r.logger(ctx).WithField("topic", topic). + Debug("CascadingSaramaEventsRunner: consumer go proc returning") + }(topic) + } + } + + select { + case <-ctx.Done(): + r.logger(ctx).Debug("CascadingSaramaEventsRunner: context is done") + return nil + case err := <-errs: + r.logger(ctx).WithError(err). 
+ Debug("CascadingSaramaEventsRunner: Run(): error from consumer") + return err + } +} + +func (r *CascadingSaramaEventsRunner) producerConfig(idx int, delay time.Duration) *sarama.Config { + uniqueConfig := *r.Config.Sarama + hostID := os.Getenv("HOSTNAME") // set by default in kubernetes pods + if hostID == "" { + hostID = fmt.Sprintf("%d-%d", time.Now().UnixNano()/int64(time.Second), os.Getpid()) + } + txnID := fmt.Sprintf("%s-%s-%d-%s", r.Config.GroupID, delay.String(), idx, hostID) + uniqueConfig.Producer.Transaction.ID = txnID + uniqueConfig.Producer.Idempotent = true + uniqueConfig.Producer.RequiredAcks = sarama.WaitForAll + uniqueConfig.Net.MaxOpenRequests = 1 + uniqueConfig.Consumer.IsolationLevel = sarama.ReadCommitted + return &uniqueConfig +} + +// SaramaBuilders allows tests to inject mock objects. +type SaramaBuilders interface { + NewAsyncProducer([]string, *sarama.Config) (sarama.AsyncProducer, error) + NewConsumerGroup([]string, string, *sarama.Config) (sarama.ConsumerGroup, error) +} + +// DefaultSaramaBuilders implements SaramaBuilders for normal, non-test use. 
+type DefaultSaramaBuilders struct{}
+
+func (DefaultSaramaBuilders) NewAsyncProducer(brokers []string, config *sarama.Config) (
+	sarama.AsyncProducer, error) {
+
+	return sarama.NewAsyncProducer(brokers, config)
+}
+
+func (DefaultSaramaBuilders) NewConsumerGroup(brokers []string, groupID string,
+	config *sarama.Config) (sarama.ConsumerGroup, error) {
+
+	return sarama.NewConsumerGroup(brokers, groupID, config)
+}
+
+func (r *CascadingSaramaEventsRunner) buildConsumer(ctx context.Context, idx int,
+	producer LimitedAsyncProducer, delay time.Duration, baseTopic string) (
+	*asyncevents.SaramaEventsConsumer, error) {
+
+	groupID := r.Config.GroupID
+	if delay > 0 {
+		groupID += "-retry-" + delay.String()
+	}
+	group, err := r.SaramaBuilders.NewConsumerGroup(r.Config.Brokers, groupID,
+		r.Config.Sarama)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Unable to build sarama consumer group: %s", groupID)
+	}
+
+	var consumer asyncevents.SaramaMessageConsumer = r.Config.MessageConsumer
+	if len(r.Delays) > 0 {
+		nextTopic := baseTopic + "-dead"
+		if idx+1 < len(r.Delays) {
+			nextTopic = baseTopic + "-retry-" + r.Delays[idx+1].String()
+		}
+		consumer = &CascadingConsumer{
+			Consumer:  consumer,
+			NextTopic: nextTopic,
+			Producer:  producer,
+			Logger:    r.Logger,
+		}
+	}
+	if delay > 0 {
+		consumer = &DelayingConsumer{
+			Consumer: consumer,
+			Delay:    delay,
+			Logger:   r.Logger,
+		}
+	}
+	handler := asyncevents.NewSaramaConsumerGroupHandler(consumer,
+		AlertsEventConsumptionTimeout)
+	topic := baseTopic
+	if delay > 0 {
+		topic += "-retry-" + delay.String()
+	}
+	r.logger(ctx).WithField("topic", topic).Debug("creating consumer")
+
+	return asyncevents.NewSaramaEventsConsumer(group, handler, topic), nil
+}
+
+func (r *CascadingSaramaEventsRunner) logger(ctx context.Context) log.Logger {
+	// A context logger might have more fields or ... context. So prefer that if available.
+ if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil { + return ctxLogger + } + if r.Logger == nil { + // logjson.NewLogger will only fail if an argument is missing. + r.Logger, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + } + return r.Logger +} + +// DelayingConsumer injects a delay before consuming a message. +type DelayingConsumer struct { + Consumer asyncevents.SaramaMessageConsumer + Delay time.Duration + Logger log.Logger +} + +func (c *DelayingConsumer) Consume(ctx context.Context, session sarama.ConsumerGroupSession, + msg *sarama.ConsumerMessage) error { + + select { + case <-ctx.Done(): + if ctxErr := ctx.Err(); ctxErr != context.Canceled { + return ctxErr + } + return nil + case <-time.After(c.Delay): + c.Logger.WithFields(log.Fields{"topic": msg.Topic, "delay": c.Delay}).Debugf("delayed") + return c.Consumer.Consume(ctx, session, msg) + } +} + +// CascadingConsumer cascades messages that failed to be consumed to another topic. +type CascadingConsumer struct { + Consumer asyncevents.SaramaMessageConsumer + NextTopic string + Producer LimitedAsyncProducer + Logger log.Logger +} + +func (c *CascadingConsumer) Consume(ctx context.Context, session sarama.ConsumerGroupSession, + msg *sarama.ConsumerMessage) (err error) { + + if err := c.Consumer.Consume(ctx, session, msg); err != nil { + txnErr := c.withTxn(func() error { + select { + case <-ctx.Done(): + if ctxErr := ctx.Err(); ctxErr != context.Canceled { + return ctxErr + } + return nil + case c.Producer.Input() <- c.cascadeMessage(msg): + fields := log.Fields{"from": msg.Topic, "to": c.NextTopic} + c.Logger.WithFields(fields).Debug("cascaded") + return nil + } + }) + if txnErr != nil { + c.Logger.WithError(txnErr).Info("Unable to complete cascading transaction") + return err + } + } + return nil +} + +// withTxn wraps a function with a transaction that is aborted if an error is returned. 
+func (c *CascadingConsumer) withTxn(f func() error) (err error) { + if err := c.Producer.BeginTxn(); err != nil { + return errors.Wrap(err, "Unable to begin transaction") + } + defer func(err *error) { + if err != nil && *err != nil { + if abortErr := c.Producer.AbortTxn(); abortErr != nil { + c.Logger.WithError(abortErr).Info("Unable to abort transaction") + } + return + } + if commitErr := c.Producer.CommitTxn(); commitErr != nil { + c.Logger.WithError(commitErr).Info("Unable to commit transaction") + } + }(&err) + return f() +} + +func (c *CascadingConsumer) cascadeMessage(msg *sarama.ConsumerMessage) *sarama.ProducerMessage { + pHeaders := make([]sarama.RecordHeader, len(msg.Headers)) + for idx, header := range msg.Headers { + pHeaders[idx] = *header + } + return &sarama.ProducerMessage{ + Key: sarama.ByteEncoder(msg.Key), + Value: sarama.ByteEncoder(msg.Value), + Topic: c.NextTopic, + Headers: pHeaders, + } +} diff --git a/data/events/events_test.go b/data/events/events_test.go index 492a059376..5294d9f277 100644 --- a/data/events/events_test.go +++ b/data/events/events_test.go @@ -2,24 +2,26 @@ package events import ( "context" - "log/slog" + "fmt" "sync" + "sync/atomic" "time" + "github.com/IBM/sarama" + "github.com/IBM/sarama/mocks" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/tidepool-org/platform/log" - "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/log/devlog" + lognull "github.com/tidepool-org/platform/log/null" + logtest "github.com/tidepool-org/platform/log/test" ) var _ = Describe("SaramaRunner", func() { Context("has a lifecycle", func() { newTestRunner := func() *SaramaRunner { - return &SaramaRunner{ - Config: SaramaRunnerConfig{}, - EventsRunner: &mockEventsRunner{}, - } + return NewSaramaRunner(&mockEventsRunner{}) } It("starts with Run() and stops with Terminate()", func() { r := newTestRunner() @@ -91,69 +93,292 @@ var _ = Describe("SaramaRunner", func() { }) }) }) +}) + +var _ = DescribeTable("CappedExponentialBinaryDelay", + func(cap time.Duration, input int, output time.Duration) { + f := CappedExponentialBinaryDelay(cap) + Expect(f(input)).To(Equal(output)) + }, + Entry("cap: 1m; tries: 0", time.Minute, 0, time.Second), + Entry("cap: 1m; tries: 1", time.Minute, 1, 2*time.Second), + Entry("cap: 1m; tries: 2", time.Minute, 2, 4*time.Second), + Entry("cap: 1m; tries: 3", time.Minute, 3, 8*time.Second), + Entry("cap: 1m; tries: 4", time.Minute, 4, 16*time.Second), + Entry("cap: 1m; tries: 5", time.Minute, 5, 32*time.Second), + Entry("cap: 1m; tries: 6", time.Minute, 6, time.Minute), + Entry("cap: 1m; tries: 20", time.Minute, 20, time.Minute), +) + +var _ = Describe("DelayingConsumer", func() { + Describe("Consume", func() { + var testMsg = &sarama.ConsumerMessage{ + Topic: "test.topic", + } + + It("delays by the configured duration", func() { + logger := newTestDevlog() + testDelay := 10 * time.Millisecond + ctx := context.Background() + start := time.Now() + dc := &DelayingConsumer{ + Consumer: &mockSaramaMessageConsumer{Logger: logger}, + Delay: testDelay, + Logger: logger, + } + + err := dc.Consume(ctx, nil, testMsg) + + Expect(err).To(BeNil()) + Expect(time.Since(start)).To(BeNumerically(">", testDelay)) + }) + + It("aborts if canceled", func() { + 
logger := newTestDevlog() + testDelay := 10 * time.Millisecond + abortAfter := 1 * time.Millisecond + dc := &DelayingConsumer{ + Consumer: &mockSaramaMessageConsumer{Delay: time.Minute, Logger: logger}, + Delay: testDelay, + Logger: logger, + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + <-time.After(abortAfter) + }() + start := time.Now() + + err := dc.Consume(ctx, nil, testMsg) + + Expect(err).To(BeNil()) + Expect(time.Since(start)).To(BeNumerically(">", abortAfter)) + }) + + }) +}) + +var _ = Describe("ShiftingConsumer", func() { + Describe("Consume", func() { + var testMsg = &sarama.ConsumerMessage{ + Topic: "test.topic", + } + + Context("on failure", func() { + It("shifts topics", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{} + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg *sarama.ProducerMessage) error { + if msg.Topic != nextTopic { + return fmt.Errorf("expected topic to be %q, got %q", nextTopic, msg.Topic) + } + return nil + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + }) + + Context("on success", func() { + It("doesn't produce a new message", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{} + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{Logger: logger}, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: 
logger, + } + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + }) + + Context("when canceled", func() { + It("aborts", func() { + logger := newTestDevlog() + abortAfter := 1 * time.Millisecond + p := newMockSaramaAsyncProducer(nil) + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{Delay: time.Minute, Logger: logger}, + Logger: lognull.NewLogger(), + Producer: p, + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + defer cancel() + time.Sleep(abortAfter) + }() + start := time.Now() + + err := sc.Consume(ctx, nil, testMsg) + Expect(err).To(BeNil()) + Expect(time.Since(start)).To(BeNumerically(">", abortAfter)) + }) + }) + }) +}) + +var _ = Describe("ShiftingSaramaEventsRunner", func() { + It("shifts through configured delays", func() { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + testDelays := []time.Duration{0, 1, 2, 3, 5} + testLogger := newTestDevlog() + testMessageConsumer := &mockSaramaMessageConsumer{ + Delay: time.Millisecond, + Err: fmt.Errorf("test error"), + Logger: testLogger, + } + testConfig := SaramaRunnerConfig{ + Topics: []string{"test.shifting"}, + MessageConsumer: testMessageConsumer, + Sarama: mocks.NewTestConfig(), + } + producers := []*mockSaramaAsyncProducer{} + var msgsReceived atomic.Int32 + prodFunc := func(_ []string, config *sarama.Config) (sarama.AsyncProducer, error) { + prod := newMockSaramaAsyncProducer(func(msg *sarama.ProducerMessage) { + msgsReceived.Add(1) + if int(msgsReceived.Load()) == len(testDelays) { + // Once all messages are entered, the test is complete. Cancel the + // context to shut it all down properly. 
+ cancel() + } + }) + producers = append(producers, prod) + return prod, nil + } + sser := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + sser.SaramaBuilders = newTestSaramaBuilders(nil, prodFunc) + + err := sser.Run(ctx) + Expect(err).To(Succeed()) + for pIdx, p := range producers { + Expect(p.isClosed()).To(BeTrue()) + Expect(p.messages).To(HaveLen(1)) + topic := p.messages[0].Topic + switch { + case pIdx+1 < len(testDelays): + Expect(topic).To(MatchRegexp(fmt.Sprintf(".*-retry-%s$", testDelays[pIdx+1]))) + default: + Expect(topic).To(MatchRegexp(".*-dead$")) + } + } + }) Describe("logger", func() { It("prefers a context's logger", func() { - testLogger := test.NewLogger() - ctxLogger := test.NewLogger() - r := &SaramaRunner{ - Config: SaramaRunnerConfig{Logger: testLogger}, - } + testLogger := logtest.NewLogger() + ctxLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) ctx := log.NewContextWithLogger(context.Background(), ctxLogger) got := r.logger(ctx) - goCommonLogger, ok := got.(*log.GoCommonAdapter) - Expect(ok).To(BeTrue()) - Expect(goCommonLogger.Logger).To(Equal(ctxLogger)) + Expect(got).To(Equal(ctxLogger)) }) Context("without a context logger", func() { It("uses the configured logger", func() { - testLogger := test.NewLogger() - r := &SaramaRunner{ - Config: SaramaRunnerConfig{ - Logger: testLogger, - }, - } + testLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) - got := r.logger(context.Background()) + ctx := context.Background() + got := r.logger(ctx) - goCommonLogger, ok := got.(*log.GoCommonAdapter) - Expect(ok).To(BeTrue()) - Expect(goCommonLogger.Logger).To(Equal(testLogger)) + Expect(got).To(Equal(testLogger)) }) Context("or any configured logger", func() { It("doesn't panic", 
func() { - r := &SaramaRunner{Config: SaramaRunnerConfig{}} + testLogger := logtest.NewLogger() + testDelays := []time.Duration{0} + testConfig := SaramaRunnerConfig{} + r := NewCascadingSaramaEventsRunner(testConfig, testLogger, testDelays) + ctx := context.Background() got := r.logger(ctx) Expect(func() { - got.Log(ctx, slog.LevelInfo, "testing") + got.Debug("testing") }).ToNot(Panic()) }) }) }) }) - - DescribeTable("CappedExponentialBinaryDelay", - func(cap time.Duration, input int, output time.Duration) { - f := CappedExponentialBinaryDelay(cap) - Expect(f(input)).To(Equal(output)) - }, - Entry("cap: 1m; tries: 0", time.Minute, 0, time.Second), - Entry("cap: 1m; tries: 1", time.Minute, 1, 2*time.Second), - Entry("cap: 1m; tries: 2", time.Minute, 2, 4*time.Second), - Entry("cap: 1m; tries: 3", time.Minute, 3, 8*time.Second), - Entry("cap: 1m; tries: 4", time.Minute, 4, 16*time.Second), - Entry("cap: 1m; tries: 5", time.Minute, 5, 32*time.Second), - Entry("cap: 1m; tries: 6", time.Minute, 6, time.Minute), - Entry("cap: 1m; tries: 20", time.Minute, 20, time.Minute), - ) }) +// testSaramaBuilders injects mocks into the ShiftingSaramaEventsRunner +type testSaramaBuilders struct { + consumerGroup func([]string, string, *sarama.Config) (sarama.ConsumerGroup, error) + producer func([]string, *sarama.Config) (sarama.AsyncProducer, error) +} + +func newTestSaramaBuilders( + cgFunc func([]string, string, *sarama.Config) (sarama.ConsumerGroup, error), + prodFunc func([]string, *sarama.Config) (sarama.AsyncProducer, error)) *testSaramaBuilders { + + if cgFunc == nil { + cgFunc = func(_ []string, groupID string, config *sarama.Config) (sarama.ConsumerGroup, error) { + logger := newTestDevlog() + return &mockSaramaConsumerGroup{ + Logger: logger, + }, nil + } + } + if prodFunc == nil { + prodFunc = func(_ []string, config *sarama.Config) (sarama.AsyncProducer, error) { + return mocks.NewAsyncProducer(GinkgoT(), config), nil + } + } + return &testSaramaBuilders{ + 
consumerGroup: cgFunc, + producer: prodFunc, + } +} + +func (b testSaramaBuilders) NewAsyncProducer(brokers []string, config *sarama.Config) ( + sarama.AsyncProducer, error) { + + return b.producer(brokers, config) +} + +func (b testSaramaBuilders) NewConsumerGroup(brokers []string, groupID string, + config *sarama.Config) (sarama.ConsumerGroup, error) { + + return b.consumerGroup(brokers, groupID, config) +} + type mockEventsRunner struct { Err error } @@ -161,3 +386,255 @@ type mockEventsRunner struct { func (r *mockEventsRunner) Run(ctx context.Context) error { return r.Err } + +type mockSaramaMessageConsumer struct { + Delay time.Duration + Err error + Logger log.Logger +} + +func (c *mockSaramaMessageConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + c.Logger.Debugf("mockSaramaMessageConsumer[%q] is consuming %+v", msg.Topic, msg) + defer func(err *error) { + c.Logger.Debugf("mockSaramaMessageConsumer[%q] returns %s", msg.Topic, *err) + }(&err) + + done := ctx.Done() + select { + case <-time.After(c.Delay): + // no op + case <-done: + return ctx.Err() + } + + if c.Err != nil { + return c.Err + } + return nil +} + +type mockSaramaConsumerGroup struct { + Messages chan *sarama.ConsumerMessage + ConsumeErr error + Logger log.Logger +} + +func (g *mockSaramaConsumerGroup) Consume(ctx context.Context, + topics []string, handler sarama.ConsumerGroupHandler) error { + + if g.ConsumeErr != nil { + return g.ConsumeErr + } + + g.Logger.Debugf("mockSaramaConsumerGroup%s consuming", topics) + session := &mockSaramaConsumerGroupSession{} + if g.Messages == nil { + g.Messages = make(chan *sarama.ConsumerMessage) + go func() { <-ctx.Done(); close(g.Messages) }() + go g.feedYourClaim(ctx, topics[0]) + } + claim := &mockSaramaConsumerGroupClaim{ + topic: topics[0], + messages: g.Messages, + } + + err := handler.ConsumeClaim(session, claim) + if err != nil { + return err + } + return nil +} + +func (g 
*mockSaramaConsumerGroup) feedYourClaim(ctx context.Context, topic string) { + msg := &sarama.ConsumerMessage{Topic: topic} + select { + case <-ctx.Done(): + return + case g.Messages <- msg: + // no op + } +} + +func (g *mockSaramaConsumerGroup) Errors() <-chan error { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) Close() error { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) Pause(partitions map[string][]int32) { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) Resume(partitions map[string][]int32) { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) PauseAll() { + panic("not implemented") // implement if needed +} + +func (g *mockSaramaConsumerGroup) ResumeAll() { + panic("not implemented") // implement if needed} +} + +type mockSaramaConsumerGroupSession struct{} + +func (s *mockSaramaConsumerGroupSession) Claims() map[string][]int32 { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) MemberID() string { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) GenerationID() int32 { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) Commit() { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { + panic("not implemented") // implement if needed +} + +func (s *mockSaramaConsumerGroupSession) Context() context.Context { + 
panic("not implemented") // implement if needed +} + +type mockSaramaConsumerGroupClaim struct { + messages <-chan *sarama.ConsumerMessage + topic string +} + +func (c *mockSaramaConsumerGroupClaim) Topic() string { + return c.topic +} + +func (c *mockSaramaConsumerGroupClaim) Partition() int32 { + panic("not implemented") // implement if needed +} + +func (c *mockSaramaConsumerGroupClaim) InitialOffset() int64 { + panic("not implemented") // implement if needed +} + +func (c *mockSaramaConsumerGroupClaim) HighWaterMarkOffset() int64 { + panic("not implemented") // implement if needed +} + +func (c *mockSaramaConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { + return c.messages +} + +type mockSaramaAsyncProducer struct { + input chan *sarama.ProducerMessage + messages []*sarama.ProducerMessage + mu sync.Mutex + setupCallbacksOnce sync.Once + closeOnce sync.Once + msgCallback func(*sarama.ProducerMessage) +} + +func newMockSaramaAsyncProducer(msgCallback func(*sarama.ProducerMessage)) *mockSaramaAsyncProducer { + return &mockSaramaAsyncProducer{ + input: make(chan *sarama.ProducerMessage), + messages: []*sarama.ProducerMessage{}, + msgCallback: msgCallback, + } +} + +func (p *mockSaramaAsyncProducer) AsyncClose() { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) Close() error { + p.closeOnce.Do(func() { close(p.input) }) + return nil +} + +func (p *mockSaramaAsyncProducer) setupCallbacks() { + if p.msgCallback == nil { + return + } + p.setupCallbacksOnce.Do(func() { + go func(callback func(*sarama.ProducerMessage)) { + for msg := range p.input { + p.messages = append(p.messages, msg) + go callback(msg) + } + }(p.msgCallback) + }) +} + +func (p *mockSaramaAsyncProducer) Input() chan<- *sarama.ProducerMessage { + defer p.setupCallbacks() + return p.input +} + +func (p *mockSaramaAsyncProducer) Successes() <-chan *sarama.ProducerMessage { + panic("not implemented") // implement if needed +} + +func (p 
*mockSaramaAsyncProducer) Errors() <-chan *sarama.ProducerError { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) IsTransactional() bool { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) BeginTxn() error { + return nil +} + +func (p *mockSaramaAsyncProducer) CommitTxn() error { + return nil +} + +func (p *mockSaramaAsyncProducer) AbortTxn() error { + return nil +} + +func (p *mockSaramaAsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + panic("not implemented") // implement if needed +} + +func (p *mockSaramaAsyncProducer) isClosed() bool { + p.mu.Lock() + defer p.mu.Unlock() + select { + case _, open := <-p.input: + return !open + default: + return false + } +} + +func newTestDevlog() log.Logger { + GinkgoHelper() + l, err := devlog.NewWithDefaults(GinkgoWriter) + Expect(err).To(Succeed()) + return l +} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 9e7ec5d3c6..70a87965ae 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -3,9 +3,11 @@ package service import ( "context" "strings" + "time" "github.com/IBM/sarama" "github.com/kelseyhightower/envconfig" + eventsCommon "github.com/tidepool-org/go-common/events" "github.com/tidepool-org/platform/application" @@ -516,14 +518,16 @@ func (s *Standard) initializeAlertsEventsHandler() error { runnerCfg := dataEvents.SaramaRunnerConfig{ Brokers: commonConfig.KafkaBrokers, GroupID: config.KafkaAlertsGroupID, - Logger: s.Logger(), Topics: prefixedTopics, Sarama: commonConfig.SaramaConfig, MessageConsumer: 
&dataEvents.AlertsEventsConsumer{ Consumer: ec, }, } - runner := &dataEvents.SaramaRunner{Config: runnerCfg} + + eventsRunner := dataEvents.NewCascadingSaramaEventsRunner(runnerCfg, s.Logger(), + []time.Duration{0, 1 * time.Second, 2 * time.Second, 3 * time.Second, 5 * time.Second}) + runner := dataEvents.NewSaramaRunner(eventsRunner) if err := runner.Initialize(); err != nil { return errors.Wrap(err, "Unable to initialize alerts events handler runner") } diff --git a/vendor/github.com/IBM/sarama/mocks/README.md b/vendor/github.com/IBM/sarama/mocks/README.md new file mode 100644 index 0000000000..9f40ae2ff7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/README.md @@ -0,0 +1,13 @@ +# sarama/mocks + +The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types. +You can use them to test your sarama applications using dependency injection. + +The following mock objects are available: + +- [Consumer](https://pkg.go.dev/github.com/IBM/sarama/mocks#Consumer), which will create [PartitionConsumer](https://pkg.go.dev/github.com/IBM/sarama/mocks#PartitionConsumer) mocks. +- [AsyncProducer](https://pkg.go.dev/github.com/IBM/sarama/mocks#AsyncProducer) +- [SyncProducer](https://pkg.go.dev/github.com/IBM/sarama/mocks#SyncProducer) + +The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified, +and the results will be reported to the `*testing.T` object you provided when creating the mock. diff --git a/vendor/github.com/IBM/sarama/mocks/async_producer.go b/vendor/github.com/IBM/sarama/mocks/async_producer.go new file mode 100644 index 0000000000..89e0e0db99 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/async_producer.go @@ -0,0 +1,272 @@ +package mocks + +import ( + "errors" + "sync" + + "github.com/IBM/sarama" +) + +// AsyncProducer implements sarama's Producer interface for testing purposes. 
+// Before you can send messages to it's Input channel, you have to set expectations +// so it knows how to handle the input; it returns an error if the number of messages +// received is bigger then the number of expectations set. You can also set a +// function in each expectation so that the message is checked by this function and +// an error is returned if the match fails. +type AsyncProducer struct { + l sync.Mutex + t ErrorReporter + expectations []*producerExpectation + closed chan struct{} + input chan *sarama.ProducerMessage + successes chan *sarama.ProducerMessage + errors chan *sarama.ProducerError + isTransactional bool + txnLock sync.Mutex + txnStatus sarama.ProducerTxnStatusFlag + lastOffset int64 + *TopicConfig +} + +// NewAsyncProducer instantiates a new Producer mock. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument is validated and used to determine +// whether it should ack successes on the Successes channel and handle partitioning. 
+func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + mp := &AsyncProducer{ + t: t, + closed: make(chan struct{}), + expectations: make([]*producerExpectation, 0), + input: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), + successes: make(chan *sarama.ProducerMessage, config.ChannelBufferSize), + errors: make(chan *sarama.ProducerError, config.ChannelBufferSize), + isTransactional: config.Producer.Transaction.ID != "", + txnStatus: sarama.ProducerTxnFlagReady, + TopicConfig: NewTopicConfig(), + } + + go func() { + defer func() { + close(mp.successes) + close(mp.errors) + close(mp.closed) + }() + + partitioners := make(map[string]sarama.Partitioner, 1) + + for msg := range mp.input { + mp.txnLock.Lock() + if mp.IsTransactional() && mp.txnStatus&sarama.ProducerTxnFlagInTransaction == 0 { + mp.t.Errorf("attempt to send message when transaction is not started or is in ending state.") + mp.errors <- &sarama.ProducerError{Err: errors.New("attempt to send message when transaction is not started or is in ending state"), Msg: msg} + continue + } + mp.txnLock.Unlock() + partitioner := partitioners[msg.Topic] + if partitioner == nil { + partitioner = config.Producer.Partitioner(msg.Topic) + partitioners[msg.Topic] = partitioner + } + mp.l.Lock() + if mp.expectations == nil || len(mp.expectations) == 0 { + mp.expectations = nil + mp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + } else { + expectation := mp.expectations[0] + mp.expectations = mp.expectations[1:] + + partition, err := partitioner.Partition(msg, mp.partitions(msg.Topic)) + if err != nil { + mp.t.Errorf("Partitioner returned an error: %s", err.Error()) + mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} + } else { + msg.Partition = partition + if 
expectation.CheckFunction != nil { + err := expectation.CheckFunction(msg) + if err != nil { + mp.t.Errorf("Check function returned an error: %s", err.Error()) + mp.errors <- &sarama.ProducerError{Err: err, Msg: msg} + } + } + if errors.Is(expectation.Result, errProduceSuccess) { + mp.lastOffset++ + if config.Producer.Return.Successes { + msg.Offset = mp.lastOffset + mp.successes <- msg + } + } else if config.Producer.Return.Errors { + mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg} + } + } + } + mp.l.Unlock() + } + + mp.l.Lock() + if len(mp.expectations) > 0 { + mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations)) + } + mp.l.Unlock() + }() + + return mp +} + +//////////////////////////////////////////////// +// Implement Producer interface +//////////////////////////////////////////////// + +// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation. +// By closing a mock producer, you also tell it that no more input will be provided, so it will +// write an error to the test state if there's any remaining expectations. +func (mp *AsyncProducer) AsyncClose() { + close(mp.input) +} + +// Close corresponds with the Close method of sarama's Producer implementation. +// By closing a mock producer, you also tell it that no more input will be provided, so it will +// write an error to the test state if there's any remaining expectations. +func (mp *AsyncProducer) Close() error { + mp.AsyncClose() + <-mp.closed + return nil +} + +// Input corresponds with the Input method of sarama's Producer implementation. +// You have to set expectations on the mock producer before writing messages to the Input +// channel, so it knows how to handle them. If there is no more remaining expectations and +// a messages is written to the Input channel, the mock producer will write an error to the test +// state object. 
+func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage { + return mp.input +} + +// Successes corresponds with the Successes method of sarama's Producer implementation. +func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage { + return mp.successes +} + +// Errors corresponds with the Errors method of sarama's Producer implementation. +func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError { + return mp.errors +} + +func (mp *AsyncProducer) IsTransactional() bool { + return mp.isTransactional +} + +func (mp *AsyncProducer) BeginTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagInTransaction + return nil +} + +func (mp *AsyncProducer) CommitTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (mp *AsyncProducer) AbortTxn() error { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + mp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (mp *AsyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + mp.txnLock.Lock() + defer mp.txnLock.Unlock() + + return mp.txnStatus +} + +func (mp *AsyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + return nil +} + +func (mp *AsyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + return nil +} + +//////////////////////////////////////////////// +// Setting expectations +//////////////////////////////////////////////// + +// ExpectInputWithMessageCheckerFunctionAndSucceed sets an expectation on the mock producer that a +// message will be provided on the input channel. The mock producer will call the given function to +// check the message. If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it produced successfully, i.e. 
it will make it +// available on the Successes channel if the Producer.Return.Successes setting is set to true. +func (mp *AsyncProducer) ExpectInputWithMessageCheckerFunctionAndSucceed(cf MessageChecker) *AsyncProducer { + mp.l.Lock() + defer mp.l.Unlock() + mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) + + return mp +} + +// ExpectInputWithMessageCheckerFunctionAndFail sets an expectation on the mock producer that a +// message will be provided on the input channel. The mock producer will first call the given +// function to check the message. If an error is returned it will be made available on the Errors +// channel otherwise the mock will handle the message as if it failed to produce successfully. This +// means it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputWithMessageCheckerFunctionAndFail(cf MessageChecker, err error) *AsyncProducer { + mp.l.Lock() + defer mp.l.Unlock() + mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) + + return mp +} + +// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message +// will be provided on the input channel. The mock producer will call the given function to check +// the message value. If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it produced successfully, i.e. it will make +// it available on the Successes channel if the Producer.Return.Successes setting is set to true. +func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndSucceed(messageValueChecker(cf)) + + return mp +} + +// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message +// will be provided on the input channel. 
The mock producer will first call the given function to +// check the message value. If an error is returned it will be made available on the Errors channel +// otherwise the mock will handle the message as if it failed to produce successfully. This means +// it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndFail(messageValueChecker(cf), err) + + return mp +} + +// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided +// on the input channel. The mock producer will handle the message as if it is produced successfully, +// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting +// is set to true. +func (mp *AsyncProducer) ExpectInputAndSucceed() *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndSucceed(nil) + + return mp +} + +// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided +// on the input channel. The mock producer will handle the message as if it failed to produce +// successfully. This means it will make a ProducerError available on the Errors channel. +func (mp *AsyncProducer) ExpectInputAndFail(err error) *AsyncProducer { + mp.ExpectInputWithMessageCheckerFunctionAndFail(nil, err) + + return mp +} diff --git a/vendor/github.com/IBM/sarama/mocks/consumer.go b/vendor/github.com/IBM/sarama/mocks/consumer.go new file mode 100644 index 0000000000..77bb9195cb --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/consumer.go @@ -0,0 +1,441 @@ +package mocks + +import ( + "sync" + "sync/atomic" + + "github.com/IBM/sarama" +) + +// Consumer implements sarama's Consumer interface for testing purposes. +// Before you can start consuming from this consumer, you have to register +// topic/partitions using ExpectConsumePartition, and set expectations on them. 
+type Consumer struct { + l sync.Mutex + t ErrorReporter + config *sarama.Config + partitionConsumers map[string]map[int32]*PartitionConsumer + metadata map[string][]int32 +} + +// NewConsumer returns a new mock Consumer instance. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. The config argument can be set to nil; if it is +// non-nil it is validated. +func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + + c := &Consumer{ + t: t, + config: config, + partitionConsumers: make(map[string]map[int32]*PartitionConsumer), + } + return c +} + +/////////////////////////////////////////////////// +// Consumer interface implementation +/////////////////////////////////////////////////// + +// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface. +// Before you can start consuming a partition, you have to set expectations on it using +// ExpectConsumePartition. You can only consume a partition once per consumer. +func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil { + c.t.Errorf("No expectations set for %s/%d", topic, partition) + return nil, errOutOfExpectations + } + + pc := c.partitionConsumers[topic][partition] + if pc.consumed { + return nil, sarama.ConfigurationError("The topic/partition is already being consumed") + } + + if pc.offset != AnyOffset && pc.offset != offset { + c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. 
Expected %d, got %d.", topic, partition, pc.offset, offset) + } + + pc.consumed = true + return pc, nil +} + +// Topics returns a list of topics, as registered with SetTopicMetadata +func (c *Consumer) Topics() ([]string, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.metadata == nil { + c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetTopicMetadata.") + return nil, sarama.ErrOutOfBrokers + } + + var result []string + for topic := range c.metadata { + result = append(result, topic) + } + return result, nil +} + +// Partitions returns the list of partitions for the given topic, as registered with SetTopicMetadata +func (c *Consumer) Partitions(topic string) ([]int32, error) { + c.l.Lock() + defer c.l.Unlock() + + if c.metadata == nil { + c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetTopicMetadata.") + return nil, sarama.ErrOutOfBrokers + } + if c.metadata[topic] == nil { + return nil, sarama.ErrUnknownTopicOrPartition + } + + return c.metadata[topic], nil +} + +func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { + c.l.Lock() + defer c.l.Unlock() + + hwms := make(map[string]map[int32]int64, len(c.partitionConsumers)) + for topic, partitionConsumers := range c.partitionConsumers { + hwm := make(map[int32]int64, len(partitionConsumers)) + for partition, pc := range partitionConsumers { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + +// Close implements the Close method from the sarama.Consumer interface. It will close +// all registered PartitionConsumer instances. +func (c *Consumer) Close() error { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + _ = partitionConsumer.Close() + } + } + + return nil +} + +// Pause implements Consumer.
+func (c *Consumer) Pause(topicPartitions map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.partitionConsumers[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Pause() + } + } + } + } +} + +// Resume implements Consumer. +func (c *Consumer) Resume(topicPartitions map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + for topic, partitions := range topicPartitions { + for _, partition := range partitions { + if topicConsumers, ok := c.partitionConsumers[topic]; ok { + if partitionConsumer, ok := topicConsumers[partition]; ok { + partitionConsumer.Resume() + } + } + } + } +} + +// PauseAll implements Consumer. +func (c *Consumer) PauseAll() { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + partitionConsumer.Pause() + } + } +} + +// ResumeAll implements Consumer. +func (c *Consumer) ResumeAll() { + c.l.Lock() + defer c.l.Unlock() + + for _, partitions := range c.partitionConsumers { + for _, partitionConsumer := range partitions { + partitionConsumer.Resume() + } + } +} + +/////////////////////////////////////////////////// +// Expectation API +/////////////////////////////////////////////////// + +// SetTopicMetadata sets the clusters topic/partition metadata, +// which will be returned by Topics() and Partitions(). +func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) { + c.l.Lock() + defer c.l.Unlock() + + c.metadata = metadata +} + +// ExpectConsumePartition will register a topic/partition, so you can set expectations on it. +// The registered PartitionConsumer will be returned, so you can set expectations +// on it using method chaining. Once a topic/partition is registered, you are +// expected to start consuming it using ConsumePartition. 
If that doesn't happen, +// an error will be written to the error reporter once the mock consumer is closed. It also expects +// that the message and error channels be written with YieldMessage and YieldError accordingly, +// and be fully consumed once the mock consumer is closed if ExpectMessagesDrainedOnClose or +// ExpectErrorsDrainedOnClose have been called. +func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer { + c.l.Lock() + defer c.l.Unlock() + + if c.partitionConsumers[topic] == nil { + c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer) + } + + if c.partitionConsumers[topic][partition] == nil { + highWatermarkOffset := offset + if offset == sarama.OffsetOldest { + highWatermarkOffset = 0 + } + + c.partitionConsumers[topic][partition] = &PartitionConsumer{ + highWaterMarkOffset: highWatermarkOffset, + t: c.t, + topic: topic, + partition: partition, + offset: offset, + messages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), + suppressedMessages: make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize), + errors: make(chan *sarama.ConsumerError, c.config.ChannelBufferSize), + } + } + + return c.partitionConsumers[topic][partition] +} + +/////////////////////////////////////////////////// +// PartitionConsumer mock type +/////////////////////////////////////////////////// + +// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes. +// It is returned by the mock Consumers ConsumePartitionMethod, but only if it is +// registered first using the Consumer's ExpectConsumePartition method. Before consuming the +// Errors and Messages channel, you should specify what values will be provided on these +// channels using YieldMessage and YieldError. 
+type PartitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + suppressedHighWaterMarkOffset int64 + l sync.Mutex + t ErrorReporter + topic string + partition int32 + offset int64 + messages chan *sarama.ConsumerMessage + suppressedMessages chan *sarama.ConsumerMessage + errors chan *sarama.ConsumerError + singleClose sync.Once + consumed bool + errorsShouldBeDrained bool + messagesShouldBeDrained bool + paused bool +} + +/////////////////////////////////////////////////// +// PartitionConsumer interface implementation +/////////////////////////////////////////////////// + +// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) AsyncClose() { + pc.singleClose.Do(func() { + close(pc.suppressedMessages) + close(pc.messages) + close(pc.errors) + }) +} + +// Close implements the Close method from the sarama.PartitionConsumer interface. It will +// verify whether the partition consumer was actually started. 
+func (pc *PartitionConsumer) Close() error { + if !pc.consumed { + pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition) + return errPartitionConsumerNotStarted + } + + if pc.errorsShouldBeDrained && len(pc.errors) > 0 { + pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors)) + } + + if pc.messagesShouldBeDrained && len(pc.messages) > 0 { + pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages)) + } + + pc.AsyncClose() + + var ( + closeErr error + wg sync.WaitGroup + ) + + wg.Add(1) + go func() { + defer wg.Done() + + errs := make(sarama.ConsumerErrors, 0) + for err := range pc.errors { + errs = append(errs, err) + } + + if len(errs) > 0 { + closeErr = errs + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for range pc.messages { + // drain + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for range pc.suppressedMessages { + // drain + } + }() + + wg.Wait() + return closeErr +} + +// Errors implements the Errors method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError { + return pc.errors +} + +// Messages implements the Messages method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage { + return pc.messages +} + +func (pc *PartitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&pc.highWaterMarkOffset) +} + +// Pause implements the Pause method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) Pause() { + pc.l.Lock() + defer pc.l.Unlock() + + pc.suppressedHighWaterMarkOffset = atomic.LoadInt64(&pc.highWaterMarkOffset) + + pc.paused = true +} + +// Resume implements the Resume method from the sarama.PartitionConsumer interface. 
+func (pc *PartitionConsumer) Resume() { + pc.l.Lock() + defer pc.l.Unlock() + + pc.highWaterMarkOffset = atomic.LoadInt64(&pc.suppressedHighWaterMarkOffset) + for len(pc.suppressedMessages) > 0 { + msg := <-pc.suppressedMessages + pc.messages <- msg + } + + pc.paused = false +} + +// IsPaused implements the IsPaused method from the sarama.PartitionConsumer interface. +func (pc *PartitionConsumer) IsPaused() bool { + pc.l.Lock() + defer pc.l.Unlock() + + return pc.paused +} + +/////////////////////////////////////////////////// +// Expectation API +/////////////////////////////////////////////////// + +// YieldMessage will yield a message on the Messages channel of this partition consumer +// when it is consumed. By default, the mock consumer will not verify whether this +// message was consumed from the Messages channel, because there are legitimate +// reasons for this not to happen. You can call ExpectMessagesDrainedOnClose so it will +// verify that the channel is empty on close. +func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) *PartitionConsumer { + pc.l.Lock() + defer pc.l.Unlock() + + msg.Topic = pc.topic + msg.Partition = pc.partition + + if pc.paused { + msg.Offset = atomic.AddInt64(&pc.suppressedHighWaterMarkOffset, 1) - 1 + pc.suppressedMessages <- msg + } else { + msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1) - 1 + pc.messages <- msg + } + + return pc +} + +// YieldError will yield an error on the Errors channel of this partition consumer +// when it is consumed. By default, the mock consumer will not verify whether this error was +// consumed from the Errors channel, because there are legitimate reasons for this +// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that +// the channel is empty on close.
+func (pc *PartitionConsumer) YieldError(err error) *PartitionConsumer { + pc.errors <- &sarama.ConsumerError{ + Topic: pc.topic, + Partition: pc.partition, + Err: err, + } + + return pc +} + +// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer +// that the messages channel will be fully drained when Close is called. If this +// expectation is not met, an error is reported to the error reporter. +func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() *PartitionConsumer { + pc.messagesShouldBeDrained = true + + return pc +} + +// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer +// that the errors channel will be fully drained when Close is called. If this +// expectation is not met, an error is reported to the error reporter. +func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() *PartitionConsumer { + pc.errorsShouldBeDrained = true + + return pc +} diff --git a/vendor/github.com/IBM/sarama/mocks/mocks.go b/vendor/github.com/IBM/sarama/mocks/mocks.go new file mode 100644 index 0000000000..bd9d630ddb --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/mocks.go @@ -0,0 +1,110 @@ +/* +Package mocks provides mocks that can be used for testing applications +that use Sarama. The mock types provided by this package implement the +interfaces Sarama exports, so you can use them for dependency injection +in your tests. + +All mock instances require you to set expectations on them before you +can use them. It will determine how the mock will behave. If an +expectation is not met, it will make your test fail. + +NOTE: this package currently does not fall under the API stability +guarantee of Sarama as it is still considered experimental. +*/ +package mocks + +import ( + "errors" + "fmt" + + "github.com/IBM/sarama" +) + +// ErrorReporter is a simple interface that includes the testing.T methods we use to report +// expectation violations when using the mock objects. 
+type ErrorReporter interface { + Errorf(string, ...interface{}) +} + +// ValueChecker is a function type to be set in each expectation of the producer mocks +// to check the value passed. +type ValueChecker func(val []byte) error + +// MessageChecker is a function type to be set in each expectation of the producer mocks +// to check the message passed. +type MessageChecker func(*sarama.ProducerMessage) error + +// messageValueChecker wraps a ValueChecker into a MessageChecker. +// Failure to encode the message value will return an error and not call +// the wrapped ValueChecker. +func messageValueChecker(f ValueChecker) MessageChecker { + if f == nil { + return nil + } + return func(msg *sarama.ProducerMessage) error { + val, err := msg.Value.Encode() + if err != nil { + return fmt.Errorf("Input message encoding failed: %w", err) + } + return f(val) + } +} + +var ( + errProduceSuccess error = nil + errOutOfExpectations = errors.New("No more expectations set on mock") + errPartitionConsumerNotStarted = errors.New("The partition consumer was never started") +) + +const AnyOffset int64 = -1000 + +type producerExpectation struct { + Result error + CheckFunction MessageChecker +} + +// TopicConfig describes a mock topic structure for the mock producers’ partitioning needs. +type TopicConfig struct { + overridePartitions map[string]int32 + defaultPartitions int32 +} + +// NewTopicConfig makes a configuration which defaults to 32 partitions for every topic. +func NewTopicConfig() *TopicConfig { + return &TopicConfig{ + overridePartitions: make(map[string]int32, 0), + defaultPartitions: 32, + } +} + +// SetDefaultPartitions sets the number of partitions any topic not explicitly configured otherwise +// (by SetPartitions) will have from the perspective of created partitioners. +func (pc *TopicConfig) SetDefaultPartitions(n int32) { + pc.defaultPartitions = n +} + +// SetPartitions sets the number of partitions the partitioners will see for specific topics. 
This +// only applies to messages produced after setting them. +func (pc *TopicConfig) SetPartitions(partitions map[string]int32) { + for p, n := range partitions { + pc.overridePartitions[p] = n + } +} + +func (pc *TopicConfig) partitions(topic string) int32 { + if n, found := pc.overridePartitions[topic]; found { + return n + } + return pc.defaultPartitions +} + +// NewTestConfig returns a config meant to be used by tests. +// Due to inconsistencies with the request versions the clients send using the default Kafka version +// and the response versions our mocks use, we default to the minimum Kafka version in most tests +func NewTestConfig() *sarama.Config { + config := sarama.NewConfig() + config.Consumer.Retry.Backoff = 0 + config.Producer.Retry.Backoff = 0 + config.Version = sarama.MinVersion + return config +} diff --git a/vendor/github.com/IBM/sarama/mocks/sync_producer.go b/vendor/github.com/IBM/sarama/mocks/sync_producer.go new file mode 100644 index 0000000000..9d103ed0d7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/mocks/sync_producer.go @@ -0,0 +1,264 @@ +package mocks + +import ( + "errors" + "sync" + + "github.com/IBM/sarama" +) + +// SyncProducer implements sarama's SyncProducer interface for testing purposes. +// Before you can use it, you have to set expectations on the mock SyncProducer +// to tell it how to handle calls to SendMessage, so you can easily test success +// and failure scenarios. +type SyncProducer struct { + l sync.Mutex + t ErrorReporter + expectations []*producerExpectation + lastOffset int64 + + *TopicConfig + newPartitioner sarama.PartitionerConstructor + partitioners map[string]sarama.Partitioner + + isTransactional bool + txnLock sync.Mutex + txnStatus sarama.ProducerTxnStatusFlag +} + +// NewSyncProducer instantiates a new SyncProducer mock. The t argument should +// be the *testing.T instance of your test method. An error will be written to it if +// an expectation is violated. 
The config argument is validated and used to handle +// partitioning. +func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer { + if config == nil { + config = sarama.NewConfig() + } + if err := config.Validate(); err != nil { + t.Errorf("Invalid mock configuration provided: %s", err.Error()) + } + return &SyncProducer{ + t: t, + expectations: make([]*producerExpectation, 0), + TopicConfig: NewTopicConfig(), + newPartitioner: config.Producer.Partitioner, + partitioners: make(map[string]sarama.Partitioner, 1), + isTransactional: config.Producer.Transaction.ID != "", + txnStatus: sarama.ProducerTxnFlagReady, + } +} + +//////////////////////////////////////////////// +// Implement SyncProducer interface +//////////////////////////////////////////////// + +// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation. +// You have to set expectations on the mock producer before calling SendMessage, so it knows +// how to handle them. You can set a function in each expectation so that the message value +// checked by this function and an error is returned if the match fails. +// If there is no more remaining expectation when SendMessage is called, +// the mock producer will write an error to the test state object. 
+func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { + sp.l.Lock() + defer sp.l.Unlock() + + if sp.IsTransactional() && sp.txnStatus&sarama.ProducerTxnFlagInTransaction == 0 { + sp.t.Errorf("attempt to send message when transaction is not started or is in ending state.") + return -1, -1, errors.New("attempt to send message when transaction is not started or is in ending state") + } + + if len(sp.expectations) > 0 { + expectation := sp.expectations[0] + sp.expectations = sp.expectations[1:] + topic := msg.Topic + partition, err := sp.partitioner(topic).Partition(msg, sp.partitions(topic)) + if err != nil { + sp.t.Errorf("Partitioner returned an error: %s", err.Error()) + return -1, -1, err + } + msg.Partition = partition + if expectation.CheckFunction != nil { + errCheck := expectation.CheckFunction(msg) + if errCheck != nil { + sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) + return -1, -1, errCheck + } + } + if errors.Is(expectation.Result, errProduceSuccess) { + sp.lastOffset++ + msg.Offset = sp.lastOffset + return 0, msg.Offset, nil + } + return -1, -1, expectation.Result + } + sp.t.Errorf("No more expectation set on this mock producer to handle the input message.") + return -1, -1, errOutOfExpectations +} + +// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation. +// You have to set expectations on the mock producer before calling SendMessages, so it knows +// how to handle them. If there is no more remaining expectations when SendMessages is called, +// the mock producer will write an error to the test state object. 
+func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error { + sp.l.Lock() + defer sp.l.Unlock() + + if len(sp.expectations) >= len(msgs) { + expectations := sp.expectations[0:len(msgs)] + sp.expectations = sp.expectations[len(msgs):] + + for i, expectation := range expectations { + topic := msgs[i].Topic + partition, err := sp.partitioner(topic).Partition(msgs[i], sp.partitions(topic)) + if err != nil { + sp.t.Errorf("Partitioner returned an error: %s", err.Error()) + return err + } + msgs[i].Partition = partition + if expectation.CheckFunction != nil { + errCheck := expectation.CheckFunction(msgs[i]) + if errCheck != nil { + sp.t.Errorf("Check function returned an error: %s", errCheck.Error()) + return errCheck + } + } + if !errors.Is(expectation.Result, errProduceSuccess) { + return expectation.Result + } + sp.lastOffset++ + msgs[i].Offset = sp.lastOffset + } + return nil + } + sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.") + return errOutOfExpectations +} + +func (sp *SyncProducer) partitioner(topic string) sarama.Partitioner { + partitioner := sp.partitioners[topic] + if partitioner == nil { + partitioner = sp.newPartitioner(topic) + sp.partitioners[topic] = partitioner + } + return partitioner +} + +// Close corresponds with the Close method of sarama's SyncProducer implementation. +// By closing a mock syncproducer, you also tell it that no more SendMessage calls will follow, +// so it will write an error to the test state if there's any remaining expectations. 
+func (sp *SyncProducer) Close() error { + sp.l.Lock() + defer sp.l.Unlock() + + if len(sp.expectations) > 0 { + sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations)) + } + + return nil +} + +//////////////////////////////////////////////// +// Setting expectations +//////////////////////////////////////////////// + +// ExpectSendMessageWithMessageCheckerFunctionAndSucceed sets an expectation on the mock producer +// that SendMessage will be called. The mock producer will first call the given function to check +// the message. It will cascade the error of the function, if any, or handle the message as if it +// produced successfully, i.e. by returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageWithMessageCheckerFunctionAndSucceed(cf MessageChecker) *SyncProducer { + sp.l.Lock() + defer sp.l.Unlock() + sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf}) + + return sp +} + +// ExpectSendMessageWithMessageCheckerFunctionAndFail sets an expectation on the mock producer that +// SendMessage will be called. The mock producer will first call the given function to check the +// message. It will cascade the error of the function, if any, or handle the message as if it +// failed to produce successfully, i.e. by returning the provided error. +func (sp *SyncProducer) ExpectSendMessageWithMessageCheckerFunctionAndFail(cf MessageChecker, err error) *SyncProducer { + sp.l.Lock() + defer sp.l.Unlock() + sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf}) + + return sp +} + +// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage +// will be called. The mock producer will first call the given function to check the message value. +// It will cascade the error of the function, if any, or handle the message as if it produced +// successfully, i.e. 
by returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndSucceed(messageValueChecker(cf)) + + return sp +} + +// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will first call the given function to check the message value. +// It will cascade the error of the function, if any, or handle the message as if it failed +// to produce successfully, i.e. by returning the provided error. +func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndFail(messageValueChecker(cf), err) + + return sp +} + +// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will handle the message as if it produced successfully, i.e. by +// returning a valid partition, and offset, and a nil error. +func (sp *SyncProducer) ExpectSendMessageAndSucceed() *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndSucceed(nil) + + return sp +} + +// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be +// called. The mock producer will handle the message as if it failed to produce +// successfully, i.e. by returning the provided error. 
+func (sp *SyncProducer) ExpectSendMessageAndFail(err error) *SyncProducer { + sp.ExpectSendMessageWithMessageCheckerFunctionAndFail(nil, err) + + return sp +} + +func (sp *SyncProducer) IsTransactional() bool { + return sp.isTransactional +} + +func (sp *SyncProducer) BeginTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagInTransaction + return nil +} + +func (sp *SyncProducer) CommitTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (sp *SyncProducer) AbortTxn() error { + sp.txnLock.Lock() + defer sp.txnLock.Unlock() + + sp.txnStatus = sarama.ProducerTxnFlagReady + return nil +} + +func (sp *SyncProducer) TxnStatus() sarama.ProducerTxnStatusFlag { + return sp.txnStatus +} + +func (sp *SyncProducer) AddOffsetsToTxn(offsets map[string][]*sarama.PartitionOffsetMetadata, groupId string) error { + return nil +} + +func (sp *SyncProducer) AddMessageToTxn(msg *sarama.ConsumerMessage, groupId string, metadata *string) error { + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index cc790ca23c..0da254b537 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,6 +1,7 @@ # github.com/IBM/sarama v1.43.2 ## explicit; go 1.19 github.com/IBM/sarama +github.com/IBM/sarama/mocks # github.com/ant0ine/go-json-rest v3.3.2+incompatible ## explicit github.com/ant0ine/go-json-rest/rest From 68d30ebd190e89480194d78ffaf6b15b5c4ba797 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 16 Sep 2024 14:47:28 -0600 Subject: [PATCH 19/54] modifies DelayingConsumer to use a message header instead of a delay Instead of a static delay, uses a "not before" time found in a Kafka message header. Consumption of the message will not be attempted until the time has passed. This allows for more accurate delays, as the time required to process an earlier message doesn't further delay the current message's processing. 
BACK-2449 --- data/events/events.go | 102 ++++++++++++++++++++++++--- data/events/events_test.go | 139 ++++++++++++++++++++++++++++++++----- 2 files changed, 216 insertions(+), 25 deletions(-) diff --git a/data/events/events.go b/data/events/events.go index cdd511750b..42912f397d 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -1,9 +1,11 @@ package events import ( + "bytes" "context" "fmt" "os" + "strconv" "sync" "time" @@ -341,9 +343,8 @@ func (r *CascadingSaramaEventsRunner) buildConsumer(ctx context.Context, idx int } } if delay > 0 { - consumer = &DelayingConsumer{ + consumer = &NotBeforeConsumer{ Consumer: consumer, - Delay: delay, Logger: r.Logger, } } @@ -370,28 +371,73 @@ func (r *CascadingSaramaEventsRunner) logger(ctx context.Context) log.Logger { return r.Logger } -// DelayingConsumer injects a delay before consuming a message. -type DelayingConsumer struct { +// NotBeforeConsumer delays consumption until a specified time. +type NotBeforeConsumer struct { Consumer asyncevents.SaramaMessageConsumer - Delay time.Duration Logger log.Logger } -func (c *DelayingConsumer) Consume(ctx context.Context, session sarama.ConsumerGroupSession, +func (c *NotBeforeConsumer) Consume(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { + notBefore, err := c.notBeforeFromMsgHeaders(msg) + if err != nil { + c.Logger.WithError(err).Info("Unable to parse kafka header not-before value") + } + delay := time.Until(notBefore) + select { case <-ctx.Done(): if ctxErr := ctx.Err(); ctxErr != context.Canceled { return ctxErr } return nil - case <-time.After(c.Delay): - c.Logger.WithFields(log.Fields{"topic": msg.Topic, "delay": c.Delay}).Debugf("delayed") + case <-time.After(time.Until(notBefore)): + if !notBefore.IsZero() { + fields := log.Fields{"topic": msg.Topic, "not-before": notBefore, "delay": delay} + c.Logger.WithFields(fields).Debugf("delayed") + } return c.Consumer.Consume(ctx, session, msg) } } +// 
HeaderNotBefore tells consumers not to consume a message before a certain time. +var HeaderNotBefore = []byte("x-tidepool-not-before") + +// NotBeforeTimeFormat specifies the [time.Parse] format to use for HeaderNotBefore. +var NotBeforeTimeFormat = time.RFC3339Nano + +// HeaderFailures counts the number of failures encountered trying to consume the message. +var HeaderFailures = []byte("x-tidepool-failures") + +// FailuresToDelay maps the number of consumption failures to the next delay. +// +// Rather than using a failures header, the name of the topic could be used as a lookup, if +// so desired. +var FailuresToDelay = map[int]time.Duration{ + 0: 0, + 1: 1 * time.Second, + 2: 2 * time.Second, + 3: 3 * time.Second, + 4: 5 * time.Second, +} + +func (c *NotBeforeConsumer) notBeforeFromMsgHeaders(msg *sarama.ConsumerMessage) ( + time.Time, error) { + + for _, header := range msg.Headers { + if bytes.Equal(header.Key, HeaderNotBefore) { + notBefore, err := time.Parse(NotBeforeTimeFormat, string(header.Value)) + if err != nil { + return time.Time{}, fmt.Errorf("parsing not before header: %s", err) + } else { + return notBefore, nil + } + } + } + return time.Time{}, fmt.Errorf("header not found: x-tidepool-not-before") +} + // CascadingConsumer cascades messages that failed to be consumed to another topic. type CascadingConsumer struct { Consumer asyncevents.SaramaMessageConsumer @@ -444,6 +490,7 @@ func (c *CascadingConsumer) withTxn(f func() error) (err error) { return f() } +// cascadeMessage to the next topic. func (c *CascadingConsumer) cascadeMessage(msg *sarama.ConsumerMessage) *sarama.ProducerMessage { pHeaders := make([]sarama.RecordHeader, len(msg.Headers)) for idx, header := range msg.Headers { @@ -453,6 +500,43 @@ func (c *CascadingConsumer) cascadeMessage(msg *sarama.ConsumerMessage) *sarama. 
Key: sarama.ByteEncoder(msg.Key), Value: sarama.ByteEncoder(msg.Value), Topic: c.NextTopic, - Headers: pHeaders, + Headers: c.updateCascadeHeaders(pHeaders), + } +} + +// updateCascadeHeaders calculates not before and failures header values. +// +// Existing not before and failures headers will be dropped in place of the new ones. +func (c *CascadingConsumer) updateCascadeHeaders(headers []sarama.RecordHeader) []sarama.RecordHeader { + failures := 0 + notBefore := time.Now() + + keep := make([]sarama.RecordHeader, 0, len(headers)) + for _, header := range headers { + switch { + case bytes.Equal(header.Key, HeaderNotBefore): + continue // Drop this header, we'll add a new version below. + case bytes.Equal(header.Key, HeaderFailures): + parsed, err := strconv.ParseInt(string(header.Value), 10, 32) + if err != nil { + c.Logger.WithError(err).Info("Unable to parse consumption failures count") + } else { + failures = int(parsed) + notBefore = notBefore.Add(FailuresToDelay[failures]) + } + continue // Drop this header, we'll add a new version below. 
+ } + keep = append(keep, header) } + + keep = append(keep, sarama.RecordHeader{ + Key: HeaderNotBefore, + Value: []byte(notBefore.Format(NotBeforeTimeFormat)), + }) + keep = append(keep, sarama.RecordHeader{ + Key: HeaderFailures, + Value: []byte(strconv.Itoa(failures + 1)), + }) + + return keep } diff --git a/data/events/events_test.go b/data/events/events_test.go index 5294d9f277..a3bb7de8b1 100644 --- a/data/events/events_test.go +++ b/data/events/events_test.go @@ -1,8 +1,10 @@ package events import ( + "bytes" "context" "fmt" + "strconv" "sync" "sync/atomic" "time" @@ -110,24 +112,32 @@ var _ = DescribeTable("CappedExponentialBinaryDelay", Entry("cap: 1m; tries: 20", time.Minute, 20, time.Minute), ) -var _ = Describe("DelayingConsumer", func() { +var _ = Describe("NotBeforeConsumer", func() { Describe("Consume", func() { - var testMsg = &sarama.ConsumerMessage{ - Topic: "test.topic", + var newTestMsg = func(notBefore time.Time) *sarama.ConsumerMessage { + headers := []*sarama.RecordHeader{} + if !notBefore.IsZero() { + headers = append(headers, &sarama.RecordHeader{ + Key: HeaderNotBefore, + Value: []byte(notBefore.Format(NotBeforeTimeFormat)), + }) + } + return &sarama.ConsumerMessage{Topic: "test.topic", Headers: headers} } - It("delays by the configured duration", func() { + It("delays based on the x-tidepool-not-before header", func() { logger := newTestDevlog() testDelay := 10 * time.Millisecond ctx := context.Background() start := time.Now() - dc := &DelayingConsumer{ + notBefore := start.Add(testDelay) + msg := newTestMsg(notBefore) + dc := &NotBeforeConsumer{ Consumer: &mockSaramaMessageConsumer{Logger: logger}, - Delay: testDelay, Logger: logger, } - err := dc.Consume(ctx, nil, testMsg) + err := dc.Consume(ctx, nil, msg) Expect(err).To(BeNil()) Expect(time.Since(start)).To(BeNumerically(">", testDelay)) @@ -137,9 +147,10 @@ var _ = Describe("DelayingConsumer", func() { logger := newTestDevlog() testDelay := 10 * time.Millisecond abortAfter := 1 * 
time.Millisecond - dc := &DelayingConsumer{ + notBefore := time.Now().Add(testDelay) + msg := newTestMsg(notBefore) + dc := &NotBeforeConsumer{ Consumer: &mockSaramaMessageConsumer{Delay: time.Minute, Logger: logger}, - Delay: testDelay, Logger: logger, } ctx, cancel := context.WithCancel(context.Background()) @@ -149,7 +160,7 @@ var _ = Describe("DelayingConsumer", func() { }() start := time.Now() - err := dc.Consume(ctx, nil, testMsg) + err := dc.Consume(ctx, nil, msg) Expect(err).To(BeNil()) Expect(time.Since(start)).To(BeNumerically(">", abortAfter)) @@ -158,14 +169,14 @@ var _ = Describe("DelayingConsumer", func() { }) }) -var _ = Describe("ShiftingConsumer", func() { +var _ = Describe("CascadingConsumer", func() { Describe("Consume", func() { var testMsg = &sarama.ConsumerMessage{ Topic: "test.topic", } Context("on failure", func() { - It("shifts topics", func() { + It("cascades topics", func() { t := GinkgoT() logger := newTestDevlog() ctx := context.Background() @@ -195,6 +206,102 @@ var _ = Describe("ShiftingConsumer", func() { Expect(mockProducer.Close()).To(Succeed()) Expect(err).To(BeNil()) }) + + It("increments the failures header", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{ + Headers: []*sarama.RecordHeader{ + { + Key: HeaderFailures, Value: []byte("3"), + }, + }, + } + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg *sarama.ProducerMessage) error { + failures := 0 + for _, header := range msg.Headers { + if !bytes.Equal(header.Key, HeaderFailures) { + continue + } + parsed, err := strconv.ParseInt(string(header.Value), 10, 32) + Expect(err).To(Succeed()) + failures = int(parsed) + if failures != 
4 { + return fmt.Errorf("expected failures == 4, got %d", failures) + } + return nil + } + return fmt.Errorf("expected failures header wasn't found") + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) + + It("updates the not before header", func() { + t := GinkgoT() + logger := newTestDevlog() + ctx := context.Background() + testConfig := mocks.NewTestConfig() + mockProducer := mocks.NewAsyncProducer(t, testConfig) + msg := &sarama.ConsumerMessage{ + Headers: []*sarama.RecordHeader{ + { + Key: HeaderFailures, Value: []byte("2"), + }, + }, + } + nextTopic := "text-next" + sc := &CascadingConsumer{ + Consumer: &mockSaramaMessageConsumer{ + Err: fmt.Errorf("test error"), + Logger: logger, + }, + NextTopic: nextTopic, + Producer: mockProducer, + Logger: logger, + } + + cf := func(msg *sarama.ProducerMessage) error { + for _, header := range msg.Headers { + if !bytes.Equal(header.Key, HeaderNotBefore) { + continue + } + parsed, err := time.Parse(NotBeforeTimeFormat, string(header.Value)) + if err != nil { + return err + } + until := time.Until(parsed) + delta := 10 * time.Millisecond + if until < 2*time.Second-delta || until > 2*time.Second+delta { + return fmt.Errorf("expected 2 seconds' delay, got: %s", until) + } + return nil + } + return fmt.Errorf("expected failures header wasn't found") + } + mockProducer.ExpectInputWithMessageCheckerFunctionAndSucceed(cf) + + err := sc.Consume(ctx, nil, msg) + Expect(mockProducer.Close()).To(Succeed()) + Expect(err).To(BeNil()) + }) }) Context("on success", func() { @@ -244,8 +351,8 @@ var _ = Describe("ShiftingConsumer", func() { }) }) -var _ = Describe("ShiftingSaramaEventsRunner", func() { - It("shifts through configured delays", func() { +var _ = Describe("CascadingSaramaEventsRunner", func() { + It("cascades through configured delays", func() { ctx, cancel := 
context.WithTimeout(context.Background(), 3*time.Second) defer cancel() testDelays := []time.Duration{0, 1, 2, 3, 5} @@ -256,7 +363,7 @@ var _ = Describe("ShiftingSaramaEventsRunner", func() { Logger: testLogger, } testConfig := SaramaRunnerConfig{ - Topics: []string{"test.shifting"}, + Topics: []string{"test.cascading"}, MessageConsumer: testMessageConsumer, Sarama: mocks.NewTestConfig(), } @@ -338,7 +445,7 @@ var _ = Describe("ShiftingSaramaEventsRunner", func() { }) }) -// testSaramaBuilders injects mocks into the ShiftingSaramaEventsRunner +// testSaramaBuilders injects mocks into the CascadingSaramaEventsRunner type testSaramaBuilders struct { consumerGroup func([]string, string, *sarama.Config) (sarama.ConsumerGroup, error) producer func([]string, *sarama.Config) (sarama.AsyncProducer, error) From d6e0e2cb27f98676f0ac492591cb1cd5127897d8 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 20 Sep 2024 09:06:44 -0600 Subject: [PATCH 20/54] just a little more explanation of cascading consumer --- data/events/events.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/data/events/events.go b/data/events/events.go index 42912f397d..da9c4f4597 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -439,6 +439,10 @@ func (c *NotBeforeConsumer) notBeforeFromMsgHeaders(msg *sarama.ConsumerMessage) } // CascadingConsumer cascades messages that failed to be consumed to another topic. +// +// It also sets an adjustable delay via the "not-before" and "failures" headers so that as +// the message moves from topic to topic, the time between processing is increased according +// to [FailuresToDelay]. 
type CascadingConsumer struct { Consumer asyncevents.SaramaMessageConsumer NextTopic string From 4f8bfbfdc2bb0274f312dc9ac84d08e46b2507ce Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 7 Oct 2024 13:24:20 -0600 Subject: [PATCH 21/54] don't read topic and consumer group id from runtime configuration These won't be changing at runtime, so there's no need to complicate the initialization by making these configurable. The topic's prefix is configurable, and that's the part that will change from environment to environment at runtime. BACK-2554 --- data/service/service/standard.go | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 70a87965ae..132f8cd952 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -483,22 +483,13 @@ func (s *Standard) initializeAlertsEventsHandler() error { return err } - // In addition to the CloudEventsConfig, additional specific config values - // are needed. - config := &struct { - KafkaAlertsTopics []string `envconfig:"KAFKA_ALERTS_TOPICS" default:"alerts,deviceData.alerts"` - KafkaAlertsGroupID string `envconfig:"KAFKA_ALERTS_CONSUMER_GROUP" required:"true"` - }{} - if err := envconfig.Process("", config); err != nil { - return errors.Wrap(err, "Unable to process envconfig") - } - + topics := []string{"data.alerts", "data.deviceData.alerts"} // Some kafka topics use a `-` as a prefix. But MongoDB CDC topics are created with // `.`. This code is using CDC topics, so ensuring that a `.` is used for alerts events // lines everything up as expected. 
topicPrefix := strings.ReplaceAll(commonConfig.KafkaTopicPrefix, "-", ".") - prefixedTopics := make([]string, 0, len(config.KafkaAlertsTopics)) - for _, topic := range config.KafkaAlertsTopics { + prefixedTopics := make([]string, 0, len(topics)) + for _, topic := range topics { prefixedTopics = append(prefixedTopics, topicPrefix+topic) } @@ -517,7 +508,7 @@ func (s *Standard) initializeAlertsEventsHandler() error { runnerCfg := dataEvents.SaramaRunnerConfig{ Brokers: commonConfig.KafkaBrokers, - GroupID: config.KafkaAlertsGroupID, + GroupID: "alerts", Topics: prefixedTopics, Sarama: commonConfig.SaramaConfig, MessageConsumer: &dataEvents.AlertsEventsConsumer{ From 2b0b8f586697cfa4f77a5061ed9722eda5902563 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 10 Dec 2024 13:55:54 -0700 Subject: [PATCH 22/54] there's no longer a need to inject server session tokens A rebase has picked up work performed by Darin, which removes the need for this token injection. \o/ Yay! --- alerts/client.go | 34 +++++----------------------- alerts/client_test.go | 38 +++----------------------------- data/events/alerts.go | 11 +-------- data/events/alerts_test.go | 28 ----------------------- data/service/service/standard.go | 3 +-- 5 files changed, 11 insertions(+), 103 deletions(-) diff --git a/alerts/client.go b/alerts/client.go index 3198c489af..6352f8d359 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -6,7 +6,6 @@ import ( "github.com/kelseyhightower/envconfig" - "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/client" "github.com/tidepool-org/platform/errors" platformlog "github.com/tidepool-org/platform/log" @@ -17,22 +16,20 @@ import ( // Client for managing alerts configs. type Client struct { - client PlatformClient - logger platformlog.Logger - tokenProvider auth.ServerSessionTokenProvider + client PlatformClient + logger platformlog.Logger } // NewClient builds a client for interacting with alerts API endpoints. 
// // If no logger is provided, a null logger is used. -func NewClient(client PlatformClient, tokenProvider auth.ServerSessionTokenProvider, logger platformlog.Logger) *Client { +func NewClient(client PlatformClient, logger platformlog.Logger) *Client { if logger == nil { logger = null.NewLogger() } return &Client{ - client: client, - logger: logger, - tokenProvider: tokenProvider, + client: client, + logger: logger, } } @@ -43,32 +40,13 @@ type PlatformClient interface { requestBody interface{}, responseBody interface{}, inspectors ...request.ResponseInspector) error } -// TokenProvider retrieves session tokens needed for calling the alerts API. -// -// client.External is one implementation -type TokenProvider interface { - // ServerSessionToken provides a server-to-server API authentication token. - ServerSessionToken() (string, error) -} - // request performs common operations before passing a request off to the // underlying platform.Client. func (c *Client) request(ctx context.Context, method, url string, reqBody, resBody any) error { // Platform's client.Client expects a logger to exist in the request's // context. If it doesn't exist, request processing will panic. loggingCtx := platformlog.NewContextWithLogger(ctx, c.logger) - // Make sure the auth token is injected into the request's headers. - return c.requestWithAuth(loggingCtx, method, url, reqBody, resBody) -} - -// requestWithAuth injects an auth token before calling platform.Client.RequestData. -// -// At time of writing, this is the only way to inject credentials into -// platform.Client. It might be nice to be able to use a mutator, but the auth -// is specifically handled by the platform.Client via the context field, and -// if left blank, platform.Client errors. 
-func (c *Client) requestWithAuth(ctx context.Context, method, url string, reqBody, resBody any) error { - return c.client.RequestData(auth.NewContextWithServerSessionTokenProvider(ctx, c.tokenProvider), method, url, nil, reqBody, resBody) + return c.client.RequestData(loggingCtx, method, url, nil, reqBody, resBody) } // Upsert updates cfg if it exists or creates it if it doesn't. diff --git a/alerts/client_test.go b/alerts/client_test.go index c5a771256f..fb5af7b7c6 100644 --- a/alerts/client_test.go +++ b/alerts/client_test.go @@ -8,18 +8,14 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/client" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/log/null" "github.com/tidepool-org/platform/platform" ) -const testToken = "auth-me" - var _ = Describe("Client", func() { var test404Server, test200Server *httptest.Server - var testAuthServer func(*string) *httptest.Server BeforeEach(func() { t := GinkgoT() @@ -31,12 +27,6 @@ var _ = Describe("Client", func() { test200Server = testServer(t, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) - testAuthServer = func(token *string) *httptest.Server { - return testServer(t, func(w http.ResponseWriter, r *http.Request) { - *token = r.Header.Get(auth.TidepoolSessionTokenHeaderKey) - w.WriteHeader(http.StatusOK) - }) - } }) Context("Delete", func() { @@ -52,13 +42,6 @@ var _ = Describe("Client", func() { err := client.Delete(ctx, &Config{}) Expect(err).ShouldNot(HaveOccurred()) }) - - It("injects an auth token", func() { - token := "" - client, ctx := newAlertsClientTest(testAuthServer(&token)) - _ = client.Delete(ctx, &Config{}) - Expect(token).To(Equal(testToken)) - }) }) Context("Upsert", func() { @@ -74,26 +57,17 @@ var _ = Describe("Client", func() { err := client.Upsert(ctx, &Config{}) Expect(err).ShouldNot(HaveOccurred()) }) - - It("injects an auth token", func() { - token 
:= "" - client, ctx := newAlertsClientTest(testAuthServer(&token)) - _ = client.Upsert(ctx, &Config{}) - Expect(token).To(Equal(testToken)) - }) }) }) func buildTestClient(s *httptest.Server) *Client { pCfg := &platform.Config{ - Config: &client.Config{ - Address: s.URL, - }, + Config: &client.Config{Address: s.URL}, + ServiceSecret: "auth-me", } - token := mockTokenProvider(testToken) pc, err := platform.NewClient(pCfg, platform.AuthorizeAsService) Expect(err).ToNot(HaveOccurred()) - client := NewClient(pc, token, null.NewLogger()) + client := NewClient(pc, null.NewLogger()) return client } @@ -105,12 +79,6 @@ func contextWithNullLogger() context.Context { return log.NewContextWithLogger(context.Background(), null.NewLogger()) } -type mockTokenProvider string - -func (p mockTokenProvider) ServerSessionToken() (string, error) { - return string(p), nil -} - func testServer(t GinkgoTInterface, handler http.HandlerFunc) *httptest.Server { s := httptest.NewServer(http.HandlerFunc(handler)) t.Cleanup(s.Close) diff --git a/data/events/alerts.go b/data/events/alerts.go index ede74be49c..34da074cf3 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -32,7 +32,6 @@ type Consumer struct { Evaluator AlertsEvaluator Permissions permission.Client Pusher Pusher - Tokens alerts.TokenProvider Logger log.Logger } @@ -167,13 +166,12 @@ type AlertsEvaluator interface { } func NewAlertsEvaluator(alerts AlertsClient, data store.DataRepository, - perms permission.Client, tokens alerts.TokenProvider) *evaluator { + perms permission.Client) *evaluator { return &evaluator{ Alerts: alerts, Data: data, Permissions: perms, - Tokens: tokens, } } @@ -182,7 +180,6 @@ type evaluator struct { Alerts AlertsClient Data store.DataRepository Permissions permission.Client - Tokens alerts.TokenProvider } // logger produces a log.Logger. 
@@ -260,12 +257,6 @@ func (e *evaluator) authDenied(ctx context.Context) func(ac *alerts.Config) bool "userID": ac.UserID, "followedUserID": ac.FollowedUserID, }) - token, err := e.Tokens.ServerSessionToken() - if err != nil { - lgr.WithError(err).Warn("Unable to confirm permissions; skipping") - return false - } - ctx = auth.NewContextWithServerSessionToken(ctx, token) perms, err := e.Permissions.GetUserPermissions(ctx, ac.UserID, ac.FollowedUserID) if err != nil { lgr.WithError(err).Warn("Unable to confirm permissions; skipping") diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index 626c80a027..e467cacd2f 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -228,7 +228,6 @@ type consumerTestDeps struct { Permissions *mockPermissionsClient Repo *storetest.DataRepository Session *mockConsumerGroupSession - Tokens alerts.TokenProvider } func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { @@ -248,14 +247,12 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { dataRepo.GetDataRangeOutputs = []storetest.GetDataRangeOutput{ {Error: nil, Cursor: cur}, } - tokens := &mockAlertsTokenProvider{Token: "test-token"} permissions := newMockPermissionsClient() evaluator := newMockStaticEvaluator() return &Consumer{ Alerts: alertsClient, Evaluator: evaluator, - Tokens: tokens, Data: dataRepo, Permissions: permissions, }, &consumerTestDeps{ @@ -266,7 +263,6 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { Repo: dataRepo, Session: &mockConsumerGroupSession{}, Logger: logger, - Tokens: tokens, Permissions: permissions, } } @@ -280,12 +276,10 @@ func newEvaluatorTestDeps(responses []*store.AlertableResponse) (*evaluator, *ev dataRepo.GetAlertableDataOutputs = append(dataRepo.GetAlertableDataOutputs, out) } permissions := newMockPermissionsClient() - tokens := newMockTokensProvider() return &evaluator{ Alerts: alertsClient, Data: dataRepo, Permissions: 
permissions, - Tokens: tokens, }, &evaluatorTestDeps{ Alerts: alertsClient, Permissions: permissions, @@ -561,18 +555,6 @@ func (s *mockConsumerGroupSession) Context() context.Context { panic("not implemented") // TODO: Implement } -type mockAlertsTokenProvider struct { - Token string - Error error -} - -func (p *mockAlertsTokenProvider) ServerSessionToken() (string, error) { - if p.Error != nil { - return "", p.Error - } - return p.Token, nil -} - type mockPermissionsClient struct { Error error Perms map[string]permission.Permissions @@ -612,16 +594,6 @@ func (c *mockPermissionsClient) GetUserPermissions(ctx context.Context, requestU } } -type mockTokensProvider struct{} - -func newMockTokensProvider() *mockTokensProvider { - return &mockTokensProvider{} -} - -func (p *mockTokensProvider) ServerSessionToken() (string, error) { - return "test-server-session-token", nil -} - func testAlertsConfigUrgentLow(userID string) *alerts.Config { return &alerts.Config{ UserID: userID, diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 132f8cd952..2937e28662 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -499,10 +499,9 @@ func (s *Standard) initializeAlertsEventsHandler() error { Alerts: alerts, Data: dataRepo, DeviceTokens: s.AuthClient(), - Evaluator: dataEvents.NewAlertsEvaluator(alerts, dataRepo, s.permissionClient, s.AuthClient()), + Evaluator: dataEvents.NewAlertsEvaluator(alerts, dataRepo, s.permissionClient), Permissions: s.permissionClient, Pusher: s.pusher, - Tokens: s.AuthClient(), Logger: s.Logger(), } From f1653b01bad54650177426f9e0b3c40ba77a8269 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 10 Dec 2024 15:17:46 -0700 Subject: [PATCH 23/54] removes out-of-date tests These tests, and the functionality they cover were moved into the alerts/client.go in a previous commit. 
--- data/events/alerts_test.go | 157 ------------------------------------- 1 file changed, 157 deletions(-) diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index e467cacd2f..8e6fe898ad 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -17,13 +17,11 @@ import ( "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood" "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" logtest "github.com/tidepool-org/platform/log/test" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/pointer" - "github.com/tidepool-org/platform/push" ) const ( @@ -41,7 +39,6 @@ var ( ) var _ = Describe("Consumer", func() { - Describe("Consume", func() { It("ignores nil messages", func() { ctx, _ := addLogger(context.Background()) @@ -117,106 +114,6 @@ var _ = Describe("Consumer", func() { }) }) - - // Describe("evaluateUrgentLow", func() { - // It("can't function without datum units", func() { - // ctx, _ := addLogger(context.Background()) - // alert := newTestUrgentLowAlert() - // datum := newTestStaticDatumMmolL(11) - // datum.Blood.Units = nil - // c := &Consumer{ - // Pusher: newMockPusher(), - // DeviceTokens: newMockDeviceTokensClient(), - // } - - // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) - - // Expect(err).To(MatchError("Unable to evaluate datum: Units, Value, or Time is nil")) - // }) - - // It("can't function without datum value", func() { - // ctx, _ := addLogger(context.Background()) - // alert := newTestUrgentLowAlert() - // datum := newTestStaticDatumMmolL(11) - // datum.Blood.Value = nil - // c := &Consumer{ - // Pusher: newMockPusher(), - // DeviceTokens: newMockDeviceTokensClient(), - // } - - // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) - - // Expect(err).To(MatchError("Unable to evaluate 
datum: Units, Value, or Time is nil")) - // }) - - // It("can't function without datum time", func() { - // ctx, _ := addLogger(context.Background()) - // alert := newTestUrgentLowAlert() - // datum := newTestStaticDatumMmolL(11) - // datum.Blood.Time = nil - // c := &Consumer{ - // Pusher: newMockPusher(), - // DeviceTokens: newMockDeviceTokensClient(), - // } - - // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) - // Expect(err).To(MatchError("Unable to evaluate datum: Units, Value, or Time is nil")) - // }) - - // It("is marked resolved", func() { - // ctx, _ := addLogger(context.Background()) - // datum := newTestStaticDatumMmolL(11) - // alert := newTestUrgentLowAlert() - // alert.Threshold.Value = *datum.Blood.Value - 1 - // userID := "test-user-id" - // c := &Consumer{ - // Pusher: newMockPusher(), - // DeviceTokens: newMockDeviceTokensClient(), - // } - - // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) - // Expect(err).To(Succeed()) - // Expect(updated).To(BeTrue()) - // Expect(alert.Resolved).To(BeTemporally("~", time.Now(), time.Second)) - // }) - - // It("is marked both notified and triggered", func() { - // ctx, _ := addLogger(context.Background()) - // datum := newTestStaticDatumMmolL(11) - // alert := newTestUrgentLowAlert() - // alert.Threshold.Value = *datum.Blood.Value + 1 - // userID := "test-user-id" - // c := &Consumer{ - // Pusher: newMockPusher(), - // DeviceTokens: newMockDeviceTokensClient(), - // } - - // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) - // Expect(err).To(Succeed()) - // Expect(updated).To(BeTrue()) - // Expect(alert.Sent).To(BeTemporally("~", time.Now(), time.Second)) - // Expect(alert.Triggered).To(BeTemporally("~", time.Now(), time.Second)) - // }) - - // It("sends notifications regardless of previous notification time", func() { - // ctx, _ := addLogger(context.Background()) - // datum := newTestStaticDatumMmolL(11) - // alert := newTestUrgentLowAlert() - // lastTime := 
time.Now().Add(-10 * time.Second) - // alert.Activity.Sent = lastTime - // alert.Threshold.Value = *datum.Blood.Value + 1 - // userID := "test-user-id" - // c := &Consumer{ - // Pusher: newMockPusher(), - // DeviceTokens: newMockDeviceTokensClient(), - // } - - // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) - // Expect(err).To(Succeed()) - // Expect(updated).To(BeTrue()) - // Expect(alert.Sent).To(BeTemporally("~", time.Now(), time.Second)) - // }) - // }) }) type consumerTestDeps struct { @@ -424,60 +321,6 @@ func newTestStaticDatumMmolL(value float64) *glucose.Glucose { } } -func newTestUrgentLowAlert() *alerts.UrgentLowAlert { - return &alerts.UrgentLowAlert{ - Base: alerts.Base{ - Enabled: true, - Activity: alerts.Activity{}, - }, - Threshold: alerts.Threshold{ - Units: nontypesglucose.MmolL, - }, - } -} - -type mockDeviceTokensClient struct { - Error error - Tokens []*devicetokens.DeviceToken -} - -func newMockDeviceTokensClient() *mockDeviceTokensClient { - return &mockDeviceTokensClient{ - Tokens: []*devicetokens.DeviceToken{}, - } -} - -// // testingT is a subset of testing.TB -// type testingT interface { -// Errorf(format string, args ...any) -// Fatalf(format string, args ...any) -// } - -func (m *mockDeviceTokensClient) GetDeviceTokens(ctx context.Context, - userID string) ([]*devicetokens.DeviceToken, error) { - - if m.Error != nil { - return nil, m.Error - } - return m.Tokens, nil -} - -type mockPusher struct { - Pushes []string -} - -func newMockPusher() *mockPusher { - return &mockPusher{ - Pushes: []string{}, - } -} - -func (p *mockPusher) Push(ctx context.Context, - deviceToken *devicetokens.DeviceToken, notification *push.Notification) error { - p.Pushes = append(p.Pushes, notification.Message) - return nil -} - type mockAlertsConfigClient struct { Error error Configs []*alerts.Config From bec3b5ce05f2064adb491399fe29070490e9c9cc Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 11 Dec 2024 08:25:28 -0700 Subject: [PATCH 
24/54] improve test coverage BACK-2449 --- data/events/alerts_test.go | 40 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index 8e6fe898ad..d3523457e0 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -109,7 +109,25 @@ var _ = Describe("Consumer", func() { }) It("uses the longest delay", func() { + testLogger := logtest.NewLogger() + ctx := log.NewContextWithLogger(context.Background(), testLogger) + eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) + cfgWithShorterDelay := testAlertsConfigLow(testUserID) + deps.Alerts.Configs = append(deps.Alerts.Configs, cfgWithShorterDelay) + deps.Permissions.Allow(testUserID, permission.Follow, testFollowedUserID) + cfgWithLongerDelay := testAlertsConfigLow(testUserID + "2") + cfgWithLongerDelay.Alerts.Low.Delay = alerts.DurationMinutes(10 * time.Minute) + deps.Alerts.Configs = append(deps.Alerts.Configs, cfgWithLongerDelay) + deps.Permissions.Allow(testUserID+"2", permission.Follow, testFollowedUserID) + + _, err := eval.Evaluate(ctx, testFollowedUserID) + + Expect(err).To(Succeed()) + if Expect(len(deps.Data.GetAlertableDataInputs)).To(Equal(1)) { + Expect(deps.Data.GetAlertableDataInputs[0].Params.Start). + To(BeTemporally("~", time.Now().Add(-10*time.Minute), time.Second)) + } }) }) @@ -180,12 +198,14 @@ func newEvaluatorTestDeps(responses []*store.AlertableResponse) (*evaluator, *ev }, &evaluatorTestDeps{ Alerts: alertsClient, Permissions: permissions, + Data: dataRepo, } } type evaluatorTestDeps struct { Alerts *mockAlertsConfigClient Permissions *mockPermissionsClient + Data *storetest.DataRepository } // mockEvaluator implements Evaluator. 
@@ -456,3 +476,23 @@ func testAlertsConfigUrgentLow(userID string) *alerts.Config { }, } } + +func testAlertsConfigLow(userID string) *alerts.Config { + return &alerts.Config{ + UserID: userID, + FollowedUserID: testFollowedUserID, + UploadID: testUploadID, + Alerts: alerts.Alerts{ + Low: &alerts.LowAlert{ + Base: alerts.Base{ + Enabled: true, + Activity: alerts.Activity{}, + }, + Threshold: alerts.Threshold{ + Value: 10.0, + Units: nontypesglucose.MgdL, + }, + }, + }, + } +} From d5955b740cd767030a5b6ce46ce176924fd34150 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 11 Dec 2024 09:57:57 -0700 Subject: [PATCH 25/54] add data set id to alerts Evaluation, improve test coverage BACK-2499 --- alerts/config.go | 10 +- data/events/alerts.go | 27 +++--- data/events/alerts_test.go | 188 +++++++++++++++++++++++++++---------- 3 files changed, 160 insertions(+), 65 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index 20be69333b..1084132c36 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -57,10 +57,12 @@ func (c Config) Validate(validator structure.Validator) { // Evaluate alerts in the context of the provided data. // -// While this method, or the methods it calls, can fail, there's no point in returning an -// error. Instead errors are logged before continuing. This is to ensure that any possible alert -// that should be triggered, will be triggered. -func (c Config) Evaluate(ctx context.Context, gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Notification { +// While this method or the methods it calls can fail, there's no point in returning an +// error. Instead errors are logged before continuing. This is to ensure that any possible +// alert that should be triggered, will be triggered. 
+func (c Config) Evaluate(ctx context.Context, + gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Notification { + notification := c.Alerts.Evaluate(ctx, gd, dd) if notification != nil { notification.FollowedUserID = c.FollowedUserID diff --git a/data/events/alerts.go b/data/events/alerts.go index 34da074cf3..2308a58d32 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -76,7 +76,7 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, ctxLog := c.logger(ctx).WithField("followedUserID", cfg.FollowedUserID) ctx = log.NewContextWithLogger(ctx, ctxLog) - notes, err := c.Evaluator.Evaluate(ctx, cfg.FollowedUserID) + notes, err := c.Evaluator.Evaluate(ctx, cfg.FollowedUserID, cfg.UploadID) if err != nil { format := "Unable to evalaute alerts configs triggered event for user %s" return errors.Wrapf(err, format, cfg.UserID) @@ -103,8 +103,11 @@ func (c *Consumer) consumeDeviceData(ctx context.Context, if datum.UserID == nil { return errors.New("Unable to retrieve alerts configs: userID is nil") } + if datum.UploadID == nil { + return errors.New("Unable to retrieve alerts configs: uploadID is nil") + } ctx = log.NewContextWithLogger(ctx, lgr.WithField("followedUserID", *datum.UserID)) - notes, err := c.Evaluator.Evaluate(ctx, *datum.UserID) + notes, err := c.Evaluator.Evaluate(ctx, *datum.UserID, *datum.UploadID) if err != nil { format := "Unable to evalaute device data triggered event for user %s" return errors.Wrapf(err, format, *datum.UserID) @@ -162,7 +165,7 @@ func (c *Consumer) logger(ctx context.Context) log.Logger { } type AlertsEvaluator interface { - Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Notification, error) + Evaluate(ctx context.Context, followedUserID, dataSetID string) ([]*alerts.Notification, error) } func NewAlertsEvaluator(alerts AlertsClient, data store.DataRepository, @@ -198,10 +201,10 @@ func (e *evaluator) logger(ctx context.Context) log.Logger { } // Evaluate followers' alerts.Configs to 
generate alert notifications. -func (e *evaluator) Evaluate(ctx context.Context, followedUserID string) ( +func (e *evaluator) Evaluate(ctx context.Context, followedUserID, dataSetID string) ( []*alerts.Notification, error) { - alertsConfigs, err := e.gatherAlertsConfigs(ctx, followedUserID) + alertsConfigs, err := e.gatherAlertsConfigs(ctx, followedUserID, dataSetID) if err != nil { return nil, err } @@ -231,14 +234,21 @@ func (e *evaluator) mapAlertsConfigsByUploadID(cfgs []*alerts.Config) map[string return mapped } +// gatherAlertsConfigs for the given followed user and data set. +// +// Those configs which don't match the data set or whose owners don't have permission are +// removed. func (e *evaluator) gatherAlertsConfigs(ctx context.Context, - followedUserID string) ([]*alerts.Config, error) { + followedUserID, dataSetID string) ([]*alerts.Config, error) { alertsConfigs, err := e.Alerts.List(ctx, followedUserID) if err != nil { return nil, err } alertsConfigs = slices.DeleteFunc(alertsConfigs, e.authDenied(ctx)) + alertsConfigs = slices.DeleteFunc(alertsConfigs, func(c *alerts.Config) bool { + return c.UploadID != dataSetID + }) return alertsConfigs, nil } @@ -297,10 +307,6 @@ func (e *evaluator) gatherData(ctx context.Context, followedUserID, uploadID str func (e *evaluator) generateNotes(ctx context.Context, alertsConfigs []*alerts.Config, resp *store.AlertableResponse) []*alerts.Notification { - if len(alertsConfigs) == 0 { - return nil - } - lgr := e.logger(ctx) notifications := []*alerts.Notification{} for _, alertsConfig := range alertsConfigs { @@ -313,7 +319,6 @@ func (e *evaluator) generateNotes(ctx context.Context, note := alertsConfig.Evaluate(c, resp.Glucose, resp.DosingDecisions) if note != nil { notifications = append(notifications, note) - continue } } diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index d3523457e0..b3d99df7a7 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -17,11 +17,13 @@ 
import ( "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood" "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" logtest "github.com/tidepool-org/platform/log/test" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/pointer" + "github.com/tidepool-org/platform/push" ) const ( @@ -47,7 +49,7 @@ var _ = Describe("Consumer", func() { Expect(c.Consume(ctx, nil, nil)).To(Succeed()) }) - It("processes alerts config events", func() { + It("consumes alerts config events", func() { cfg := &alerts.Config{ UserID: testUserID, FollowedUserID: testFollowedUserID, @@ -70,16 +72,8 @@ var _ = Describe("Consumer", func() { Expect(deps.Session.MarkCalls).To(Equal(1)) }) - It("processes device data events", func() { - blood := &glucose.Glucose{ - Blood: blood.Blood{ - Units: pointer.FromAny("mmol/L"), - Value: pointer.FromAny(7.2), - Base: types.Base{ - UserID: pointer.FromAny(testFollowedUserID), - }, - }, - } + It("consumes device data events", func() { + blood := newTestStaticDatumMmolL(7.2) kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) docs := []interface{}{bson.M{}} c, deps := newConsumerTestDeps(docs) @@ -88,6 +82,53 @@ var _ = Describe("Consumer", func() { Expect(deps.Session.MarkCalls).To(Equal(1)) }) + It("errors out when the datum's UserID is nil", func() { + blood := newTestStaticDatumMmolL(7.2) + blood.UserID = nil + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)). 
+ To(MatchError(ContainSubstring("userID is nil"))) + Expect(deps.Session.MarkCalls).To(Equal(0)) + }) + + It("errors out when the datum's UploadID is nil", func() { + blood := newTestStaticDatumMmolL(7.2) + blood.UploadID = nil + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)). + To(MatchError(ContainSubstring("uploadID is nil"))) + Expect(deps.Session.MarkCalls).To(Equal(0)) + }) + + It("pushes notifications", func() { + blood := newTestStaticDatumMmolL(1.0) + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + eval := newMockEvaluator() + eval.Evaluations[testFollowedUserID+testUploadID] = []mockEvaluatorResponse{ + { + Notifications: []*alerts.Notification{ + { + Message: "something", + RecipientUserID: testUserID, + FollowedUserID: testFollowedUserID, + }, + }, + }, + } + c.Evaluator = eval + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + + deps.Logger.AssertInfo("logging push notification") + }) }) Describe("Evaluator", func() { @@ -95,23 +136,38 @@ var _ = Describe("Consumer", func() { It("checks that alerts config owners have permission", func() { testLogger := logtest.NewLogger() ctx := log.NewContextWithLogger(context.Background(), testLogger) - eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) deps.Permissions.Allow(testUserID, permission.Follow, testFollowedUserID) deps.Permissions.DenyAll(testUserNoPermsID, testFollowedUserID) deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserNoPermsID)) deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserID)) - notes, err := eval.Evaluate(ctx, testFollowedUserID) + notes, err := eval.Evaluate(ctx, testFollowedUserID, testUploadID) 
Expect(err).To(Succeed()) Expect(notes).To(ConsistOf(HaveField("RecipientUserID", testUserID))) }) - It("uses the longest delay", func() { + It("checks that alerts configs match the data set id", func() { testLogger := logtest.NewLogger() ctx := log.NewContextWithLogger(context.Background(), testLogger) + eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) + deps.Permissions.Allow(testUserID+"2", permission.Follow, testFollowedUserID) + deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserID+"2")) + deps.Permissions.Allow(testUserID, permission.Follow, testFollowedUserID) + wrongDataSetID := testAlertsConfigUrgentLow(testUserID) + wrongDataSetID.UploadID = "wrong" + deps.Alerts.Configs = append(deps.Alerts.Configs, wrongDataSetID) + notes, err := eval.Evaluate(ctx, testFollowedUserID, testUploadID) + + Expect(err).To(Succeed()) + Expect(notes).To(ConsistOf(HaveField("RecipientUserID", testUserID+"2"))) + }) + + It("uses the longest delay", func() { + testLogger := logtest.NewLogger() + ctx := log.NewContextWithLogger(context.Background(), testLogger) eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) cfgWithShorterDelay := testAlertsConfigLow(testUserID) deps.Alerts.Configs = append(deps.Alerts.Configs, cfgWithShorterDelay) @@ -121,7 +177,7 @@ var _ = Describe("Consumer", func() { deps.Alerts.Configs = append(deps.Alerts.Configs, cfgWithLongerDelay) deps.Permissions.Allow(testUserID+"2", permission.Follow, testFollowedUserID) - _, err := eval.Evaluate(ctx, testFollowedUserID) + _, err := eval.Evaluate(ctx, testFollowedUserID, testUploadID) Expect(err).To(Succeed()) if Expect(len(deps.Data.GetAlertableDataInputs)).To(Equal(1)) { @@ -135,14 +191,16 @@ var _ = Describe("Consumer", func() { }) type consumerTestDeps struct { - Alerts *mockAlertsConfigClient - Context context.Context - Cursor *mongo.Cursor - Evaluator *mockStaticEvaluator - Logger log.Logger - 
Permissions *mockPermissionsClient - Repo *storetest.DataRepository - Session *mockConsumerGroupSession + Alerts *mockAlertsConfigClient + Context context.Context + Cursor *mongo.Cursor + DeviceTokens *mockDeviceTokens + Evaluator *mockStaticEvaluator + Logger *logtest.Logger + Permissions *mockPermissionsClient + Pusher Pusher + Repo *storetest.DataRepository + Session *mockConsumerGroupSession } func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { @@ -164,20 +222,30 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { } permissions := newMockPermissionsClient() evaluator := newMockStaticEvaluator() + pusher := push.NewLogPusher(logger) + deviceTokens := newMockDeviceTokens() + deviceTokens.Tokens = append(deviceTokens.Tokens, []*devicetokens.DeviceToken{ + {Apple: &devicetokens.AppleDeviceToken{}}, + }) return &Consumer{ - Alerts: alertsClient, - Evaluator: evaluator, - Data: dataRepo, - Permissions: permissions, + Alerts: alertsClient, + Evaluator: evaluator, + Data: dataRepo, + DeviceTokens: deviceTokens, + Permissions: permissions, + Pusher: pusher, }, &consumerTestDeps{ - Alerts: alertsClient, - Context: ctx, - Cursor: cur, - Evaluator: evaluator, - Repo: dataRepo, - Session: &mockConsumerGroupSession{}, - Logger: logger, + Alerts: alertsClient, + Context: ctx, + Cursor: cur, + DeviceTokens: deviceTokens, + Evaluator: evaluator, + Pusher: pusher, + Repo: dataRepo, + Session: &mockConsumerGroupSession{}, + Logger: logger, + //Tokens: tokens, Permissions: permissions, } } @@ -226,17 +294,18 @@ func newMockEvaluator() *mockEvaluator { } } -func (e *mockEvaluator) Evaluate(ctx context.Context, followedUserID string) ( +func (e *mockEvaluator) Evaluate(ctx context.Context, followedUserID, dataSetID string) ( []*alerts.Notification, error) { - if _, found := e.Evaluations[followedUserID]; !found { + key := followedUserID + dataSetID + if _, found := e.Evaluations[key]; !found { return nil, nil } - resp := 
e.Evaluations[followedUserID][0] - if len(e.Evaluations[followedUserID]) > 1 { - e.Evaluations[followedUserID] = e.Evaluations[followedUserID][1:] + resp := e.Evaluations[key][0] + if len(e.Evaluations[key]) > 1 { + e.Evaluations[key] = e.Evaluations[key][1:] } - e.EvaluateCalls[followedUserID] += 1 + e.EvaluateCalls[key] += 1 if resp.Error != nil { return nil, resp.Error } @@ -263,7 +332,7 @@ func newMockStaticEvaluator() *mockStaticEvaluator { return &mockStaticEvaluator{newMockEvaluator()} } -func (e *mockStaticEvaluator) Evaluate(ctx context.Context, followedUserID string) ( +func (e *mockStaticEvaluator) Evaluate(ctx context.Context, followedUserID, dataSetID string) ( []*alerts.Notification, error) { e.EvaluateCalls[followedUserID] += 1 @@ -283,20 +352,14 @@ func newAlertsMockConsumerMessage(topic string, v any) *sarama.ConsumerMessage { } } -func addLogger(ctx context.Context) (context.Context, log.Logger) { +func addLogger(ctx context.Context) (context.Context, *logtest.Logger) { GinkgoHelper() if ctx == nil { ctx = context.Background() } - lgr := newTestLogger() - return log.NewContextWithLogger(ctx, lgr), lgr -} - -func newTestLogger() log.Logger { - GinkgoHelper() lgr := logtest.NewLogger() - return lgr + return log.NewContextWithLogger(ctx, lgr), lgr } func augmentMockMongoDocs(inDocs []interface{}) []interface{} { @@ -333,7 +396,9 @@ func newTestStaticDatumMmolL(value float64) *glucose.Glucose { return &glucose.Glucose{ Blood: blood.Blood{ Base: types.Base{ - Time: pointer.FromTime(time.Now()), + UserID: pointer.FromAny(testFollowedUserID), + Time: pointer.FromTime(time.Now()), + UploadID: pointer.FromAny(testUploadID), }, Units: pointer.FromString(nontypesglucose.MmolL), Value: pointer.FromFloat64(value), @@ -496,3 +561,26 @@ func testAlertsConfigLow(userID string) *alerts.Config { }, } } + +type mockDeviceTokens struct { + Error error + Tokens [][]*devicetokens.DeviceToken +} + +func newMockDeviceTokens() *mockDeviceTokens { + return 
&mockDeviceTokens{ + Tokens: [][]*devicetokens.DeviceToken{}, + } +} + +func (t *mockDeviceTokens) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + if t.Error != nil { + return nil, t.Error + } + if len(t.Tokens) > 0 { + ret := t.Tokens[0] + t.Tokens = t.Tokens[1:] + return ret, nil + } + return nil, nil +} From 099a7efa6221dadaf3ffee938c31ebb7aa0b4773 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Thu, 24 Oct 2024 15:39:42 -0600 Subject: [PATCH 26/54] implement no communication alerts - UsersWithoutCommunication endpoint added to data service - UsersWithoutCommunication endpoint added to alerts client - implementing no communication alerts via the task service - evaluation of alerts conditions re-worked - The new system recognizes that some alerts are generated by events (so-called "Data Alerts") while others are polled (no communication). - The new evaluator lives in the alerts package (was data/events) - implemented tracking of sent notifications - Recording repo is implemented to record/index the time of the last received data from a user BACK-2558 --- alerts/client.go | 29 +- alerts/client_test.go | 107 +++- alerts/config.go | 300 ++++++++--- alerts/config_test.go | 395 ++++++++++---- alerts/evaluator.go | 203 +++++++ alerts/evaluator_test.go | 499 ++++++++++++++++++ alerts/tasks.go | 216 ++++++++ alerts/tasks_test.go | 273 ++++++++++ auth/store/mongo/device_tokens_repository.go | 2 +- .../mongo/device_tokens_repository_test.go | 76 +++ auth/store/mongo/store_test.go | 2 +- data/events/alerts.go | 192 +------ data/events/alerts_test.go | 305 +++++------ data/events/events.go | 11 +- data/events/recorder.go | 41 ++ data/service/api/v1/alerts.go | 42 +- data/service/api/v1/alerts_test.go | 252 ++++++++- data/service/api/v1/mocks/context.go | 21 +- .../api/v1/users_datasets_create_test.go | 7 + data/service/context.go | 1 + data/service/context/standard.go | 11 + data/service/service/standard.go | 16 +- 
data/store/mongo/mongo.go | 15 + data/store/mongo/mongo_alerts.go | 76 ++- data/store/mongo/mongo_data.go | 4 - data/store/mongo/mongo_datum.go | 55 -- data/store/mongo/mongo_recorder.go | 74 +++ data/store/mongo/mongo_test.go | 164 +++++- data/store/store.go | 23 +- data/store/test/data_repository.go | 7 +- log/gocommon_adapter.go | 54 -- log/sarama_test.go | 47 ++ push/logpush_test.go | 50 ++ push/push.go | 7 - push/push_test.go | 44 ++ task/carepartner.go | 18 + task/carepartner_test.go | 14 + task/service/service/service.go | 103 +++- task/service/service/service_test.go | 8 +- task/store/mongo/mongo.go | 6 + task/store/mongo/mongo_test.go | 1 - 41 files changed, 3018 insertions(+), 753 deletions(-) create mode 100644 alerts/evaluator.go create mode 100644 alerts/evaluator_test.go create mode 100644 alerts/tasks.go create mode 100644 alerts/tasks_test.go create mode 100644 auth/store/mongo/device_tokens_repository_test.go create mode 100644 data/events/recorder.go create mode 100644 data/store/mongo/mongo_recorder.go delete mode 100644 log/gocommon_adapter.go create mode 100644 log/sarama_test.go create mode 100644 push/logpush_test.go create mode 100644 task/carepartner.go create mode 100644 task/carepartner_test.go diff --git a/alerts/client.go b/alerts/client.go index 6352f8d359..96edff0d54 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -3,6 +3,7 @@ package alerts import ( "context" "net/http" + "time" "github.com/kelseyhightower/envconfig" @@ -64,12 +65,12 @@ func (c *Client) Delete(ctx context.Context, cfg *Config) error { // Get a user's alerts configuration for the followed user. 
func (c *Client) Get(ctx context.Context, followedUserID, userID string) (*Config, error) { url := c.client.ConstructURL("v1", "users", followedUserID, "followers", userID, "alerts") - cfg := &Config{} - err := c.request(ctx, http.MethodGet, url, nil, cfg) + config := &Config{} + err := c.request(ctx, http.MethodGet, url, nil, config) if err != nil { return nil, errors.Wrap(err, "Unable to request alerts config") } - return cfg, nil + return config, nil } // List the alerts configurations that follow the given user. @@ -80,11 +81,33 @@ func (c *Client) List(ctx context.Context, followedUserID string) ([]*Config, er configs := []*Config{} err := c.request(ctx, http.MethodGet, url, nil, &configs) if err != nil { + c.logger.Debugf("unable to request alerts configs list: %+v %T", err, err) return nil, errors.Wrap(err, "Unable to request alerts configs list") } return configs, nil } +// UsersWithoutCommunication are those that haven't communicated in some time. +// +// This method should only be called via an authenticated service session. +func (c *Client) UsersWithoutCommunication(ctx context.Context) ([]LastCommunication, error) { + url := c.client.ConstructURL("v1", "users", "without_communication") + lastComms := []LastCommunication{} + err := c.request(ctx, http.MethodGet, url, nil, &lastComms) + if err != nil { + c.logger.Debugf("getting users without communication: \"%+v\" %T", err, err) + return nil, errors.Wrap(err, "Unable to list users without communication") + } + return lastComms, nil +} + +// LastCommunication records the last time data was received from a user. +type LastCommunication struct { + UserID string `bson:"userId" json:"userId"` + DataSetID string `bson:"dataSetId" json:"dataSetId"` + LastReceivedDeviceData time.Time `bson:"lastReceivedDeviceData" json:"lastReceivedDeviceData"` +} + // ConfigLoader abstracts the method by which config values are loaded. 
type ConfigLoader interface { Load(*ClientConfig) error diff --git a/alerts/client_test.go b/alerts/client_test.go index fb5af7b7c6..1fe6b739bd 100644 --- a/alerts/client_test.go +++ b/alerts/client_test.go @@ -14,8 +14,14 @@ import ( "github.com/tidepool-org/platform/platform" ) +const testToken = "auth-me" +const testUserID = "test-user-id" +const testFollowedUserID = "test-followed-user-id" +const testDataSetID = "upid_000000000000" + var _ = Describe("Client", func() { - var test404Server, test200Server *httptest.Server + var test404Server *httptest.Server + var test200Server func(string) *httptest.Server BeforeEach(func() { t := GinkgoT() @@ -24,38 +30,91 @@ var _ = Describe("Client", func() { test404Server = testServer(t, func(w http.ResponseWriter, r *http.Request) { http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) }) - test200Server = testServer(t, func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - }) + test200Server = func(resp string) *httptest.Server { + return testServer(t, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(resp)) + }) + } }) - Context("Delete", func() { - It("returns an error on non-200 responses", func() { + ItReturnsAnErrorOnNon200Responses := func(f func(context.Context, *Client) error) { + GinkgoHelper() + It("returns an error on non-200 respnoses", func() { client, ctx := newAlertsClientTest(test404Server) - err := client.Delete(ctx, &Config{}) + err := f(ctx, client) Expect(err).Should(HaveOccurred()) Expect(err).To(MatchError(ContainSubstring("resource not found"))) }) + } + + ItReturnsANilErrorOnSuccess := func(resp string, f func(context.Context, *Client) error) { + GinkgoHelper() + It("returns a nil error on success", func() { + client, ctx := newAlertsClientTest(test200Server(resp)) + err := f(ctx, client) + Expect(err).To(Succeed()) + }) + } - It("returns nil on success", func() { - client, ctx := 
newAlertsClientTest(test200Server) - err := client.Delete(ctx, &Config{}) - Expect(err).ShouldNot(HaveOccurred()) + Context("Delete", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + return client.Delete(ctx, &Config{}) + }) + + ItReturnsANilErrorOnSuccess("", func(ctx context.Context, client *Client) error { + return client.Delete(ctx, &Config{}) }) }) Context("Upsert", func() { - It("returns an error on non-200 responses", func() { - client, ctx := newAlertsClientTest(test404Server) - err := client.Upsert(ctx, &Config{}) - Expect(err).Should(HaveOccurred()) - Expect(err).To(MatchError(ContainSubstring("resource not found"))) + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + return client.Upsert(ctx, &Config{}) + }) + + ItReturnsANilErrorOnSuccess("", func(ctx context.Context, client *Client) error { + return client.Upsert(ctx, &Config{}) + }) + }) + + Context("Get", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.Get(ctx, testFollowedUserID, testUserID) + return err + }) + + ret := `{ + "userId": "14ee703f-ca9b-4a6b-9ce3-41d886514e7f", + "followedUserId": "ce5863bc-cc0b-4177-97d7-e8de0c558820", + "uploadId": "upid_00000000000000000000000000000000" + }` + ItReturnsANilErrorOnSuccess(ret, func(ctx context.Context, client *Client) error { + _, err := client.Get(ctx, testFollowedUserID, testUserID) + return err + }) + }) + + Context("List", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.List(ctx, "") + return err }) - It("returns nil on success", func() { - client, ctx := newAlertsClientTest(test200Server) - err := client.Upsert(ctx, &Config{}) - Expect(err).ShouldNot(HaveOccurred()) + ItReturnsANilErrorOnSuccess("[]", func(ctx context.Context, client *Client) error { + _, err := client.List(ctx, "") + return err + }) + }) + + 
Context("UsersWithoutCommunication", func() { + ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { + _, err := client.UsersWithoutCommunication(ctx) + return err + }) + + ItReturnsANilErrorOnSuccess("[]", func(ctx context.Context, client *Client) error { + _, err := client.UsersWithoutCommunication(ctx) + return err }) }) }) @@ -75,8 +134,14 @@ func newAlertsClientTest(server *httptest.Server) (*Client, context.Context) { return buildTestClient(server), contextWithNullLogger() } +func contextWithNullLoggerDeluxe() (context.Context, log.Logger) { + lgr := null.NewLogger() + return log.NewContextWithLogger(context.Background(), lgr), lgr +} + func contextWithNullLogger() context.Context { - return log.NewContextWithLogger(context.Background(), null.NewLogger()) + ctx, _ := contextWithNullLoggerDeluxe() + return ctx } func testServer(t GinkgoTInterface, handler http.HandlerFunc) *httptest.Server { diff --git a/alerts/config.go b/alerts/config.go index 1084132c36..56fcd80ca6 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -22,9 +22,8 @@ import ( // Config wraps Alerts to include user relationships. // -// As a wrapper type, Config provides a clear demarcation of what a user -// controls (Alerts) and what is set by the service (the other values in -// Config). +// As a wrapper type, Config provides a clear demarcation of what a user controls (Alerts) +// and what is set by the service (the other values in Config). type Config struct { // UserID receives the configured alerts and owns this Config. UserID string `json:"userId" bson:"userId"` @@ -36,47 +35,88 @@ type Config struct { // UploadID identifies the device dataset for which these alerts apply. UploadID string `json:"uploadId" bson:"uploadId,omitempty"` + // Alerts collects the user settings for each type of alert, and tracks their statuses. Alerts `bson:",inline,omitempty"` } -// Alerts models a user's desired alerts. 
+// Alerts is a wrapper to collect the user-modifiable parts of a Config. type Alerts struct { - UrgentLow *UrgentLowAlert `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` - Low *LowAlert `json:"low,omitempty" bson:"low,omitempty"` - High *HighAlert `json:"high,omitempty" bson:"high,omitempty"` - NotLooping *NotLoopingAlert `json:"notLooping,omitempty" bson:"notLooping,omitempty"` - NoCommunication *NoCommunicationAlert `json:"noCommunication,omitempty" bson:"noCommunication,omitempty"` + DataAlerts `bson:",inline,omitempty"` + *NoCommunicationAlert `bson:"noCommunication,omitempty" json:"noCommunication,omitempty"` +} + +// DataAlerts models alerts triggered by incoming data. +type DataAlerts struct { + UrgentLow *UrgentLowAlert `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` + Low *LowAlert `json:"low,omitempty" bson:"low,omitempty"` + High *HighAlert `json:"high,omitempty" bson:"high,omitempty"` + NotLooping *NotLoopingAlert `json:"notLooping,omitempty" bson:"notLooping,omitempty"` } func (c Config) Validate(validator structure.Validator) { validator.String("userID", &c.UserID).Using(user.IDValidator) validator.String("followedUserID", &c.FollowedUserID).Using(user.IDValidator) validator.String("uploadID", &c.UploadID).Exists().Using(data.SetIDValidator) - c.Alerts.Validate(validator) + c.DataAlerts.Validate(validator) + if c.NoCommunicationAlert != nil { + c.NoCommunicationAlert.Validate(validator) + } } -// Evaluate alerts in the context of the provided data. +// EvaluateData alerts in the context of the provided data. // -// While this method or the methods it calls can fail, there's no point in returning an +// While this method, or the methods it calls, can fail, there's no point in returning an // error. Instead errors are logged before continuing. This is to ensure that any possible // alert that should be triggered, will be triggered. 
-func (c Config) Evaluate(ctx context.Context, - gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Notification { +func (c *Config) EvaluateData(ctx context.Context, gd []*glucose.Glucose, + dd []*dosingdecision.DosingDecision) (*NotificationWithHook, bool) { - notification := c.Alerts.Evaluate(ctx, gd, dd) + notification, changed := c.DataAlerts.Evaluate(ctx, gd, dd) if notification != nil { notification.FollowedUserID = c.FollowedUserID notification.RecipientUserID = c.UserID } - if lgr := log.LoggerFromContext(ctx); lgr != nil { - lgr.WithField("notification", notification).Info("evaluated alert") + + return notification, changed +} + +// SentFunc allows [Activity] to be updated in response to a notification being sent. +type SentFunc func(time.Time) + +// NotificationWithHook wraps a Notification with a SentFunc. +// +// This separates the responsibilities of the individual alerts (e.g. [LowAlert]), which +// create notifications and track when those notifications were sent, from those types which +// trigger the alerts, (e.g. task service's CarePartnerRunner, or data/events' Kafka +// connector). +type NotificationWithHook struct { + Sent SentFunc + *Notification +} + +func (c *Config) EvaluateNoCommunication(ctx context.Context, last time.Time) ( + *NotificationWithHook, bool) { + + if c.NoCommunicationAlert == nil { + return nil, false } - return notification + lgr := log.LoggerFromContext(ctx).WithFields(log.Fields{ + "UserID": c.UserID, + "DataSetID": c.UploadID, + "FollowedUserID": c.FollowedUserID, + }) + ctx = log.NewContextWithLogger(ctx, lgr) + notification, changed := c.NoCommunicationAlert.Evaluate(ctx, last) + if notification != nil { + notification.FollowedUserID = c.FollowedUserID + notification.RecipientUserID = c.UserID + } + return notification, changed } // LongestDelay of the delays set on enabled alerts. 
-func (a Alerts) LongestDelay() time.Duration { +func (a DataAlerts) LongestDelay() time.Duration { delays := []time.Duration{} if a.Low != nil && a.Low.Enabled { delays = append(delays, a.Low.Delay.Duration()) @@ -87,16 +127,13 @@ func (a Alerts) LongestDelay() time.Duration { if a.NotLooping != nil && a.NotLooping.Enabled { delays = append(delays, a.NotLooping.Delay.Duration()) } - if a.NoCommunication != nil && a.NoCommunication.Enabled { - delays = append(delays, a.NoCommunication.Delay.Duration()) - } if len(delays) == 0 { return 0 } return slices.Max(delays) } -func (a Alerts) Validate(validator structure.Validator) { +func (a DataAlerts) Validate(validator structure.Validator) { if a.UrgentLow != nil { a.UrgentLow.Validate(validator) } @@ -109,44 +146,45 @@ func (a Alerts) Validate(validator structure.Validator) { if a.NotLooping != nil { a.NotLooping.Validate(validator) } - if a.NoCommunication != nil { - a.NoCommunication.Validate(validator) - } } -// Evaluate a user's data to determine if notifications are indicated. +// Evaluate to determine if notifications are indicated. // -// Evaluations are performed according to priority. The process is -// "short-circuited" at the first indicated notification. -func (a Alerts) Evaluate(ctx context.Context, - gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Notification { - - if a.NoCommunication != nil && a.NoCommunication.Enabled { - if n := a.NoCommunication.Evaluate(ctx, gd); n != nil { - return n - } - } +// Evaluations are performed according to priority. The process is "short-circuited" at the +// first indicated notification. 
+func (a DataAlerts) Evaluate(ctx context.Context, + gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) (*NotificationWithHook, bool) { + + changed := false if a.UrgentLow != nil && a.UrgentLow.Enabled { - if n := a.UrgentLow.Evaluate(ctx, gd); n != nil { - return n + if n, c := a.UrgentLow.Evaluate(ctx, gd); n != nil { + return n, c + } else { + changed = changed || c } } if a.Low != nil && a.Low.Enabled { - if n := a.Low.Evaluate(ctx, gd); n != nil { - return n + if n, c := a.Low.Evaluate(ctx, gd); n != nil { + return n, changed || c + } else { + changed = changed || c } } if a.High != nil && a.High.Enabled { - if n := a.High.Evaluate(ctx, gd); n != nil { - return n + if n, c := a.High.Evaluate(ctx, gd); n != nil { + return n, changed || c + } else { + changed = changed || c } } if a.NotLooping != nil && a.NotLooping.Enabled { - if n := a.NotLooping.Evaluate(ctx, dd); n != nil { - return n + if n, c := a.NotLooping.Evaluate(ctx, dd); n != nil { + return n, changed || c + } else { + changed = changed || c } } - return nil + return nil, changed } // Base describes the minimum specifics of a desired alert. @@ -155,7 +193,7 @@ type Base struct { Enabled bool `json:"enabled" bson:"enabled"` // Activity tracks when events related to the alert occurred. - Activity `json:"-" bson:"activity,omitempty"` + Activity `json:"activity" bson:"activity,omitempty"` } func (b Base) Validate(validator structure.Validator) { @@ -169,6 +207,22 @@ func (b Base) Evaluate(ctx context.Context, data []*glucose.Glucose) *Notificati return nil } +// withHook wraps a *Notification with a SentFunc that updates its Sent. +func (b *Base) withHook(n *Notification) *NotificationWithHook { + if n == nil { + return nil + } + return &NotificationWithHook{ + Notification: n, + Sent: func(at time.Time) { + if at.Before(b.Activity.Sent) { + return + } + b.Activity.Sent = at + }, + } +} + type Activity struct { // Triggered records the last time this alert was triggered. 
Triggered time.Time `json:"triggered" bson:"triggered"` @@ -223,32 +277,38 @@ func (a UrgentLowAlert) Validate(validator structure.Validator) { // Evaluate urgent low condition. // // Assumes data is pre-sorted in descending order by Time. -func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (notification *Notification) { +func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) ( + notification *NotificationWithHook, _ bool) { + lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for urgent low") - return nil + return nil, false } datum := data[0] okDatum, okThreshold, err := validateGlucoseAlertDatum(datum, a.Threshold) if err != nil { lgr.WithError(err).Warn("Unable to evaluate urgent low") - return nil + return nil, false } defer func() { logGlucoseAlertEvaluation(lgr, "urgent low", notification, okDatum, okThreshold) }() active := okDatum < okThreshold + changed := false if !active { if a.IsActive() { a.Resolved = time.Now() + changed = true } - return nil + return nil, changed } if !a.IsActive() { a.Triggered = time.Now() + changed = true } - return &Notification{Message: genGlucoseThresholdMessage("below urgent low")} + n := &Notification{Message: genGlucoseThresholdMessage("below urgent low")} + return a.withHook(n), changed } func validateGlucoseAlertDatum(datum *glucose.Glucose, t Threshold) (float64, float64, error) { @@ -275,18 +335,27 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { } // Evaluate if the device is looping. -func (a NotLoopingAlert) Evaluate(ctx context.Context, decisions []*dosingdecision.DosingDecision) (notifcation *Notification) { +func (a NotLoopingAlert) Evaluate(ctx context.Context, + decisions []*dosingdecision.DosingDecision) ( + notifcation *NotificationWithHook, _ bool) { + // TODO will be implemented in the near future. 
- return nil + return nil, false } -// DosingDecisionReasonLoop is specified in a [dosingdecision.DosingDecision] to indicate that -// the decision is part of a loop adjustment (as opposed to bolus or something else). +// DosingDecisionReasonLoop is specified in a [dosingdecision.DosingDecision] to indicate +// that the decision is part of a loop adjustment (as opposed to bolus or something else). const DosingDecisionReasonLoop string = "loop" -// NoCommunicationAlert extends Base with a delay. +// NoCommunicationAlert is configured to send notifications when no data is received. +// +// It differs fundamentally from DataAlerts in that it is polled instead of being triggered +// when data is received. type NoCommunicationAlert struct { - Base `bson:",inline"` + Base `bson:",inline"` + // Delay represents the time after which a No Communication alert should be sent. + // + // A value of 0 is the default, and is treated as five minutes. Delay DurationMinutes `json:"delay,omitempty"` } @@ -296,22 +365,50 @@ func (a NoCommunicationAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 6*time.Hour) } -// Evaluate if CGM data is being received by Tidepool. -// -// Assumes data is pre-sorted by Time in descending order. -func (a NoCommunicationAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) *Notification { - var newest time.Time - for _, d := range data { - if d != nil && d.Time != nil && !(*d.Time).IsZero() { - newest = *d.Time - break +// Evaluate if the time since data was last received warrants a notification. 
+func (a *NoCommunicationAlert) Evaluate(ctx context.Context, + lastReceived time.Time) (_ *NotificationWithHook, changed bool) { + + lgr := log.LoggerFromContext(ctx) + if lastReceived.IsZero() { + err := errors.Newf("Unable to evaluate no communication: time is Zero") + lgr.WithError(err).Debug("Unable to evaluate no communication") + return nil, false + } + + defer func() { + logNoCommunicationEvaluation(lgr, changed, a.IsActive()) + }() + delay := DefaultNoCommunicationDelay + if a.Delay.Duration() > 0 { + delay = a.Delay.Duration() + } + + if time.Since(lastReceived) < delay { + if a.IsActive() { + a.Resolved = time.Now() + return nil, true } + return nil, false } - if time.Since(newest) > a.Delay.Duration() { - return &Notification{Message: NoCommunicationMessage} + if !a.IsActive() { + a.Triggered = time.Now() + return a.withHook(&Notification{Message: NoCommunicationMessage}), true + } + if time.Since(a.Sent) > DefaultNoCommunicationDelay { + return a.withHook(&Notification{Message: NoCommunicationMessage}), false } + return nil, false +} - return nil +const DefaultNoCommunicationDelay = 5 * time.Minute + +func logNoCommunicationEvaluation(lgr log.Logger, changed, isAlerting bool) { + fields := log.Fields{ + "changed": changed, + "isAlerting?": isAlerting, + } + lgr.WithFields(fields).Info("no communication") } const NoCommunicationMessage = "Tidepool is unable to communicate with a user's device" @@ -341,16 +438,20 @@ func (a LowAlert) Validate(validator structure.Validator) { // Evaluate the given data to determine if an alert should be sent. // // Assumes data is pre-sorted in descending order by Time. 
-func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (notification *Notification) { +func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) ( + notification *NotificationWithHook, _ bool) { + lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for low") - return nil + return nil, false } var eventBegan time.Time var okDatum, okThreshold float64 var err error - defer func() { logGlucoseAlertEvaluation(lgr, "low", notification, okDatum, okThreshold) }() + defer func() { + logGlucoseAlertEvaluation(lgr, "low", notification, okDatum, okThreshold) + }() for _, datum := range data { okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) if err != nil { @@ -365,18 +466,22 @@ func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (notif eventBegan = *datum.Time } } + changed := false if eventBegan.IsZero() { if a.IsActive() { a.Resolved = time.Now() + changed = true } - return nil + return nil, changed } if !a.IsActive() { if time.Since(eventBegan) > a.Delay.Duration() { a.Triggered = time.Now() + changed = true } } - return &Notification{Message: genGlucoseThresholdMessage("below low")} + n := &Notification{Message: genGlucoseThresholdMessage("below low")} + return a.withHook(n), changed } func genGlucoseThresholdMessage(alertType string) string { @@ -408,16 +513,20 @@ func (a HighAlert) Validate(validator structure.Validator) { // Evaluate the given data to determine if an alert should be sent. // // Assumes data is pre-sorted in descending order by Time. 
-func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (notification *Notification) { +func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) ( + notification *NotificationWithHook, _ bool) { + lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for high") - return nil + return nil, false } var eventBegan time.Time var okDatum, okThreshold float64 var err error - defer func() { logGlucoseAlertEvaluation(lgr, "high", notification, okDatum, okThreshold) }() + defer func() { + logGlucoseAlertEvaluation(lgr, "high", notification, okDatum, okThreshold) + }() for _, datum := range data { okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) if err != nil { @@ -432,23 +541,28 @@ func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (noti eventBegan = *datum.Time } } + changed := false if eventBegan.IsZero() { if a.IsActive() { a.Resolved = time.Now() + changed = true } - return nil + return nil, changed } if !a.IsActive() { if time.Since(eventBegan) > a.Delay.Duration() { a.Triggered = time.Now() + changed = true } } - return &Notification{Message: genGlucoseThresholdMessage("above high")} + n := &Notification{Message: genGlucoseThresholdMessage("above high")} + return a.withHook(n), changed } -// logGlucoseAlertEvaluation is called during each glucose-based evaluation for record-keeping. -func logGlucoseAlertEvaluation(lgr log.Logger, alertType string, notification *Notification, - value, threshold float64) { +// logGlucoseAlertEvaluation is called during each glucose-based evaluation for +// record-keeping. 
+func logGlucoseAlertEvaluation(lgr log.Logger, alertType string, + notification *NotificationWithHook, value, threshold float64) { fields := log.Fields{ "isAlerting?": notification != nil, @@ -499,10 +613,10 @@ type Threshold ValueWithUnits // Validate implements structure.Validatable func (t Threshold) Validate(v structure.Validator) { v.String("units", &t.Units).OneOf(nontypesglucose.MgdL, nontypesglucose.MmolL) - // This is a sanity check. Client software will likely further constrain these values. The - // broadness of these values allows clients to change their own min and max values - // independently, and it sidesteps rounding and conversion conflicts between the backend and - // clients. + // This is a sanity check. Client software will likely further constrain these + // values. The broadness of these values allows clients to change their own min and max + // values independently, and it sidesteps rounding and conversion conflicts between the + // backend and clients. var max, min float64 switch t.Units { case nontypesglucose.MgdL, nontypesglucose.Mgdl: @@ -518,7 +632,7 @@ func (t Threshold) Validate(v structure.Validator) { } } -// Repository abstracts persistent storage for Config data. +// Repository abstracts persistent storage in the alerts collection for Config data. type Repository interface { Get(ctx context.Context, conf *Config) (*Config, error) Upsert(ctx context.Context, conf *Config) error @@ -535,3 +649,13 @@ type Notification struct { RecipientUserID string FollowedUserID string } + +// RecordsRepository encapsulates queries of the records collection for use with alerts. +type RecordsRepository interface { + // RecordReceivedDeviceData upserts the time of last communication from a user. + RecordReceivedDeviceData(context.Context, LastCommunication) error + // UsersWithoutCommunication lists those users that haven't communicated for a time. 
+ UsersWithoutCommunication(context.Context) ([]LastCommunication, error) + + EnsureIndexes() error +} diff --git a/alerts/config_test.go b/alerts/config_test.go index 6ded948bb2..fcb2e972d3 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -28,9 +28,9 @@ func TestSuite(t *testing.T) { } const ( - mockUserID1 = "008c7f79-6545-4466-95fb-34e3ba728d38" - mockUserID2 = "b1880201-30d5-4190-92bb-6afcf08ca15e" - mockUploadID = "4d3b1abc280511ef9f41abf13a093b64" + mockUserID1 = "008c7f79-6545-4466-95fb-34e3ba728d38" + mockUserID2 = "b1880201-30d5-4190-92bb-6afcf08ca15e" + mockDataSetID = "4d3b1abc280511ef9f41abf13a093b64" ) var _ = Describe("Config", func() { @@ -72,13 +72,13 @@ var _ = Describe("Config", func() { "enabled": true, "delay": 6 } -}`, mockUserID1, mockUserID2, mockUploadID) +}`, mockUserID1, mockUserID2, mockDataSetID) conf := &Config{} err := request.DecodeObject(context.Background(), nil, buf, conf) Expect(err).ToNot(HaveOccurred()) Expect(conf.UserID).To(Equal(mockUserID1)) Expect(conf.FollowedUserID).To(Equal(mockUserID2)) - Expect(conf.UploadID).To(Equal(mockUploadID)) + Expect(conf.UploadID).To(Equal(mockDataSetID)) Expect(conf.High.Enabled).To(Equal(false)) Expect(conf.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(conf.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) @@ -94,8 +94,8 @@ var _ = Describe("Config", func() { Expect(conf.UrgentLow.Threshold.Units).To(Equal(nontypesglucose.MgdL)) Expect(conf.NotLooping.Enabled).To(Equal(true)) Expect(conf.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) - Expect(conf.NoCommunication.Enabled).To(Equal(true)) - Expect(conf.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) + // Expect(conf.NoCommunication.Enabled).To(Equal(true)) + // Expect(conf.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) }) Context("validations", func() { @@ -103,7 +103,7 @@ var _ = Describe("Config", func() { return Config{ UserID: mockUserID1, 
FollowedUserID: mockUserID2, - UploadID: mockUploadID, + UploadID: mockDataSetID, } } @@ -132,7 +132,7 @@ var _ = Describe("Config", func() { }) }) - Describe("Evaluate", func() { + Describe("EvaluateData", func() { Context("when a notification is returned", func() { It("injects the userIDs", func() { ctx := contextWithTestLogger() @@ -151,17 +151,45 @@ var _ = Describe("Config", func() { UserID: mockUserID1, FollowedUserID: mockUserID2, Alerts: Alerts{ - UrgentLow: &UrgentLowAlert{ - Base: Base{Enabled: true}, - Threshold: Threshold{ - Value: 10, - Units: nontypesglucose.MmolL, + DataAlerts: DataAlerts{ + UrgentLow: &UrgentLowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10, + Units: nontypesglucose.MmolL, + }, + }, + }, + }, + } + + notification, _ := conf.EvaluateData(ctx, mockGlucoseData, nil) + + Expect(notification).ToNot(BeNil()) + Expect(notification.RecipientUserID).To(Equal(mockUserID1)) + Expect(notification.FollowedUserID).To(Equal(mockUserID2)) + }) + }) + }) + + Describe("EvaluateNoCommunication", func() { + Context("when a notification is returned", func() { + It("injects the userIDs", func() { + ctx := contextWithTestLogger() + conf := Config{ + UserID: mockUserID1, + FollowedUserID: mockUserID2, + Alerts: Alerts{ + NoCommunicationAlert: &NoCommunicationAlert{ + Base: Base{ + Enabled: true, }, }, }, } - notification := conf.Evaluate(ctx, mockGlucoseData, nil) + when := time.Now().Add(-time.Second + -DefaultNoCommunicationDelay) + notification, _ := conf.EvaluateNoCommunication(ctx, when) Expect(notification).ToNot(BeNil()) Expect(notification.RecipientUserID).To(Equal(mockUserID1)) @@ -218,6 +246,60 @@ var _ = Describe("Config", func() { }) }) + Context("DataAlerts", func() { + Describe("Evaluate", func() { + var ctxAndData = func() (context.Context, *DataAlerts) { + return contextWithTestLogger(), &DataAlerts{ + UrgentLow: testUrgentLowAlert(), + Low: testLowAlert(), + High: testHighAlert(), + } + } + + It("ripples changed 
value (from urgent low)", func() {
+			ctx, dataAlerts := ctxAndData()
+
+			// Generate an urgent low notification.
+			notification, changed := dataAlerts.Evaluate(ctx, []*glucose.Glucose{testUrgentLowDatum}, nil)
+			Expect(notification).ToNot(BeNil())
+			Expect(changed).To(Equal(true))
+			// Now resolve the alert, resulting in changed being true, but without a
+			// notification.
+			notification, changed = dataAlerts.Evaluate(ctx, []*glucose.Glucose{testInRangeDatum}, nil)
+			Expect(notification).To(BeNil())
+			Expect(changed).To(Equal(true))
+		})
+
+		It("ripples changed value (from low)", func() {
+			ctx, dataAlerts := ctxAndData()
+
+			// Generate a low notification.
+			notification, changed := dataAlerts.Evaluate(ctx, []*glucose.Glucose{testLowDatum}, nil)
+			Expect(notification).ToNot(BeNil())
+			Expect(changed).To(Equal(true))
+			// Now resolve the alert, resulting in changed being true, but without a
+			// notification.
+			notification, changed = dataAlerts.Evaluate(ctx, []*glucose.Glucose{testInRangeDatum}, nil)
+			Expect(notification).To(BeNil())
+			Expect(changed).To(Equal(true))
+		})
+
+		It("ripples changed value (from high)", func() {
+			ctx, dataAlerts := ctxAndData()
+
+			// Generate a high notification.
+			notification, changed := dataAlerts.Evaluate(ctx, []*glucose.Glucose{testHighDatum}, nil)
+			Expect(notification).ToNot(BeNil())
+			Expect(changed).To(Equal(true))
+			// Now resolve the alert, resulting in changed being true, but without a
+			// notification.
+ notification, changed = dataAlerts.Evaluate(ctx, []*glucose.Glucose{testInRangeDatum}, nil) + Expect(notification).To(BeNil()) + Expect(changed).To(Equal(true)) + }) + }) + }) + var testGlucoseDatum = func(v float64) *glucose.Glucose { return &glucose.Glucose{ Blood: blood.Blood{ @@ -267,16 +349,16 @@ var _ = Describe("Config", func() { It("handles being passed empty data", func() { ctx := contextWithTestLogger() - var notification *Notification + var notification *NotificationWithHook alert := testUrgentLow() Expect(func() { - notification = alert.Evaluate(ctx, []*glucose.Glucose{}) + notification, _ = alert.Evaluate(ctx, []*glucose.Glucose{}) }).ToNot(Panic()) Expect(notification).To(BeNil()) Expect(func() { - notification = alert.Evaluate(ctx, nil) + notification, _ = alert.Evaluate(ctx, nil) }).ToNot(Panic()) Expect(notification).To(BeNil()) }) @@ -356,31 +438,31 @@ var _ = Describe("Config", func() { It("validates glucose data", func() { ctx := contextWithTestLogger() - var notification *Notification + var notification *NotificationWithHook Expect(func() { - notification = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + notification, _ = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) }).ToNot(Panic()) Expect(notification).ToNot(BeNil()) badUnits := testGlucoseDatum(1) badUnits.Units = nil Expect(func() { - notification = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + notification, _ = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) }).ToNot(Panic()) Expect(notification).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - notification = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + notification, _ = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badValue}) }).ToNot(Panic()) Expect(notification).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - notification = testUrgentLow().Evaluate(ctx, 
[]*glucose.Glucose{badTime}) + notification, _ = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badTime}) }).ToNot(Panic()) Expect(notification).To(BeNil()) @@ -388,6 +470,152 @@ var _ = Describe("Config", func() { }) }) + Context("NoCommunicationAlert", func() { + Context("Evaluate", func() { + testNoCommunication := func() *NoCommunicationAlert { + return &NoCommunicationAlert{} + } + + It("handles being passed a Zero time.Time value", func() { + ctx := contextWithTestLogger() + + alert := testNoCommunication() + + Expect(func() { + alert.Evaluate(ctx, time.Time{}) + }).ToNot(Panic()) + }) + + It("logs evaluation results", func() { + ctx := contextWithTestLogger() + alert := testNoCommunication() + + Expect(func() { + alert.Evaluate(ctx, time.Now().Add(-12*time.Hour)) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logTest.Logger) + lgr.AssertLog(log.InfoLevel, "no communication", log.Fields{ + "changed": true, + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + It("honors non-Zero Delay values", func() { + ctx := contextWithTestLogger() + wontTrigger := time.Now().Add(-6 * time.Minute) + willTrigger := time.Now().Add(-12 * time.Hour) + + alert := testNoCommunication() + alert.Delay = DurationMinutes(10 * time.Minute) + + Expect(func() { + alert.Evaluate(ctx, wontTrigger) + }).ToNot(Panic()) + Expect(alert.IsActive()).To(Equal(false)) + Expect(func() { + alert.Evaluate(ctx, willTrigger) + }).ToNot(Panic()) + Expect(alert.IsActive()).To(Equal(true)) + }) + + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + willTrigger := time.Now().Add(-12 * time.Hour) + willResolve := time.Now() + + alert := testNoCommunication() + + Expect(func() { + alert.Evaluate(ctx, willTrigger) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, willResolve) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + + It("doesn't 
re-send before delay", func() { + ctx := contextWithTestLogger() + willTrigger := time.Now().Add(-12 * time.Hour) + + alert := testNoCommunication() + + notification, _ := alert.Evaluate(ctx, willTrigger) + Expect(notification).ToNot(BeNil()) + sentAt := time.Now() + notification.Sent(sentAt) + Expect(alert.Sent).ToNot(BeZero()) + + notification, _ = alert.Evaluate(ctx, willTrigger) + Expect(notification).To(BeNil()) + Expect(alert.Sent).To(BeTemporally("~", sentAt)) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + willTrigger := time.Now().Add(-12 * time.Hour) + willResolve := time.Now() + + alert := testNoCommunication() + + Expect(func() { + alert.Evaluate(ctx, willTrigger) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, willResolve) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + was := alert.Resolved + Expect(func() { + alert.Evaluate(ctx, willTrigger) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + willTrigger := time.Now().Add(-10*time.Minute + -DefaultNoCommunicationDelay) + willResolve := time.Now() + + alert := testNoCommunication() + + Expect(func() { + alert.Evaluate(ctx, willResolve) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, willTrigger) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) + + It("validates the time at which data was last received", func() { + ctx := contextWithTestLogger() + validLastReceived := time.Now().Add(-10*time.Minute + -DefaultNoCommunicationDelay) + invalidLastReceived := time.Time{} + var notification *NotificationWithHook + + Expect(func() { + notification, _ = testNoCommunication().Evaluate(ctx, validLastReceived) + }).ToNot(Panic()) + Expect(notification).ToNot(BeNil()) + + Expect(func() { + 
notification, _ = testNoCommunication().Evaluate(ctx, invalidLastReceived) + }).ToNot(Panic()) + Expect(notification).To(BeNil()) + }) + }) + }) + Context("LowAlert", func() { Context("Threshold", func() { It("accepts values in mmol/L", func() { @@ -458,16 +686,16 @@ var _ = Describe("Config", func() { It("handles being passed empty data", func() { ctx := contextWithTestLogger() - var notification *Notification + var notification *NotificationWithHook alert := testLow() Expect(func() { - notification = alert.Evaluate(ctx, []*glucose.Glucose{}) + notification, _ = alert.Evaluate(ctx, []*glucose.Glucose{}) }).ToNot(Panic()) Expect(notification).To(BeNil()) Expect(func() { - notification = alert.Evaluate(ctx, nil) + notification, _ = alert.Evaluate(ctx, nil) }).ToNot(Panic()) Expect(notification).To(BeNil()) }) @@ -547,31 +775,31 @@ var _ = Describe("Config", func() { It("validates glucose data", func() { ctx := contextWithTestLogger() - var notification *Notification + var notification *NotificationWithHook Expect(func() { - notification = testLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + notification, _ = testLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) }).ToNot(Panic()) Expect(notification).ToNot(BeNil()) badUnits := testGlucoseDatum(1) badUnits.Units = nil Expect(func() { - notification = testLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + notification, _ = testLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) }).ToNot(Panic()) Expect(notification).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - notification = testLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + notification, _ = testLow().Evaluate(ctx, []*glucose.Glucose{badValue}) }).ToNot(Panic()) Expect(notification).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - notification = testLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + notification, _ = testLow().Evaluate(ctx, []*glucose.Glucose{badTime}) 
}).ToNot(Panic()) Expect(notification).To(BeNil()) }) @@ -641,16 +869,16 @@ var _ = Describe("Config", func() { It("handles being passed empty data", func() { ctx := contextWithTestLogger() - var notification *Notification + var notification *NotificationWithHook alert := testHigh() Expect(func() { - notification = alert.Evaluate(ctx, []*glucose.Glucose{}) + notification, _ = alert.Evaluate(ctx, []*glucose.Glucose{}) }).ToNot(Panic()) Expect(notification).To(BeNil()) Expect(func() { - notification = alert.Evaluate(ctx, nil) + notification, _ = alert.Evaluate(ctx, nil) }).ToNot(Panic()) Expect(notification).To(BeNil()) }) @@ -730,31 +958,31 @@ var _ = Describe("Config", func() { It("validates glucose data", func() { ctx := contextWithTestLogger() - var notification *Notification + var notification *NotificationWithHook Expect(func() { - notification = testHigh().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21)}) + notification, _ = testHigh().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21)}) }).ToNot(Panic()) Expect(notification).ToNot(BeNil()) badUnits := testGlucoseDatum(1) badUnits.Units = nil Expect(func() { - notification = testHigh().Evaluate(ctx, []*glucose.Glucose{badUnits}) + notification, _ = testHigh().Evaluate(ctx, []*glucose.Glucose{badUnits}) }).ToNot(Panic()) Expect(notification).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - notification = testHigh().Evaluate(ctx, []*glucose.Glucose{badValue}) + notification, _ = testHigh().Evaluate(ctx, []*glucose.Glucose{badValue}) }).ToNot(Panic()) Expect(notification).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - notification = testHigh().Evaluate(ctx, []*glucose.Glucose{badTime}) + notification, _ = testHigh().Evaluate(ctx, []*glucose.Glucose{badTime}) }).ToNot(Panic()) Expect(notification).To(BeNil()) }) @@ -877,7 +1105,7 @@ var _ = Describe("Config", func() { "value": 80 } } -}`, mockUserID1, mockUserID2, mockUploadID) +}`, 
mockUserID1, mockUserID2, mockDataSetID) conf := &Config{} err := request.DecodeObject(context.Background(), nil, buf, conf) Expect(err).To(Succeed()) @@ -897,7 +1125,7 @@ var _ = Describe("Config", func() { "value": 47.5 } } -}`, mockUserID1, mockUserID2, mockUploadID, nontypesglucose.MgdL) +}`, mockUserID1, mockUserID2, mockDataSetID, nontypesglucose.MgdL) cfg := &Config{} err := request.DecodeObject(context.Background(), nil, buf, cfg) Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) @@ -915,7 +1143,7 @@ var _ = Describe("Config", func() { "value": 1 } } -}`, mockUserID1, mockUserID2, mockUploadID, nontypesglucose.MgdL) +}`, mockUserID1, mockUserID2, mockDataSetID, nontypesglucose.MgdL) cfg := &Config{} err := request.DecodeObject(context.Background(), nil, buf, cfg) Expect(err).To(MatchError("json is malformed")) @@ -923,11 +1151,6 @@ var _ = Describe("Config", func() { }) var ( - testNoCommunicationAlert = func() *NoCommunicationAlert { - return &NoCommunicationAlert{ - Base: Base{Enabled: true}, - } - } testLowAlert = func() *LowAlert { return &LowAlert{ Base: Base{Enabled: true}, @@ -960,7 +1183,7 @@ var ( Base: Base{Enabled: true}, } } - testNoCommunicationDatum = &glucose.Glucose{ + testHighDatum = &glucose.Glucose{ Blood: blood.Blood{ Base: types.Base{ Time: pointer.FromAny(time.Now()), @@ -969,31 +1192,31 @@ var ( Value: pointer.FromAny(11.0), }, } - testHighDatum = &glucose.Glucose{ + testLowDatum = &glucose.Glucose{ Blood: blood.Blood{ Base: types.Base{ Time: pointer.FromAny(time.Now()), }, Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(11.0), + Value: pointer.FromAny(3.9), }, } - testLowDatum = &glucose.Glucose{ + testUrgentLowDatum = &glucose.Glucose{ Blood: blood.Blood{ Base: types.Base{ Time: pointer.FromAny(time.Now()), }, Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(3.9), + Value: pointer.FromAny(2.9), }, } - testUrgentLowDatum = &glucose.Glucose{ + 
testInRangeDatum = &glucose.Glucose{ Blood: blood.Blood{ Base: types.Base{ Time: pointer.FromAny(time.Now()), }, Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(2.9), + Value: pointer.FromAny(6.0), }, } ) @@ -1001,20 +1224,17 @@ var ( var _ = Describe("Alerts", func() { Describe("LongestDelay", func() { It("does what it says", func() { - noComm := testNoCommunicationAlert() - noComm.Delay = DurationMinutes(10 * time.Minute) low := testLowAlert() - low.Delay = DurationMinutes(5 * time.Minute) + low.Delay = DurationMinutes(10 * time.Minute) high := testHighAlert() high.Delay = DurationMinutes(5 * time.Minute) notLooping := testNotLoopingAlert() notLooping.Delay = DurationMinutes(5 * time.Minute) - a := Alerts{ - NoCommunication: noComm, - Low: low, - High: high, - NotLooping: notLooping, + a := DataAlerts{ + Low: low, + High: high, + NotLooping: notLooping, } delay := a.LongestDelay() @@ -1023,9 +1243,6 @@ var _ = Describe("Alerts", func() { }) It("ignores disabled alerts", func() { - noComm := testNoCommunicationAlert() - noComm.Delay = DurationMinutes(10 * time.Minute) - noComm.Enabled = false low := testLowAlert() low.Delay = DurationMinutes(7 * time.Minute) high := testHighAlert() @@ -1033,11 +1250,10 @@ var _ = Describe("Alerts", func() { notLooping := testNotLoopingAlert() notLooping.Delay = DurationMinutes(5 * time.Minute) - a := Alerts{ - NoCommunication: noComm, - Low: low, - High: high, - NotLooping: notLooping, + a := DataAlerts{ + Low: low, + High: high, + NotLooping: notLooping, } delay := a.LongestDelay() @@ -1046,11 +1262,10 @@ var _ = Describe("Alerts", func() { }) It("returns a Zero Duration when no alerts are set", func() { - a := Alerts{ - NoCommunication: nil, - Low: nil, - High: nil, - NotLooping: nil, + a := DataAlerts{ + Low: nil, + High: nil, + NotLooping: nil, } delay := a.LongestDelay() @@ -1060,24 +1275,6 @@ var _ = Describe("Alerts", func() { }) Describe("Evaluate", func() { - Context("when not communicating", 
func() { - It("returns only NoCommunication alerts", func() { - ctx := contextWithTestLogger() - data := []*glucose.Glucose{testNoCommunicationDatum} - data[0].Value = pointer.FromAny(0.0) - a := Alerts{ - NoCommunication: testNoCommunicationAlert(), - UrgentLow: testUrgentLowAlert(), - Low: testLowAlert(), - High: testHighAlert(), - } - - notification := a.Evaluate(ctx, data, nil) - - Expect(notification).To(HaveField("Message", ContainSubstring(NoCommunicationMessage))) - }) - }) - It("logs decisions", func() { Skip("TODO logAlertEvaluation") }) @@ -1085,11 +1282,11 @@ var _ = Describe("Alerts", func() { It("detects low data", func() { ctx := contextWithTestLogger() data := []*glucose.Glucose{testLowDatum} - a := Alerts{ + a := DataAlerts{ Low: testLowAlert(), } - notification := a.Evaluate(ctx, data, nil) + notification, _ := a.Evaluate(ctx, data, nil) Expect(notification).ToNot(BeNil()) Expect(notification.Message).To(ContainSubstring("below low threshold")) @@ -1098,11 +1295,11 @@ var _ = Describe("Alerts", func() { It("detects high data", func() { ctx := contextWithTestLogger() data := []*glucose.Glucose{testHighDatum} - a := Alerts{ + a := DataAlerts{ High: testHighAlert(), } - notification := a.Evaluate(ctx, data, nil) + notification, _ := a.Evaluate(ctx, data, nil) Expect(notification).ToNot(BeNil()) Expect(notification.Message).To(ContainSubstring("above high threshold")) @@ -1112,12 +1309,12 @@ var _ = Describe("Alerts", func() { It("prefers urgent low", func() { ctx := contextWithTestLogger() data := []*glucose.Glucose{testUrgentLowDatum} - a := Alerts{ + a := DataAlerts{ Low: testLowAlert(), UrgentLow: testUrgentLowAlert(), } - notification := a.Evaluate(ctx, data, nil) + notification, _ := a.Evaluate(ctx, data, nil) Expect(notification).ToNot(BeNil()) Expect(notification.Message).To(ContainSubstring("below urgent low threshold")) diff --git a/alerts/evaluator.go b/alerts/evaluator.go new file mode 100644 index 0000000000..371f42bdd8 --- /dev/null +++ 
b/alerts/evaluator.go @@ -0,0 +1,203 @@ +package alerts + +import ( + "cmp" + "context" + "slices" + "time" + + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/permission" +) + +// DataRepository encapsulates queries of the data collection for use with alerts. +type DataRepository interface { + // GetAlertableData queries for the data used to evaluate alerts configurations. + GetAlertableData(ctx context.Context, params GetAlertableDataParams) (*GetAlertableDataResponse, error) +} + +type GetAlertableDataParams struct { + // UserID of the user that owns the data. + UserID string + // UploadID of the device data set to query. + // + // The term DataSetID should be preferred, but UploadID already existed in some places. + UploadID string + // Start limits the data to those recorded after this time. + Start time.Time + // End limits the data to those recorded before this time. + End time.Time +} + +type GetAlertableDataResponse struct { + DosingDecisions []*dosingdecision.DosingDecision + Glucose []*glucose.Glucose +} + +type Evaluator struct { + Alerts Repository + Data DataRepository + Logger log.Logger + Permissions permission.Client +} + +func NewEvaluator(alerts Repository, dataRepo DataRepository, permissions permission.Client, + logger log.Logger) *Evaluator { + + return &Evaluator{ + Alerts: alerts, + Data: dataRepo, + Logger: logger, + Permissions: permissions, + } +} + +// EvaluateData generates alert notifications in response to a user uploading data. 
+func (e *Evaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( + []*NotificationWithHook, error) { + + configs, err := e.gatherConfigs(ctx, followedUserID, dataSetID) + if err != nil { + return nil, err + } + + configsByDataSetID := e.mapConfigsByDataSetID(configs) + + notifications := []*NotificationWithHook{} + for dsID, cfgs := range configsByDataSetID { + resp, err := e.gatherData(ctx, followedUserID, dsID, cfgs) + if err != nil { + return nil, err + } + notifications = slices.Concat(notifications, e.generateNotes(ctx, cfgs, resp)) + } + + return notifications, nil +} + +func (e *Evaluator) mapConfigsByDataSetID(cfgs []*Config) map[string][]*Config { + mapped := map[string][]*Config{} + for _, cfg := range cfgs { + if _, found := mapped[cfg.UploadID]; !found { + mapped[cfg.UploadID] = []*Config{} + } + mapped[cfg.UploadID] = append(mapped[cfg.UploadID], cfg) + } + return mapped +} + +func (e *Evaluator) gatherConfigs(ctx context.Context, followedUserID, dataSetID string) ( + []*Config, error) { + + configs, err := e.Alerts.List(ctx, followedUserID) + if err != nil { + return nil, err + } + configs = slices.DeleteFunc(configs, e.authDenied(ctx)) + configs = slices.DeleteFunc(configs, func(config *Config) bool { + return config.UploadID != dataSetID + }) + return configs, nil +} + +// authDenied builds a function for slices.DeleteFunc to remove unauthorized users' Configs. +// +// This would catch the unintended case where a follower's permission was revoked, but their +// [Config] wasn't deleted. +// +// A closure is used to inject information from the evaluator into the resulting function. 
+func (e *Evaluator) authDenied(ctx context.Context) func(*Config) bool { + return func(c *Config) bool { + if c == nil { + return true + } + logger := e.Logger.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + }) + perms, err := e.Permissions.GetUserPermissions(ctx, c.UserID, c.FollowedUserID) + if err != nil { + logger.WithError(err).Warn("Unable to confirm permissions; skipping") + return true + } + if _, found := perms[permission.Follow]; !found { + logger.Debug("permission denied: skipping") + return true + } + return false + } +} + +func (e *Evaluator) gatherData(ctx context.Context, followedUserID, dataSetID string, + configs []*Config) (*GetAlertableDataResponse, error) { + + if len(configs) == 0 { + return nil, nil + } + + longestDelay := slices.MaxFunc(configs, func(i, j *Config) int { + return cmp.Compare(i.LongestDelay(), j.LongestDelay()) + }).LongestDelay() + longestDelay = max(5*time.Minute, longestDelay) + params := GetAlertableDataParams{ + UserID: followedUserID, + UploadID: dataSetID, + Start: time.Now().Add(-longestDelay), + } + resp, err := e.Data.GetAlertableData(ctx, params) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *Evaluator) generateNotes(ctx context.Context, configs []*Config, + resp *GetAlertableDataResponse) []*NotificationWithHook { + + if len(configs) == 0 { + return nil + } + + notifications := []*NotificationWithHook{} + for _, config := range configs { + lgr := e.Logger.WithFields(log.Fields{ + "userID": config.UserID, + "followedUserID": config.FollowedUserID, + "uploadID": config.UploadID, + }) + evalCtx := log.NewContextWithLogger(ctx, lgr) + notification, changed := config.EvaluateData(evalCtx, resp.Glucose, resp.DosingDecisions) + if notification != nil { + if notification.Sent != nil { + notification.Sent = e.wrapWithUpsert(evalCtx, lgr, config, notification.Sent) + } + notifications = append(notifications, notification) + continue + } else if changed { + // 
No notification was generated, so no further changes are expected. However, + // there were activity changes that need persisting. + err := e.Alerts.Upsert(ctx, config) + if err != nil { + lgr.WithError(err).Error("Unable to save changed alerts config") + continue + } + } + } + + return notifications +} + +// wrapWithUpsert to upsert the Config that triggered the Notification after it's sent. +func (e *Evaluator) wrapWithUpsert(ctx context.Context, + lgr log.Logger, config *Config, original SentFunc) SentFunc { + + return func(at time.Time) { + original(at) + if err := e.Alerts.Upsert(ctx, config); err != nil { + lgr.WithError(err).Error("Unable to save changed alerts config") + } + } +} diff --git a/alerts/evaluator_test.go b/alerts/evaluator_test.go new file mode 100644 index 0000000000..52a1449756 --- /dev/null +++ b/alerts/evaluator_test.go @@ -0,0 +1,499 @@ +package alerts + +import ( + "context" + "errors" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" +) + +var _ = Describe("Evaluator", func() { + Describe("EvaluateData", func() { + It("handles data for users without any followers gracefully", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + + evaluator := NewEvaluator(alertsRepo, nil, nil, lgr) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(notifications).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("handles data queries that return empty results (perm denied)", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + 
UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + }, + }) + dataRepo := newMockDataRepo() + perms := newMockPermissionClient() + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(notifications).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("filters users without permission", func() { + // This simulates the case when permission is revoked, but the corresponding + // alerts.Config isn't yet deleted. + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID + "-2", + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + DataAlerts: DataAlerts{ + High: &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10.0, + Units: nontypesglucose.MmolL, + }, + }, + }, + }, + }, + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + DataAlerts: DataAlerts{ + High: &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10.0, + Units: nontypesglucose.MmolL, + }, + }, + }, + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testHighDatum}, + }, + } + perms := newMockPermissionClient() + perms.Allow(testUserID, permission.Follow, testFollowedUserID) + // This user still has a config, but has had their follow permission revoked. 
+ perms.Allow(testUserID+"-2", permission.Read, testFollowedUserID) + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(err).To(Succeed()) + if Expect(len(notifications)).To(Equal(1)) { + Expect(notifications[0].RecipientUserID).To(Equal(testUserID)) + } + }) + + It("handles data queries that return empty results (no data)", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + }, + }) + dataRepo := newMockDataRepo() + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(notifications).To(BeEmpty()) + Expect(err).To(Succeed()) + }) + + It("returns notifications", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + DataAlerts: DataAlerts{ + UrgentLow: &UrgentLowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 3.0, + Units: nontypesglucose.MmolL, + }, + }, + }, + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testUrgentLowDatum}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + if Expect(notifications).To(HaveLen(1)) { + msgFound := strings.Contains(notifications[0].Message, "below urgent 
low") + Expect(msgFound).To(BeTrue()) + } + Expect(err).To(Succeed()) + }) + + It("queries data based on the longest delay", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID + "-2", + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + DataAlerts: DataAlerts{ + High: &HighAlert{ + Base: Base{Enabled: true}, + Delay: DurationMinutes(6), + Threshold: Threshold{ + Value: 10.0, + Units: nontypesglucose.MmolL, + }, + }, + }, + }, + }, + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + DataAlerts: DataAlerts{ + High: &HighAlert{ + Base: Base{Enabled: true}, + Delay: DurationMinutes(3), + Threshold: Threshold{ + Value: 10.0, + Units: nontypesglucose.MmolL, + }, + }, + }, + }, + }, + }) + highDatum := testHighDatum + highDatum.Blood.Base.Time = pointer.FromAny(time.Now().Add(-10 * time.Minute)) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{highDatum}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + if Expect(notifications).To(HaveLen(2)) { + msgFound := strings.Contains(notifications[0].Message, "above high") + Expect(msgFound).To(BeTrue()) + } + Expect(err).To(Succeed()) + }) + + It("wraps notifications so that changes are persisted when notifications are pushed", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + startOfTest := time.Now() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + DataAlerts: DataAlerts{ + 
UrgentLow: &UrgentLowAlert{ + Base: Base{ + Enabled: true, + Activity: Activity{ + Triggered: time.Now().Add(-10 * time.Minute), + }, + }, + Threshold: Threshold{ + Value: 3.0, + Units: nontypesglucose.MmolL, + }, + }, + }, + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testUrgentLowDatum}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + Expect(err).To(Succeed()) + for _, notification := range notifications { + Expect(func() { notification.Sent(time.Now()) }).ToNot(Panic()) + } + + Expect(len(notifications)).To(Equal(1)) + if Expect(len(alertsRepo.UpsertCalls)).To(Equal(1)) { + activity := alertsRepo.UpsertCalls[0].UrgentLow.Activity + Expect(activity.Sent).To(BeTemporally(">", startOfTest)) + } + }) + + It("persists changes when there's no new Notification", func() { + // For example if an alert is resolved, that change should be persisted, even + // when there isn't a notification generated. 
+ ctx, lgr := contextWithNullLoggerDeluxe() + startOfTest := time.Now() + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + DataAlerts: DataAlerts{ + UrgentLow: &UrgentLowAlert{ + Base: Base{ + Enabled: true, + Activity: Activity{ + Triggered: time.Now().Add(-10 * time.Minute), + }, + }, + Threshold: Threshold{ + Value: 3.0, + Units: nontypesglucose.MmolL, + }, + }, + }, + }, + }, + }) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + { + Glucose: []*glucose.Glucose{testInRangeDatum}, + }, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + + Expect(err).To(Succeed()) + Expect(len(notifications)).To(Equal(0)) + if Expect(len(alertsRepo.UpsertCalls)).To(Equal(1)) { + activity := alertsRepo.UpsertCalls[0].UrgentLow.Activity + Expect(activity.Resolved).To(BeTemporally(">", startOfTest)) + } + }) + + Context("when the user has multiple data sets", func() { + It("ignores Configs that don't match the data set id", func() { + ctx, lgr := contextWithNullLoggerDeluxe() + resp1 := newTestAlertsConfig(testUserID, testDataSetID) + resp2 := newTestAlertsConfig(testUserID+"2", testDataSetID+"2") + alertsRepo := newMockAlertsClient() + alertsRepo.ListResponses = append(alertsRepo.ListResponses, + []*Config{resp1, resp2}) + dataRepo := newMockDataRepo() + dataRepo.AlertableData = []*GetAlertableDataResponse{ + {Glucose: []*glucose.Glucose{testUrgentLowDatum}}, + } + perms := newMockPermissionClient() + perms.AlwaysAllow = true + + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + notifications, err := evaluator.EvaluateData(ctx, + testFollowedUserID, testDataSetID) + + Expect(err).To(Succeed()) + 
if Expect(len(notifications)).To(Equal(1)) { + recipientUserID := notifications[0].Notification.RecipientUserID + Expect(recipientUserID).To(Equal(testUserID)) + } + }) + }) + }) +}) + +func newTestAlertsConfig(userID, dataSetID string) *Config { + return &Config{ + UserID: userID, + FollowedUserID: testFollowedUserID, + UploadID: dataSetID, + Alerts: Alerts{ + DataAlerts: DataAlerts{ + UrgentLow: testUrgentLowAlert(), + }, + }, + } +} + +type mockAlertsClient struct { + UsersWithoutCommsError error + UsersWithoutCommsResponses [][]LastCommunication + ListResponses [][]*Config + ListError error + UpsertError error + UpsertCalls []*Config +} + +func newMockAlertsClient() *mockAlertsClient { + return &mockAlertsClient{ + UsersWithoutCommsResponses: [][]LastCommunication{}, + ListResponses: [][]*Config{}, + UpsertCalls: []*Config{}, + } +} + +func (c *mockAlertsClient) Get(ctx context.Context, conf *Config) (*Config, error) { + return nil, nil +} + +func (c *mockAlertsClient) Upsert(ctx context.Context, conf *Config) error { + c.UpsertCalls = append(c.UpsertCalls, conf) + if c.UpsertError != nil { + return c.UpsertError + } + return nil +} + +func (c *mockAlertsClient) Delete(ctx context.Context, conf *Config) error { + return nil +} + +func (c *mockAlertsClient) List(ctx context.Context, userID string) ([]*Config, error) { + if c.ListError != nil { + return nil, c.ListError + } + if len(c.ListResponses) > 0 { + ret := c.ListResponses[0] + c.ListResponses = c.ListResponses[1:] + return ret, nil + } + return []*Config{}, nil +} + +func (c *mockAlertsClient) UsersWithoutCommunication(context.Context) ( + []LastCommunication, error) { + + if c.UsersWithoutCommsError != nil { + return nil, c.UsersWithoutCommsError + } + if len(c.UsersWithoutCommsResponses) > 0 { + ret := c.UsersWithoutCommsResponses[0] + c.UsersWithoutCommsResponses = c.UsersWithoutCommsResponses[1:] + return ret, nil + } + return nil, nil +} + +func (c *mockAlertsClient) EnsureIndexes() error { + return 
nil +} + +type mockDataRepo struct { + AlertableData []*GetAlertableDataResponse +} + +func newMockDataRepo() *mockDataRepo { + return &mockDataRepo{ + AlertableData: []*GetAlertableDataResponse{}, + } +} + +func (r *mockDataRepo) GetAlertableData(ctx context.Context, params GetAlertableDataParams) ( + *GetAlertableDataResponse, error) { + + if len(r.AlertableData) > 0 { + ret := r.AlertableData[0] + r.AlertableData = r.AlertableData[1:] + return ret, nil + } + + return &GetAlertableDataResponse{ + DosingDecisions: []*dosingdecision.DosingDecision{}, + Glucose: []*glucose.Glucose{}, + }, nil +} + +type mockPermissionClient struct { + AlwaysAllow bool + Perms map[string]permission.Permissions +} + +func newMockPermissionClient() *mockPermissionClient { + return &mockPermissionClient{ + Perms: map[string]permission.Permissions{}, + } +} + +func (c *mockPermissionClient) GetUserPermissions(ctx context.Context, + requestUserID string, targetUserID string) (permission.Permissions, error) { + + if c.AlwaysAllow { + return map[string]permission.Permission{ + permission.Follow: {}, + permission.Read: {}, + }, nil + } + + if p, ok := c.Perms[c.Key(requestUserID, targetUserID)]; ok { + return p, nil + } else { + return nil, errors.New("test error NOT FOUND") + } +} + +func (c *mockPermissionClient) Allow(requestUserID, perm, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + if _, found := c.Perms[key]; !found { + c.Perms[key] = permission.Permissions{} + } + c.Perms[key][perm] = permission.Permission{} +} + +func (c *mockPermissionClient) Key(requesterUserID, targetUserID string) string { + return requesterUserID + targetUserID +} diff --git a/alerts/tasks.go b/alerts/tasks.go new file mode 100644 index 0000000000..77d8477a7b --- /dev/null +++ b/alerts/tasks.go @@ -0,0 +1,216 @@ +package alerts + +import ( + "context" + "slices" + "time" + + "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/devicetokens" + 
"github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/push" + "github.com/tidepool-org/platform/task" +) + +type CarePartnerRunner struct { + logger log.Logger + + alerts AlertsClient + authClient auth.ServerSessionTokenProvider + deviceTokens auth.DeviceTokensClient + permissions permission.Client + pusher Pusher +} + +// AlertsClient abstracts the alerts collection for the CarePartnerRunner. +// +// One implementation is [Client]. +type AlertsClient interface { + List(_ context.Context, followedUserID string) ([]*Config, error) + Upsert(context.Context, *Config) error + // UsersWithoutCommunication returns a slice of user ids for those users that haven't + // uploaded data recently. + UsersWithoutCommunication(context.Context) ([]LastCommunication, error) +} + +func NewCarePartnerRunner(logger log.Logger, alerts AlertsClient, + deviceTokens auth.DeviceTokensClient, pusher Pusher, permissions permission.Client, + authClient auth.ServerSessionTokenProvider) (*CarePartnerRunner, error) { + + return &CarePartnerRunner{ + logger: logger, + alerts: alerts, + authClient: authClient, + deviceTokens: deviceTokens, + pusher: pusher, + permissions: permissions, + }, nil +} + +func (r *CarePartnerRunner) GetRunnerType() string { + return task.CarePartnerType +} + +func (r *CarePartnerRunner) GetRunnerTimeout() time.Duration { + return 30 * time.Second +} + +func (r *CarePartnerRunner) GetRunnerDeadline() time.Time { + return time.Now().Add(30 * time.Second) +} + +func (r *CarePartnerRunner) GetRunnerDurationMaximum() time.Duration { + return 30 * time.Second +} + +func (r *CarePartnerRunner) Run(ctx context.Context, tsk *task.Task) { + r.logger.Info("care partner no communication check") + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, r.authClient) + start := time.Now() + if err := r.evaluateLastComms(ctx); err != nil { + 
r.logger.WithError(err).Warn("running care partner no communication check") + } + r.scheduleNextRun(tsk, start) +} + +func (r *CarePartnerRunner) evaluateLastComms(ctx context.Context) error { + lastComms, err := r.alerts.UsersWithoutCommunication(ctx) + if err != nil { + return errors.Wrap(err, "listing users without communication") + } + + for _, lastComm := range lastComms { + if err := r.evaluateLastComm(ctx, lastComm); err != nil { + r.logger.WithError(err). + WithField("followedUserID", lastComm.UserID). + Info("unable to evaluate no communication") + continue + } + } + + return nil +} + +func (r *CarePartnerRunner) evaluateLastComm(ctx context.Context, + lastComm LastCommunication) error { + + alertsConfigs, err := r.alerts.List(ctx, lastComm.UserID) + if err != nil { + return errors.Wrap(err, "listing follower alerts configs") + } + alertsConfigs = slices.DeleteFunc(alertsConfigs, func(config *Config) bool { + return config.UploadID != lastComm.DataSetID + }) + alertsConfigs = slices.DeleteFunc(alertsConfigs, r.authDenied(ctx)) + notifications := []*NotificationWithHook{} + toUpdate := map[*Config]struct{}{} + for _, alertsConfig := range alertsConfigs { + lastData := lastComm.LastReceivedDeviceData + notification, changed := alertsConfig.EvaluateNoCommunication(ctx, lastData) + if notification != nil { + notifications = append(notifications, notification) + } + if changed || notification != nil { + toUpdate[alertsConfig] = struct{}{} + } + } + r.pushNotifications(ctx, notifications) + // Only after notifications have been pushed should they be saved. The alerts configs + // could change during evaluation or in response to their notification being pushed. + for alertConfig := range toUpdate { + if err := r.alerts.Upsert(ctx, alertConfig); err != nil { + r.logger.WithError(err). + WithField("UserID", alertConfig.UserID). + WithField("FollowedUserID", alertConfig.FollowedUserID). 
+ Info("Unable to upsert alerts config") + } + } + + return nil +} + +func (r *CarePartnerRunner) authDenied(ctx context.Context) func(*Config) bool { + return func(c *Config) bool { + if c == nil { + return true + } + logger := r.logger.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + }) + perms, err := r.permissions.GetUserPermissions(ctx, c.UserID, c.FollowedUserID) + if err != nil { + logger.WithError(err).Warn("Unable to confirm permissions; skipping") + return true + } + if _, found := perms[permission.Follow]; !found { + logger.Debug("permission denied: skipping") + return true + } + return false + } +} + +func (r *CarePartnerRunner) pushNotifications(ctx context.Context, + notifications []*NotificationWithHook) { + + for _, notification := range notifications { + lgr := r.logger.WithField("recipientUserID", notification.RecipientUserID) + tokens, err := r.deviceTokens.GetDeviceTokens(ctx, notification.RecipientUserID) + if err != nil { + lgr.WithError(err).Info("unable to retrieve device tokens") + } + if len(tokens) == 0 { + lgr.Debug("no device tokens found, won't push any notifications") + } + pushNote := ToPushNotification(notification.Notification) + for _, token := range tokens { + err := r.pusher.Push(ctx, token, pushNote) + if err != nil { + lgr.WithError(err).Info("unable to push notification") + } else { + notification.Sent(time.Now()) + } + } + } +} + +func (r *CarePartnerRunner) scheduleNextRun(tsk *task.Task, lastStart time.Time) { + // Ideally, we would start the next run 1 second after this run... + nextDesiredRun := lastStart.Add(time.Second) + now := time.Now() + if nextDesiredRun.Before(now) { + r.logger.Info("care partner is bumping nextDesiredRun") + // nextDesiredRun, when added to time.Now in tsk.RepeatAvailableAfter, must + // result in a time in the future or the task will be marked failed (and not run + // again). 
+ // + // One workaround is to take a guess at how long it will take Run() to return + // and the task queue to evaluate the task's AvailableAfter time. Maybe the task + // queue could be re-worked to accept a value that indicates "as soon as + // possible"? Or if it accepted a time.Duration, then one could pass it + // time.Nanosecond to get closer to "ASAP", and then the Zero value might mean + // don't repeat. Or the Zero value could mean repeat ASAP. Or a negative value + // could mean repeat now. Whatever. It would prevent the task from being marked + // a failure for not being able to guess when the value would be read. Which + // wasn't its intent I'm sure, it just wasn't designed for tasks with the level + // of resolution and repetition expected for this purpose. + nextDesiredRun = now.Add(25 * time.Millisecond) + } + tsk.RepeatAvailableAfter(time.Until(nextDesiredRun)) +} + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} + +// ToPushNotification converts Notification to push.Notification. +func ToPushNotification(notification *Notification) *push.Notification { + return &push.Notification{ + Message: notification.Message, + } +} diff --git a/alerts/tasks_test.go b/alerts/tasks_test.go new file mode 100644 index 0000000000..13cf565ee4 --- /dev/null +++ b/alerts/tasks_test.go @@ -0,0 +1,273 @@ +package alerts + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/push" + "github.com/tidepool-org/platform/task" +) + +var _ = Describe("CarePartnerRunner", func() { + Describe("Run", func() { + It("schedules its next run", func() { + runner, test := newCarePartnerRunnerTest() + + runner.Run(test.Ctx, test.Task) + + Expect(test.Task.AvailableTime).ToNot(BeZero()) + Expect(test.Task.DeadlineTime).To(BeNil()) + Expect(test.Task.State).To(Equal(task.TaskStatePending)) + }) + + Context("continues after logging errors", func() { + It("retrieving users without communication", func() { + runner, test := newCarePartnerRunnerTest() + test.Alerts.UsersWithoutCommsError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + test.Logger.AssertWarn("running care partner no communication check") + }) + + It("retrieving an alerts config", func() { + runner, test := newCarePartnerRunnerTest() + test.Alerts.ListError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + test.Logger.AssertInfo("unable to evaluate no communication", log.Fields{ + "followedUserID": testFollowedUserID, + }) + }) + + It("upserting alerts configs", func() { + runner, test := newCarePartnerRunnerTest() + test.Alerts.UpsertError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + test.Logger.AssertInfo("Unable to upsert alerts config", log.Fields{ + "UserID": testUserID, + "FollowedUserID": testFollowedUserID, + }) + }) + + It("retrieving device tokens", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetError = fmt.Errorf("test error") + + runner.Run(test.Ctx, test.Task) + + test.Logger.AssertInfo("unable to retrieve device tokens", log.Fields{ + "recipientUserID": testUserID, + }) + }) + + It("pushes notifications", func() { + runner, test := 
newCarePartnerRunnerTest() + test.Pusher.PushErrors = append(test.Pusher.PushErrors, fmt.Errorf("test error")) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + test.Logger.AssertInfo("unable to push notification", log.Fields{ + "recipientUserID": testUserID, + }) + }) + }) + + It("ignores Configs that don't match the data set id", func() { + runner, test := newCarePartnerRunnerTest() + firstResp := test.Alerts.UsersWithoutCommsResponses[0] + test.Alerts.UsersWithoutCommsResponses[0] = append(firstResp, LastCommunication{ + UserID: firstResp[0].UserID, + DataSetID: "non-matching", + LastReceivedDeviceData: firstResp[0].LastReceivedDeviceData, + }) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + }) + + It("pushes to each token", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetResponses[0] = append(test.Tokens.GetResponses[0], + test.Tokens.GetResponses[0][0]) + + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(2)) + }) + + It("pushes to each token, even if the first experiences an error", func() { + runner, test := newCarePartnerRunnerTest() + test.Tokens.GetResponses[0] = append(test.Tokens.GetResponses[0], + test.Tokens.GetResponses[0][0]) + test.Pusher.PushErrors = append([]error{fmt.Errorf("test error")}, test.Pusher.PushErrors...) 
+ + runner.Run(test.Ctx, test.Task) + + Expect(len(test.Pusher.PushCalls)).To(Equal(2)) + }) + + It("ignores Configs that don't have permission", func() { + runner, test := newCarePartnerRunnerTest() + // disable permissions, no configs should be used + test.Permissions.AlwaysAllow = false + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Pusher.PushCalls)).To(Equal(0)) + + // reset, add a user *with* perms, and check that it works + runner, test = newCarePartnerRunnerTest() + userIDWithPerm := testUserID + "2" + test.Permissions.AlwaysAllow = false + test.Permissions.Allow(userIDWithPerm, permission.Follow, testFollowedUserID) + test.Alerts.ListResponses[0] = append(test.Alerts.ListResponses[0], + &Config{ + UserID: userIDWithPerm, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + NoCommunicationAlert: &NoCommunicationAlert{}, + }, + }, + ) + runner.Run(test.Ctx, test.Task) + Expect(len(test.Pusher.PushCalls)).To(Equal(1)) + }) + }) +}) + +type carePartnerRunnerTest struct { + Alerts *mockAlertsClient + Ctx context.Context + Logger *logtest.Logger + Permissions *mockPermissionClient + Pusher *mockPusher + Task *task.Task + Tokens *mockDeviceTokensClient +} + +func newCarePartnerRunnerTest() (*CarePartnerRunner, *carePartnerRunnerTest) { + alerts := newMockAlertsClient() + lgr := logtest.NewLogger() + ctx := log.NewContextWithLogger(context.Background(), lgr) + pusher := newMockPusher() + tsk := &task.Task{} + tokens := newMockDeviceTokensClient() + perms := newMockPermissionClient() + authClient := newMockAuthTokenProvider() + perms.AlwaysAllow = true + + runner, err := NewCarePartnerRunner(lgr, alerts, tokens, pusher, perms, authClient) + Expect(err).To(Succeed()) + + alerts.UsersWithoutCommsResponses = [][]LastCommunication{ + { + { + UserID: testFollowedUserID, + DataSetID: testDataSetID, + LastReceivedDeviceData: time.Now().Add(-12 * time.Hour), + }, + }, + } + alerts.ListResponses = [][]*Config{ + { + { + UserID: 
testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + Alerts: Alerts{ + NoCommunicationAlert: &NoCommunicationAlert{}, + }, + }, + }, + } + tokens.GetResponses = [][]*devicetokens.DeviceToken{ + { + { + Apple: &devicetokens.AppleDeviceToken{}, + }, + }, + } + + return runner, &carePartnerRunnerTest{ + Alerts: alerts, + Ctx: ctx, + Logger: lgr, + Permissions: perms, + Pusher: pusher, + Task: tsk, + Tokens: tokens, + } +} + +type mockDeviceTokensClient struct { + GetError error + GetResponses [][]*devicetokens.DeviceToken +} + +func newMockDeviceTokensClient() *mockDeviceTokensClient { + return &mockDeviceTokensClient{ + GetResponses: [][]*devicetokens.DeviceToken{}, + } +} + +func (c *mockDeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + if c.GetError != nil { + return nil, c.GetError + } + if len(c.GetResponses) > 0 { + ret := c.GetResponses[0] + c.GetResponses = c.GetResponses[1:] + return ret, nil + } + return nil, nil +} + +type mockPusher struct { + PushCalls []pushCall + PushErrors []error +} + +type pushCall struct { + Token *devicetokens.DeviceToken + Notification *push.Notification +} + +func newMockPusher() *mockPusher { + return &mockPusher{} +} + +func (p *mockPusher) Push(_ context.Context, + token *devicetokens.DeviceToken, notification *push.Notification) error { + + p.PushCalls = append(p.PushCalls, pushCall{token, notification}) + if len(p.PushErrors) > 0 { + err := p.PushErrors[0] + p.PushErrors = p.PushErrors[1:] + return err + } + return nil +} + +type mockAuthTokenProvider struct{} + +func newMockAuthTokenProvider() *mockAuthTokenProvider { + return &mockAuthTokenProvider{} +} + +func (p *mockAuthTokenProvider) ServerSessionToken() (string, error) { + return "", nil +} diff --git a/auth/store/mongo/device_tokens_repository.go b/auth/store/mongo/device_tokens_repository.go index 8cc60fffbb..d2bfad7a41 100644 --- a/auth/store/mongo/device_tokens_repository.go 
+++ b/auth/store/mongo/device_tokens_repository.go @@ -38,7 +38,7 @@ func (r *deviceTokenRepo) Upsert(ctx context.Context, doc *devicetokens.Document return errors.New("UserID is empty") } if doc.TokenKey == "" { - return errors.New("TokenID is empty") + return errors.New("TokenKey is empty") } opts := options.Update().SetUpsert(true) diff --git a/auth/store/mongo/device_tokens_repository_test.go b/auth/store/mongo/device_tokens_repository_test.go new file mode 100644 index 0000000000..6eb45b2221 --- /dev/null +++ b/auth/store/mongo/device_tokens_repository_test.go @@ -0,0 +1,76 @@ +package mongo + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/tidepool-org/platform/auth/store" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/store/structured/mongo" + storeStructuredMongoTest "github.com/tidepool-org/platform/store/structured/mongo/test" +) + +const testUserID = "857ec1d7-8777-4877-a308-96a23c066524" + +var _ = Describe("deviceTokenRepo", Label("mongodb", "slow", "integration"), func() { + It("retrieves all for the given user id", func() { + test := newDeviceTokensRepoTest() + + docs, err := test.Repo.GetAllByUserID(test.Ctx, testUserID) + Expect(err).To(Succeed()) + + if Expect(docs).To(HaveLen(2)) { + for _, doc := range docs { + Expect(doc.UserID).To(Equal(testUserID)) + } + } + }) + + It("ensures indexes", func() { + test := newDeviceTokensRepoTest() + Expect(test.Repo.EnsureIndexes()).To(Succeed()) + }) +}) + +type deviceTokensRepoTest struct { + Ctx context.Context + Repo store.DeviceTokenRepository + Config *mongo.Config + Store *Store +} + +func newDeviceTokensRepoTest() *deviceTokensRepoTest { + test := &deviceTokensRepoTest{ + Ctx: context.Background(), + Config: storeStructuredMongoTest.NewConfig(), + } + store, err := NewStore(test.Config) + Expect(err).To(Succeed()) + test.Store = store + test.Repo = store.NewDeviceTokenRepository() + + testDocs := 
[]*devicetokens.Document{ + { + UserID: testUserID, + TokenKey: "a", + DeviceToken: devicetokens.DeviceToken{}, + }, + { + UserID: testUserID, + TokenKey: "b", + DeviceToken: devicetokens.DeviceToken{}, + }, + { + UserID: "not" + testUserID, + TokenKey: "c", + DeviceToken: devicetokens.DeviceToken{}, + }, + } + for _, testDoc := range testDocs { + Expect(test.Repo.Upsert(test.Ctx, testDoc)).To(Succeed()) + } + + return test +} diff --git a/auth/store/mongo/store_test.go b/auth/store/mongo/store_test.go index d12b0fba0f..34eec13f1b 100644 --- a/auth/store/mongo/store_test.go +++ b/auth/store/mongo/store_test.go @@ -133,7 +133,7 @@ var _ = Describe("Store", func() { doc.UserID = "user-id" doc.TokenKey = "" err = repository.Upsert(ctx, doc) - Expect(err).To(MatchError("TokenID is empty")) + Expect(err).To(MatchError("TokenKey is empty")) }) It("updates the existing document, instead of creating a duplicate", func() { diff --git a/data/events/alerts.go b/data/events/alerts.go index 2308a58d32..2e7397dc4c 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -1,10 +1,8 @@ package events import ( - "cmp" "context" "os" - "slices" "strings" "time" @@ -13,7 +11,6 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/auth" - "github.com/tidepool-org/platform/data/store" "github.com/tidepool-org/platform/data/types/blood/glucose" "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/devicetokens" @@ -27,11 +24,12 @@ import ( type Consumer struct { Alerts AlertsClient - Data store.DataRepository + Data alerts.DataRepository DeviceTokens auth.DeviceTokensClient Evaluator AlertsEvaluator Permissions permission.Client Pusher Pusher + Recorder EventsRecorder Logger log.Logger } @@ -76,7 +74,7 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, ctxLog := c.logger(ctx).WithField("followedUserID", cfg.FollowedUserID) ctx = log.NewContextWithLogger(ctx, ctxLog) - notes, err := 
c.Evaluator.Evaluate(ctx, cfg.FollowedUserID, cfg.UploadID) + notes, err := c.Evaluator.EvaluateData(ctx, cfg.FollowedUserID, cfg.UploadID) if err != nil { format := "Unable to evalaute alerts configs triggered event for user %s" return errors.Wrapf(err, format, cfg.UserID) @@ -91,7 +89,7 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, } func (c *Consumer) consumeDeviceData(ctx context.Context, - session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { datum := &Glucose{} if err := unmarshalMessageValue(msg.Value, datum); err != nil { @@ -104,10 +102,19 @@ func (c *Consumer) consumeDeviceData(ctx context.Context, return errors.New("Unable to retrieve alerts configs: userID is nil") } if datum.UploadID == nil { - return errors.New("Unable to retrieve alerts configs: uploadID is nil") + return errors.New("Unable to retrieve alerts configs: DataSetID is nil") } ctx = log.NewContextWithLogger(ctx, lgr.WithField("followedUserID", *datum.UserID)) - notes, err := c.Evaluator.Evaluate(ctx, *datum.UserID, *datum.UploadID) + lastComm := alerts.LastCommunication{ + UserID: *datum.UserID, + LastReceivedDeviceData: time.Now(), + DataSetID: *datum.UploadID, + } + err := c.Recorder.RecordReceivedDeviceData(ctx, lastComm) + if err != nil { + lgr.WithError(err).Info("Unable to record device data received") + } + notes, err := c.Evaluator.EvaluateData(ctx, *datum.UserID, *datum.UploadID) if err != nil { format := "Unable to evalaute device data triggered event for user %s" return errors.Wrapf(err, format, *datum.UserID) @@ -123,7 +130,7 @@ func (c *Consumer) consumeDeviceData(ctx context.Context, return nil } -func (c *Consumer) pushNotes(ctx context.Context, notifications []*alerts.Notification) { +func (c *Consumer) pushNotes(ctx context.Context, notifications []*alerts.NotificationWithHook) { lgr := c.logger(ctx) // Notes could be pushed into a Kafka topic to have a 
more durable retry, @@ -137,10 +144,13 @@ func (c *Consumer) pushNotes(ctx context.Context, notifications []*alerts.Notifi if len(tokens) == 0 { lgr.Debug("no device tokens found, won't push any notifications") } - pushNote := push.FromAlertsNotification(notification) + pushNote := alerts.ToPushNotification(notification.Notification) for _, token := range tokens { - if err := c.Pusher.Push(ctx, token, pushNote); err != nil { + err := c.Pusher.Push(ctx, token, pushNote) + if err != nil { lgr.WithError(err).Info("Unable to push notification") + } else { + notification.Sent(time.Now()) } } } @@ -165,164 +175,8 @@ func (c *Consumer) logger(ctx context.Context) log.Logger { } type AlertsEvaluator interface { - Evaluate(ctx context.Context, followedUserID, dataSetID string) ([]*alerts.Notification, error) -} - -func NewAlertsEvaluator(alerts AlertsClient, data store.DataRepository, - perms permission.Client) *evaluator { - - return &evaluator{ - Alerts: alerts, - Data: data, - Permissions: perms, - } -} - -// evaluator implements AlertsEvaluator. -type evaluator struct { - Alerts AlertsClient - Data store.DataRepository - Permissions permission.Client -} - -// logger produces a log.Logger. -// -// It tries a number of options before falling back to a null Logger. -func (e *evaluator) logger(ctx context.Context) log.Logger { - // A context's Logger is preferred, as it has the most... context. - if ctxLgr := log.LoggerFromContext(ctx); ctxLgr != nil { - return ctxLgr - } - fallback, err := logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) - if err != nil { - fallback = lognull.NewLogger() - } - return fallback -} - -// Evaluate followers' alerts.Configs to generate alert notifications. 
-func (e *evaluator) Evaluate(ctx context.Context, followedUserID, dataSetID string) ( - []*alerts.Notification, error) { - - alertsConfigs, err := e.gatherAlertsConfigs(ctx, followedUserID, dataSetID) - if err != nil { - return nil, err - } - - alertsConfigsByUploadID := e.mapAlertsConfigsByUploadID(alertsConfigs) - - notifications := []*alerts.Notification{} - for uploadID, cfgs := range alertsConfigsByUploadID { - resp, err := e.gatherData(ctx, followedUserID, uploadID, cfgs) - if err != nil { - return nil, err - } - notifications = slices.Concat(notifications, e.generateNotes(ctx, cfgs, resp)) - } - - return notifications, nil -} - -func (e *evaluator) mapAlertsConfigsByUploadID(cfgs []*alerts.Config) map[string][]*alerts.Config { - mapped := map[string][]*alerts.Config{} - for _, cfg := range cfgs { - if _, found := mapped[cfg.UploadID]; !found { - mapped[cfg.UploadID] = []*alerts.Config{} - } - mapped[cfg.UploadID] = append(mapped[cfg.UploadID], cfg) - } - return mapped -} - -// gatherAlertsConfigs for the given followed user and data set. -// -// Those configs which don't match the data set or whose owners don't have permission are -// removed. -func (e *evaluator) gatherAlertsConfigs(ctx context.Context, - followedUserID, dataSetID string) ([]*alerts.Config, error) { - - alertsConfigs, err := e.Alerts.List(ctx, followedUserID) - if err != nil { - return nil, err - } - alertsConfigs = slices.DeleteFunc(alertsConfigs, e.authDenied(ctx)) - alertsConfigs = slices.DeleteFunc(alertsConfigs, func(c *alerts.Config) bool { - return c.UploadID != dataSetID - }) - return alertsConfigs, nil -} - -// authDenied builds functions that enable slices.DeleteFunc to remove -// unauthorized users' alerts.Configs. -// -// Via a closure it's able to inject information from the Context and the -// evaluator itself into the resulting function. 
-func (e *evaluator) authDenied(ctx context.Context) func(ac *alerts.Config) bool { - lgr := e.logger(ctx) - return func(ac *alerts.Config) bool { - if ac == nil { - return true - } - lgr = lgr.WithFields(log.Fields{ - "userID": ac.UserID, - "followedUserID": ac.FollowedUserID, - }) - perms, err := e.Permissions.GetUserPermissions(ctx, ac.UserID, ac.FollowedUserID) - if err != nil { - lgr.WithError(err).Warn("Unable to confirm permissions; skipping") - return true - } - if _, found := perms[permission.Follow]; !found { - lgr.Debug("permission denied: skipping") - return true - } - return false - } -} - -func (e *evaluator) gatherData(ctx context.Context, followedUserID, uploadID string, - alertsConfigs []*alerts.Config) (*store.AlertableResponse, error) { - - if len(alertsConfigs) == 0 { - return nil, nil - } - - longestDelay := slices.MaxFunc(alertsConfigs, func(i, j *alerts.Config) int { - return cmp.Compare(i.LongestDelay(), j.LongestDelay()) - }).LongestDelay() - longestDelay = max(5*time.Minute, longestDelay) - params := store.AlertableParams{ - UserID: followedUserID, - UploadID: uploadID, - Start: time.Now().Add(-longestDelay), - } - resp, err := e.Data.GetAlertableData(ctx, params) - if err != nil { - return nil, err - } - - return resp, nil -} - -func (e *evaluator) generateNotes(ctx context.Context, - alertsConfigs []*alerts.Config, resp *store.AlertableResponse) []*alerts.Notification { - - lgr := e.logger(ctx) - notifications := []*alerts.Notification{} - for _, alertsConfig := range alertsConfigs { - l := lgr.WithFields(log.Fields{ - "userID": alertsConfig.UserID, - "followedUserID": alertsConfig.FollowedUserID, - "uploadID": alertsConfig.UploadID, - }) - c := log.NewContextWithLogger(ctx, l) - note := alertsConfig.Evaluate(c, resp.Glucose, resp.DosingDecisions) - if note != nil { - notifications = append(notifications, note) - } - } - - return notifications + // EvaluateData to check if notifications should be sent in response to new data. 
+ EvaluateData(ctx context.Context, followedUserID, dataSetID string) ([]*alerts.NotificationWithHook, error) } func unmarshalMessageValue[A any](b []byte, payload *A) error { diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index b3d99df7a7..51e66863e9 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -2,6 +2,7 @@ package events import ( "context" + "sync" "time" "github.com/IBM/sarama" @@ -12,7 +13,6 @@ import ( "github.com/tidepool-org/platform/alerts" nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" - "github.com/tidepool-org/platform/data/store" storetest "github.com/tidepool-org/platform/data/store/test" "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood" @@ -30,14 +30,7 @@ const ( testUserID = "test-user-id" testFollowedUserID = "test-followed-user-id" testUserNoPermsID = "test-user-no-perms" - testUploadID = "test-upload-id" -) - -var ( - testMongoUrgentLowResponse = &store.AlertableResponse{ - Glucose: []*glucose.Glucose{ - newTestStaticDatumMmolL(1.0)}, - } + testDataSetID = "test-data-set-id" ) var _ = Describe("Consumer", func() { @@ -54,12 +47,14 @@ var _ = Describe("Consumer", func() { UserID: testUserID, FollowedUserID: testFollowedUserID, Alerts: alerts.Alerts{ - Low: &alerts.LowAlert{ - Base: alerts.Base{ - Enabled: true}, - Threshold: alerts.Threshold{ - Value: 101.1, - Units: "mg/dL", + DataAlerts: alerts.DataAlerts{ + Low: &alerts.LowAlert{ + Base: alerts.Base{ + Enabled: true}, + Threshold: alerts.Threshold{ + Value: 101.1, + Units: "mg/dL", + }, }, }, }, @@ -72,6 +67,16 @@ var _ = Describe("Consumer", func() { Expect(deps.Session.MarkCalls).To(Equal(1)) }) + It("records device data events", func() { + blood := newTestStaticDatumMmolL(7.2) + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, 
deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Recorder.NumCallsFor(testFollowedUserID)).To(Equal(1)) + }) + It("consumes device data events", func() { blood := newTestStaticDatumMmolL(7.2) kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) @@ -102,7 +107,7 @@ var _ = Describe("Consumer", func() { c, deps := newConsumerTestDeps(docs) Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)). - To(MatchError(ContainSubstring("uploadID is nil"))) + To(MatchError(ContainSubstring("DataSetID is nil"))) Expect(deps.Session.MarkCalls).To(Equal(0)) }) @@ -112,13 +117,16 @@ var _ = Describe("Consumer", func() { docs := []interface{}{bson.M{}} c, deps := newConsumerTestDeps(docs) eval := newMockEvaluator() - eval.Evaluations[testFollowedUserID+testUploadID] = []mockEvaluatorResponse{ + eval.Evaluations[testFollowedUserID+testDataSetID] = []mockEvaluatorResponse{ { - Notifications: []*alerts.Notification{ + Notifications: []*alerts.NotificationWithHook{ { - Message: "something", - RecipientUserID: testUserID, - FollowedUserID: testFollowedUserID, + Notification: &alerts.Notification{ + Message: "something", + RecipientUserID: testUserID, + FollowedUserID: testFollowedUserID, + }, + Sent: func(time.Time) {}, }, }, }, @@ -126,67 +134,26 @@ var _ = Describe("Consumer", func() { c.Evaluator = eval Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) - - deps.Logger.AssertInfo("logging push notification") }) }) - Describe("Evaluator", func() { - Describe("Evaluate", func() { - It("checks that alerts config owners have permission", func() { + Describe("Reporter", func() { + Describe("Record", func() { + It("records the metadata for the user id", func() { testLogger := logtest.NewLogger() ctx := log.NewContextWithLogger(context.Background(), testLogger) - eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) - deps.Permissions.Allow(testUserID, permission.Follow, testFollowedUserID) - 
deps.Permissions.DenyAll(testUserNoPermsID, testFollowedUserID) - deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserNoPermsID)) - deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserID)) - - notes, err := eval.Evaluate(ctx, testFollowedUserID, testUploadID) - - Expect(err).To(Succeed()) - Expect(notes).To(ConsistOf(HaveField("RecipientUserID", testUserID))) - }) - - It("checks that alerts configs match the data set id", func() { - testLogger := logtest.NewLogger() - ctx := log.NewContextWithLogger(context.Background(), testLogger) - eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) - deps.Permissions.Allow(testUserID+"2", permission.Follow, testFollowedUserID) - deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserID+"2")) - deps.Permissions.Allow(testUserID, permission.Follow, testFollowedUserID) - wrongDataSetID := testAlertsConfigUrgentLow(testUserID) - wrongDataSetID.UploadID = "wrong" - deps.Alerts.Configs = append(deps.Alerts.Configs, wrongDataSetID) - - notes, err := eval.Evaluate(ctx, testFollowedUserID, testUploadID) - - Expect(err).To(Succeed()) - Expect(notes).To(ConsistOf(HaveField("RecipientUserID", testUserID+"2"))) - }) - - It("uses the longest delay", func() { - testLogger := logtest.NewLogger() - ctx := log.NewContextWithLogger(context.Background(), testLogger) - eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) - cfgWithShorterDelay := testAlertsConfigLow(testUserID) - deps.Alerts.Configs = append(deps.Alerts.Configs, cfgWithShorterDelay) - deps.Permissions.Allow(testUserID, permission.Follow, testFollowedUserID) - cfgWithLongerDelay := testAlertsConfigLow(testUserID + "2") - cfgWithLongerDelay.Alerts.Low.Delay = alerts.DurationMinutes(10 * time.Minute) - deps.Alerts.Configs = append(deps.Alerts.Configs, cfgWithLongerDelay) - deps.Permissions.Allow(testUserID+"2", 
permission.Follow, testFollowedUserID) - - _, err := eval.Evaluate(ctx, testFollowedUserID, testUploadID) - - Expect(err).To(Succeed()) - if Expect(len(deps.Data.GetAlertableDataInputs)).To(Equal(1)) { - Expect(deps.Data.GetAlertableDataInputs[0].Params.Start). - To(BeTemporally("~", time.Now().Add(-10*time.Minute), time.Second)) + mockRepo := newMockRecorderRepository() + rec := NewRecorder(mockRepo) + lastComm := alerts.LastCommunication{ + UserID: testFollowedUserID, + LastReceivedDeviceData: time.Now(), + DataSetID: "test", } + err := rec.RecordReceivedDeviceData(ctx, lastComm) + Expect(err).To(Succeed()) + Expect(mockRepo.NumCallsFor(testFollowedUserID)).To(Equal(1)) }) }) - }) }) @@ -199,6 +166,7 @@ type consumerTestDeps struct { Logger *logtest.Logger Permissions *mockPermissionsClient Pusher Pusher + Recorder *mockRecorder Repo *storetest.DataRepository Session *mockConsumerGroupSession } @@ -210,7 +178,9 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { { UserID: testUserID, FollowedUserID: testFollowedUserID, - Alerts: alerts.Alerts{}, + Alerts: alerts.Alerts{ + DataAlerts: alerts.DataAlerts{}, + }, }, }, nil) dataRepo := storetest.NewDataRepository() @@ -224,9 +194,7 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { evaluator := newMockStaticEvaluator() pusher := push.NewLogPusher(logger) deviceTokens := newMockDeviceTokens() - deviceTokens.Tokens = append(deviceTokens.Tokens, []*devicetokens.DeviceToken{ - {Apple: &devicetokens.AppleDeviceToken{}}, - }) + recorder := newMockRecorder() return &Consumer{ Alerts: alertsClient, @@ -235,6 +203,7 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { DeviceTokens: deviceTokens, Permissions: permissions, Pusher: pusher, + Recorder: recorder, }, &consumerTestDeps{ Alerts: alertsClient, Context: ctx, @@ -245,37 +214,11 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { Repo: dataRepo, Session: 
&mockConsumerGroupSession{}, Logger: logger, - //Tokens: tokens, - Permissions: permissions, - } -} - -func newEvaluatorTestDeps(responses []*store.AlertableResponse) (*evaluator, *evaluatorTestDeps) { - alertsClient := newMockAlertsConfigClient(nil, nil) - dataRepo := storetest.NewDataRepository() - dataRepo.GetLastUpdatedForUserOutputs = []storetest.GetLastUpdatedForUserOutput{} - for _, r := range responses { - out := storetest.GetAlertableDataOutput{Response: r} - dataRepo.GetAlertableDataOutputs = append(dataRepo.GetAlertableDataOutputs, out) - } - permissions := newMockPermissionsClient() - return &evaluator{ - Alerts: alertsClient, - Data: dataRepo, - Permissions: permissions, - }, &evaluatorTestDeps{ - Alerts: alertsClient, - Permissions: permissions, - Data: dataRepo, + Recorder: recorder, + Permissions: permissions, } } -type evaluatorTestDeps struct { - Alerts *mockAlertsConfigClient - Permissions *mockPermissionsClient - Data *storetest.DataRepository -} - // mockEvaluator implements Evaluator. 
type mockEvaluator struct { Evaluations map[string][]mockEvaluatorResponse @@ -283,7 +226,7 @@ type mockEvaluator struct { } type mockEvaluatorResponse struct { - Notifications []*alerts.Notification + Notifications []*alerts.NotificationWithHook Error error } @@ -294,8 +237,8 @@ func newMockEvaluator() *mockEvaluator { } } -func (e *mockEvaluator) Evaluate(ctx context.Context, followedUserID, dataSetID string) ( - []*alerts.Notification, error) { +func (e *mockEvaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( + []*alerts.NotificationWithHook, error) { key := followedUserID + dataSetID if _, found := e.Evaluations[key]; !found { @@ -332,8 +275,8 @@ func newMockStaticEvaluator() *mockStaticEvaluator { return &mockStaticEvaluator{newMockEvaluator()} } -func (e *mockStaticEvaluator) Evaluate(ctx context.Context, followedUserID, dataSetID string) ( - []*alerts.Notification, error) { +func (e *mockStaticEvaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( + []*alerts.NotificationWithHook, error) { e.EvaluateCalls[followedUserID] += 1 return nil, nil @@ -392,20 +335,22 @@ func newMockMongoCursor(docs []interface{}) *mongo.Cursor { return cur } -func newTestStaticDatumMmolL(value float64) *glucose.Glucose { - return &glucose.Glucose{ - Blood: blood.Blood{ - Base: types.Base{ - UserID: pointer.FromAny(testFollowedUserID), - Time: pointer.FromTime(time.Now()), - UploadID: pointer.FromAny(testUploadID), - }, - Units: pointer.FromString(nontypesglucose.MmolL), - Value: pointer.FromFloat64(value), - }, +type mockPusher struct { + Pushes []string +} + +func newMockPusher() *mockPusher { + return &mockPusher{ + Pushes: []string{}, } } +func (p *mockPusher) Push(ctx context.Context, + deviceToken *devicetokens.DeviceToken, notification *push.Notification) error { + p.Pushes = append(p.Pushes, notification.Message) + return nil +} + type mockAlertsConfigClient struct { Error error Configs []*alerts.Config @@ -522,65 
+467,93 @@ func (c *mockPermissionsClient) GetUserPermissions(ctx context.Context, requestU } } -func testAlertsConfigUrgentLow(userID string) *alerts.Config { - return &alerts.Config{ - UserID: userID, - FollowedUserID: testFollowedUserID, - UploadID: testUploadID, - Alerts: alerts.Alerts{ - UrgentLow: &alerts.UrgentLowAlert{ - Base: alerts.Base{ - Enabled: true, - Activity: alerts.Activity{}, - }, - Threshold: alerts.Threshold{ - Value: 10.0, - Units: nontypesglucose.MgdL, - }, - }, - }, +type mockRecorder struct { + recordCalls map[string]int + recordCallsMu sync.Mutex +} + +func newMockRecorder() *mockRecorder { + return &mockRecorder{ + recordCalls: map[string]int{}, } } -func testAlertsConfigLow(userID string) *alerts.Config { - return &alerts.Config{ - UserID: userID, - FollowedUserID: testFollowedUserID, - UploadID: testUploadID, - Alerts: alerts.Alerts{ - Low: &alerts.LowAlert{ - Base: alerts.Base{ - Enabled: true, - Activity: alerts.Activity{}, - }, - Threshold: alerts.Threshold{ - Value: 10.0, - Units: nontypesglucose.MgdL, - }, - }, - }, +func (r *mockRecorder) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + r.recordCalls[lastComm.UserID]++ + return nil +} + +func (r *mockRecorder) NumCallsFor(userID string) int { + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + return r.recordCalls[userID] +} + +type mockRecorderRepository struct { + recordCalls map[string]int + recordCallsMu sync.Mutex +} + +func newMockRecorderRepository() *mockRecorderRepository { + return &mockRecorderRepository{ + recordCalls: map[string]int{}, } } +func (r *mockRecorderRepository) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + r.recordCalls[lastComm.UserID]++ + return nil +} + +func (r *mockRecorderRepository) UsersWithoutCommunication(ctx context.Context) ( + 
[]alerts.LastCommunication, error) { + + return nil, nil +} + +func (r *mockRecorderRepository) NumCallsFor(userID string) int { + r.recordCallsMu.Lock() + defer r.recordCallsMu.Unlock() + return r.recordCalls[userID] +} + +func (r *mockRecorderRepository) EnsureIndexes() error { return nil } + type mockDeviceTokens struct { - Error error - Tokens [][]*devicetokens.DeviceToken + Tokens map[string][]*devicetokens.DeviceToken } func newMockDeviceTokens() *mockDeviceTokens { return &mockDeviceTokens{ - Tokens: [][]*devicetokens.DeviceToken{}, + Tokens: map[string][]*devicetokens.DeviceToken{}, } } func (t *mockDeviceTokens) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { - if t.Error != nil { - return nil, t.Error - } - if len(t.Tokens) > 0 { - ret := t.Tokens[0] - t.Tokens = t.Tokens[1:] - return ret, nil + if tokens, found := t.Tokens[userID]; found { + return tokens, nil } return nil, nil } + +func newTestStaticDatumMmolL(value float64) *glucose.Glucose { + return &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + UserID: pointer.FromAny(testFollowedUserID), + Time: pointer.FromTime(time.Now()), + UploadID: pointer.FromAny(testDataSetID), + }, + Units: pointer.FromString(nontypesglucose.MmolL), + Value: pointer.FromFloat64(value), + }, + } +} diff --git a/data/events/events.go b/data/events/events.go index da9c4f4597..54c73b2bc7 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -10,10 +10,10 @@ import ( "time" "github.com/IBM/sarama" - "github.com/tidepool-org/go-common/asyncevents" ev "github.com/tidepool-org/go-common/events" + "github.com/tidepool-org/platform/alerts" dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" dataStore "github.com/tidepool-org/platform/data/store" summaryStore "github.com/tidepool-org/platform/data/summary/store" @@ -544,3 +544,12 @@ func (c *CascadingConsumer) updateCascadeHeaders(headers []sarama.RecordHeader) return keep } + 
+type EventsRecorder interface { + // RecordReceivedDeviceData to support sending care partner alerts. + // + // Metadata about when we last received data for any given user is + // used to determine if alerts should be sent to the care partners + // of a given user. + RecordReceivedDeviceData(context.Context, alerts.LastCommunication) error +} diff --git a/data/events/recorder.go b/data/events/recorder.go new file mode 100644 index 0000000000..4bcee29d48 --- /dev/null +++ b/data/events/recorder.go @@ -0,0 +1,41 @@ +package events + +import ( + "context" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + lognull "github.com/tidepool-org/platform/log/null" +) + +type Recorder struct { + Repo alerts.RecordsRepository +} + +func NewRecorder(repo alerts.RecordsRepository) *Recorder { + return &Recorder{ + Repo: repo, + } +} + +func (r *Recorder) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + logger := r.log(ctx).WithFields(log.Fields{ + "userID": lastComm.UserID, + "dataSetID": lastComm.DataSetID, + }) + logger.Info("recording received data") + if err := r.Repo.RecordReceivedDeviceData(ctx, lastComm); err != nil { + return errors.Wrap(err, "Unable to record metadata on reception of device data") + } + return nil +} + +func (r *Recorder) log(ctx context.Context) log.Logger { + if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil { + return ctxLogger + } + return lognull.NewLogger() +} diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index a0aa2a354e..1326e983d2 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -25,6 +25,7 @@ func AlertsRoutes() []service.Route { service.Post("/v1/users/:userId/followers/:followerUserId/alerts", UpsertAlert, api.RequireAuth), service.Delete("/v1/users/:userId/followers/:followerUserId/alerts", DeleteAlert, api.RequireAuth), 
service.Get("/v1/users/:userId/followers/alerts", ListAlerts, api.RequireServer), + service.Get("/v1/users/without_communication", GetUsersWithoutCommunication, api.RequireServer), } } @@ -115,13 +116,8 @@ func UpsertAlert(dCtx service.Context) { return } - incomingCfg := &alerts.Config{} - var bodyReceiver interface{} = &incomingCfg.Alerts - if authDetails.IsService() && authDetails.UserID() == "" { - // Accept upload id only from services. - bodyReceiver = incomingCfg - } - if err := request.DecodeRequestBody(r.Request, bodyReceiver); err != nil { + cfg := &alerts.Config{} + if err := request.DecodeRequestBody(r.Request, cfg); err != nil { dCtx.RespondWithError(platform.ErrorJSONMalformed()) return } @@ -132,12 +128,6 @@ func UpsertAlert(dCtx service.Context) { return } - cfg := &alerts.Config{ - UserID: path.UserID, - FollowedUserID: path.FollowedUserID, - UploadID: incomingCfg.UploadID, - Alerts: incomingCfg.Alerts, - } if err := repo.Upsert(ctx, cfg); err != nil { dCtx.RespondWithError(platform.ErrorInternalServerFailure()) lgr.WithError(err).Error("upserting alerts config") @@ -181,6 +171,30 @@ func ListAlerts(dCtx service.Context) { responder.Data(http.StatusOK, alerts) } +func GetUsersWithoutCommunication(dCtx service.Context) { + r := dCtx.Request() + ctx := r.Context() + + authDetails := request.GetAuthDetails(ctx) + lgr := log.LoggerFromContext(ctx) + if err := checkAuthentication(authDetails); err != nil { + lgr.Debug("authentication failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + lastComms, err := dCtx.RecordsRepository().UsersWithoutCommunication(ctx) + if err != nil { + lgr.WithError(err).Debug("unable to list users without communication") + dCtx.RespondWithError(platform.ErrorInternalServerFailure()) + return + } + + lgr.WithField("found", len(lastComms)).WithField("lastComms", lastComms).Debug("/v1/users/without_communication") + + responder := request.MustNewResponder(dCtx.Response(), r) + 
responder.Data(http.StatusOK, lastComms) +} + // checkUserIDConsistency verifies the userIDs in a request. // // For safety reasons, if these values don't agree, return an error. @@ -197,7 +211,7 @@ func checkUserIDConsistency(details request.AuthDetails, userIDFromPath string) // checkAuthentication ensures that the request has an authentication token. func checkAuthentication(details request.AuthDetails) error { - if details.Token() == "" { + if details.HasToken() && details.Token() == "" { return platformerrors.New("unauthorized") } if details.IsUser() { diff --git a/data/service/api/v1/alerts_test.go b/data/service/api/v1/alerts_test.go index d48be38a6f..cdf829208b 100644 --- a/data/service/api/v1/alerts_test.go +++ b/data/service/api/v1/alerts_test.go @@ -3,7 +3,10 @@ package v1 import ( "bytes" "context" + "encoding/json" + "fmt" "net/http" + "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -17,10 +20,15 @@ import ( "github.com/tidepool-org/platform/service/test" ) +var testUserID = mocks.TestUserID1 +var testFollowedUserID = mocks.TestUserID2 + +const testDataSetID = "upid_000000000000" + func permsNoFollow() map[string]map[string]permission.Permissions { return map[string]map[string]permission.Permissions{ mocks.TestUserID1: { - mocks.TestUserID2: { + testFollowedUserID: { permission.Read: map[string]interface{}{}, }, }, @@ -32,11 +40,11 @@ var _ = Describe("Alerts endpoints", func() { testAuthenticationRequired := func(f func(dataservice.Context)) { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, })) dCtx := mocks.NewContext(t, "", "", body) - dCtx.MockAlertsRepository = newMockRepo() + dCtx.MockAlertsRepository = newMockAlertsRepo() badDetails := test.NewMockAuthDetails(request.MethodSessionToken, "", "") dCtx.WithAuthDetails(badDetails) @@ -49,11 +57,12 @@ var _ = Describe("Alerts 
endpoints", func() { testUserHasFollowPermission := func(f func(dataservice.Context)) { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, })) dCtx := mocks.NewContext(t, "", "", body) - dCtx.MockAlertsRepository = newMockRepo() + dCtx.MockAlertsRepository = newMockAlertsRepo() dCtx.MockPermissionClient = mocks.NewPermission(permsNoFollow(), nil, nil) f(dCtx) @@ -69,7 +78,7 @@ var _ = Describe("Alerts endpoints", func() { dCtx.WithAuthDetails(details) } dCtx.RESTRequest.PathParams["followerUserId"] = "bad" - repo := newMockRepo() + repo := newMockAlertsRepo() dCtx.MockAlertsRepository = repo f(dCtx) @@ -82,7 +91,7 @@ var _ = Describe("Alerts endpoints", func() { t := GinkgoT() body := bytes.NewBuffer([]byte(`"improper JSON data"`)) dCtx := mocks.NewContext(t, "", "", body) - repo := newMockRepo() + repo := newMockAlertsRepo() dCtx.MockAlertsRepository = repo f(dCtx) @@ -103,6 +112,24 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(DeleteAlert) }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + repo.AlertsForUserID[testFollowedUserID] = []*alerts.Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + }, + } + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + rec := dCtx.Recorder() + + DeleteAlert(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK)) + }) }) Describe("Upsert", func() { @@ -121,8 +148,65 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(UpsertAlert) }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + testCfg, _ := json.Marshal(testConfig()) + dCtx := mocks.NewContext(t, "", "", bytes.NewBuffer(testCfg)) + 
dCtx.MockAlertsRepository = repo + rec := dCtx.Recorder() + + UpsertAlert(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK)) + }) }) + Describe("ListAlerts", func() { + It("rejects unauthenticated users", func() { + testAuthenticationRequired(ListAlerts) + }) + + It("requires that the user's token matches the userID path param", func() { + testTokenUserIDMustMatchPathParam(ListAlerts, nil) + }) + + It("errors when no Config exists", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + dCtx.WithAuthDetails(mocks.ServiceAuthDetails()) + rec := dCtx.Recorder() + + ListAlerts(dCtx) + + Expect(rec.Code).To(Equal(http.StatusNotFound)) + }) + + It("succeeds", func() { + t := GinkgoT() + repo := newMockAlertsRepo() + dCtx := mocks.NewContext(t, "", "", nil) + dCtx.MockAlertsRepository = repo + dCtx.WithAuthDetails(mocks.ServiceAuthDetails()) + rec := dCtx.Recorder() + repo.AlertsForUserID[testFollowedUserID] = []*alerts.Config{ + {FollowedUserID: "foo", UserID: "bar"}, + } + + ListAlerts(dCtx) + + Expect(rec.Code).To(Equal(http.StatusOK), rec.Body.String()) + got := []*alerts.Config{} + Expect(json.NewDecoder(rec.Body).Decode(&got)).To(Succeed()) + if Expect(len(got)).To(Equal(1)) { + Expect(got[0].UserID).To(Equal("bar")) + Expect(got[0].FollowedUserID).To(Equal("foo")) + } + }) + }) Describe("Get", func() { It("rejects unauthenticated users", func() { testAuthenticationRequired(GetAlert) @@ -132,14 +216,14 @@ var _ = Describe("Alerts endpoints", func() { testTokenUserIDMustMatchPathParam(GetAlert, nil) }) - It("errors when Config doesn't exist", func() { + It("errors when no Config exists", func() { t := GinkgoT() body := bytes.NewBuffer(mocks.MustMarshalJSON(t, alerts.Config{ - UserID: mocks.TestUserID1, - FollowedUserID: mocks.TestUserID2, + UserID: testUserID, + FollowedUserID: testFollowedUserID, })) dCtx := mocks.NewContext(t, "", "", body) - repo := newMockRepo() + repo := 
newMockAlertsRepo() repo.ReturnsError(mongo.ErrNoDocuments) dCtx.MockAlertsRepository = repo @@ -151,23 +235,106 @@ var _ = Describe("Alerts endpoints", func() { It("rejects users without alerting permissions", func() { testUserHasFollowPermission(func(dCtx dataservice.Context) { - dCtx.Request().PathParams["userId"] = mocks.TestUserID2 + dCtx.Request().PathParams["userId"] = testFollowedUserID GetAlert(dCtx) }) }) + + It("succeeds", func() { + t := GinkgoT() + url := fmt.Sprintf("/v1/users/%s/followers/%s/alerts", testFollowedUserID, testUserID) + dCtx := mocks.NewContext(t, "GET", url, nil) + repo := newMockAlertsRepo() + repo.GetAlertsResponses[testUserID+testFollowedUserID] = &alerts.Config{ + FollowedUserID: "foo", + UserID: "bar", + } + dCtx.MockAlertsRepository = repo + + GetAlert(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + got := &alerts.Config{} + Expect(json.NewDecoder(rec.Body).Decode(got)).To(Succeed()) + Expect(got.UserID).To(Equal("bar")) + Expect(got.FollowedUserID).To(Equal("foo")) + }) + }) + + Describe("GetUsersWithoutCommunication", func() { + It("rejects unauthenticated users", func() { + testAuthenticationRequired(GetUsersWithoutCommunication) + }) + + It("succeeds, even when there are no users found", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + dCtx.MockRecordsRepository = newMockRecordsRepo() + GetUsersWithoutCommunication(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + }) + + It("errors when the upstream repo errors", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + recordsRepo := newMockRecordsRepo() + recordsRepo.UsersWithoutCommunicationError = fmt.Errorf("test error") + dCtx.MockRecordsRepository = recordsRepo + + GetUsersWithoutCommunication(dCtx) + + rec := 
dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusInternalServerError)) + }) + + It("returns users without communication when they exist", func() { + t := GinkgoT() + dCtx := mocks.NewContext(t, "", "", nil) + alertsRepo := newMockAlertsRepo() + dCtx.MockAlertsRepository = alertsRepo + recordsRepo := newMockRecordsRepo() + testTime := time.Unix(123, 456) + recordsRepo.UsersWithoutCommunicationResponses = [][]alerts.LastCommunication{ + { + { + LastReceivedDeviceData: testTime, + }, + }, + } + dCtx.MockRecordsRepository = recordsRepo + + GetUsersWithoutCommunication(dCtx) + + rec := dCtx.Recorder() + Expect(rec.Code).To(Equal(http.StatusOK)) + got := []alerts.LastCommunication{} + Expect(json.NewDecoder(rec.Body).Decode(&got)).To(Succeed()) + if Expect(len(got)).To(Equal(1)) { + Expect(got[0].LastReceivedDeviceData).To(BeTemporally("==", testTime)) + } + }) }) }) type mockRepo struct { - UserID string - Error error - AlertsForUserID map[string][]*alerts.Config + UserID string + Error error + AlertsForUserID map[string][]*alerts.Config + GetAlertsResponses map[string]*alerts.Config } -func newMockRepo() *mockRepo { +func newMockAlertsRepo() *mockRepo { return &mockRepo{ - AlertsForUserID: make(map[string][]*alerts.Config), + AlertsForUserID: map[string][]*alerts.Config{}, + GetAlertsResponses: map[string]*alerts.Config{}, } } @@ -192,6 +359,9 @@ func (r *mockRepo) Get(ctx context.Context, conf *alerts.Config) (*alerts.Config if conf != nil { r.UserID = conf.UserID } + if resp, found := r.GetAlertsResponses[conf.UserID+conf.FollowedUserID]; found { + return resp, nil + } return &alerts.Config{}, nil } @@ -220,3 +390,47 @@ func (r *mockRepo) List(ctx context.Context, userID string) ([]*alerts.Config, e func (r *mockRepo) EnsureIndexes() error { return nil } + +type mockRecordsRepo struct { + UsersWithoutCommunicationResponses [][]alerts.LastCommunication + UsersWithoutCommunicationError error +} + +func newMockRecordsRepo() *mockRecordsRepo { + return &mockRecordsRepo{ + 
UsersWithoutCommunicationResponses: [][]alerts.LastCommunication{}, + } +} + +func (r *mockRecordsRepo) RecordReceivedDeviceData(_ context.Context, + _ alerts.LastCommunication) error { + + return nil +} + +func (r *mockRecordsRepo) UsersWithoutCommunication(_ context.Context) ( + []alerts.LastCommunication, error) { + + if r.UsersWithoutCommunicationError != nil { + return nil, r.UsersWithoutCommunicationError + } + + if len(r.UsersWithoutCommunicationResponses) > 0 { + ret := r.UsersWithoutCommunicationResponses[0] + r.UsersWithoutCommunicationResponses = r.UsersWithoutCommunicationResponses[1:] + return ret, nil + } + return nil, nil +} + +func (r *mockRecordsRepo) EnsureIndexes() error { + return nil +} + +func testConfig() *alerts.Config { + return &alerts.Config{ + UserID: testUserID, + FollowedUserID: testFollowedUserID, + UploadID: testDataSetID, + } +} diff --git a/data/service/api/v1/mocks/context.go b/data/service/api/v1/mocks/context.go index 86c804b906..8b7741adc2 100644 --- a/data/service/api/v1/mocks/context.go +++ b/data/service/api/v1/mocks/context.go @@ -10,6 +10,8 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data/service/context" + log "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" servicecontext "github.com/tidepool-org/platform/service/context" @@ -22,17 +24,20 @@ type Context struct { T likeT // authDetails should be updated via the WithAuthDetails method. 
- authDetails *test.MockAuthDetails - RESTRequest *rest.Request - ResponseWriter rest.ResponseWriter - recorder *httptest.ResponseRecorder - MockAlertsRepository alerts.Repository - MockPermissionClient permission.Client + authDetails *test.MockAuthDetails + RESTRequest *rest.Request + ResponseWriter rest.ResponseWriter + recorder *httptest.ResponseRecorder + MockAlertsRepository alerts.Repository + MockPermissionClient permission.Client + MockRecordsRepository alerts.RecordsRepository } func NewContext(t likeT, method, url string, body io.Reader) *Context { details := DefaultAuthDetails() ctx := request.NewContextWithAuthDetails(stdcontext.Background(), details) + lgr := logtest.NewLogger() + ctx = log.NewContextWithLogger(ctx, lgr) r, err := http.NewRequestWithContext(ctx, method, url, body) if err != nil { t.Fatalf("error creating request: %s", err) @@ -99,3 +104,7 @@ func (c *Context) AlertsRepository() alerts.Repository { func (c *Context) PermissionClient() permission.Client { return c.MockPermissionClient } + +func (c *Context) RecordsRepository() alerts.RecordsRepository { + return c.MockRecordsRepository +} diff --git a/data/service/api/v1/users_datasets_create_test.go b/data/service/api/v1/users_datasets_create_test.go index 26004f79fb..da16ef557e 100644 --- a/data/service/api/v1/users_datasets_create_test.go +++ b/data/service/api/v1/users_datasets_create_test.go @@ -5,6 +5,7 @@ import ( "net/http" "strings" + dataService "github.com/tidepool-org/platform/data/service" "github.com/tidepool-org/platform/data/summary/reporters" "github.com/tidepool-org/platform/clinics" @@ -84,6 +85,8 @@ type testingT interface { Fatalf(format string, args ...any) } +var _ dataService.Context = (*mockDataServiceContext)(nil) + type mockDataServiceContext struct { t testingT @@ -224,3 +227,7 @@ func (c *mockDataServiceContext) DataSourceClient() dataSource.Client { func (c *mockDataServiceContext) SummaryReporter() *reporters.PatientRealtimeDaysReporter { panic("not 
implemented") } + +func (c *mockDataServiceContext) RecordsRepository() alerts.RecordsRepository { + panic("not implemented") +} diff --git a/data/service/context.go b/data/service/context.go index 7ea41f3005..5c5e334c1a 100644 --- a/data/service/context.go +++ b/data/service/context.go @@ -29,6 +29,7 @@ type Context interface { SummaryRepository() dataStore.SummaryRepository SyncTaskRepository() syncTaskStore.SyncTaskRepository AlertsRepository() alerts.Repository + RecordsRepository() alerts.RecordsRepository SummarizerRegistry() *summary.SummarizerRegistry SummaryReporter() *reporters.PatientRealtimeDaysReporter diff --git a/data/service/context/standard.go b/data/service/context/standard.go index 79d7a6c95d..995a48df66 100644 --- a/data/service/context/standard.go +++ b/data/service/context/standard.go @@ -41,6 +41,7 @@ type Standard struct { clinicsClient clinics.Client dataSourceClient dataSource.Client alertsRepository alerts.Repository + recordsRepository alerts.RecordsRepository } func WithContext(authClient auth.Client, metricClient metric.Client, permissionClient permission.Client, @@ -129,6 +130,9 @@ func (s *Standard) Close() { if s.alertsRepository != nil { s.alertsRepository = nil } + if s.recordsRepository != nil { + s.recordsRepository = nil + } } func (s *Standard) AuthClient() auth.Client { @@ -208,3 +212,10 @@ func (s *Standard) AlertsRepository() alerts.Repository { } return s.alertsRepository } + +func (s *Standard) RecordsRepository() alerts.RecordsRepository { + if s.recordsRepository == nil { + s.recordsRepository = s.dataStore.NewRecorderRepository() + } + return s.recordsRepository +} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 2937e28662..4988459148 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -10,6 +10,7 @@ import ( eventsCommon "github.com/tidepool-org/go-common/events" + "github.com/tidepool-org/platform/alerts" 
"github.com/tidepool-org/platform/application" "github.com/tidepool-org/platform/clinics" dataDeduplicatorDeduplicator "github.com/tidepool-org/platform/data/deduplicator/deduplicator" @@ -493,16 +494,21 @@ func (s *Standard) initializeAlertsEventsHandler() error { prefixedTopics = append(prefixedTopics, topicPrefix+topic) } - alerts := s.dataStore.NewAlertsRepository() - dataRepo := s.dataStore.NewDataRepository() + alertsRepo := s.dataStore.NewAlertsRepository() + dataRepo := s.dataStore.NewAlertsDataRepository() + recorderRepo := s.dataStore.NewRecorderRepository() + + alertsEvaluator := alerts.NewEvaluator(alertsRepo, dataRepo, s.permissionClient, s.Logger()) + ec := &dataEvents.Consumer{ - Alerts: alerts, + Alerts: alertsRepo, + Evaluator: alertsEvaluator, Data: dataRepo, DeviceTokens: s.AuthClient(), - Evaluator: dataEvents.NewAlertsEvaluator(alerts, dataRepo, s.permissionClient), + Logger: s.Logger(), Permissions: s.permissionClient, Pusher: s.pusher, - Logger: s.Logger(), + Recorder: dataEvents.NewRecorder(recorderRepo), } runnerCfg := dataEvents.SaramaRunnerConfig{ diff --git a/data/store/mongo/mongo.go b/data/store/mongo/mongo.go index 8ebfa97239..41c2316616 100644 --- a/data/store/mongo/mongo.go +++ b/data/store/mongo/mongo.go @@ -29,6 +29,7 @@ func (s *Store) EnsureIndexes() error { dataRepository := s.NewDataRepository() summaryRepository := s.NewSummaryRepository() alertsRepository := s.NewAlertsRepository() + recorderRepository := s.NewRecorderRepository() if err := dataRepository.EnsureIndexes(); err != nil { return err @@ -42,6 +43,10 @@ func (s *Store) EnsureIndexes() error { return err } + if err := recorderRepository.EnsureIndexes(); err != nil { + return err + } + return nil } @@ -66,3 +71,13 @@ func (s *Store) NewAlertsRepository() alerts.Repository { r := alertsRepo(*s.Store.GetRepository("alerts")) return &r } + +func (s *Store) NewRecorderRepository() alerts.RecordsRepository { + r := recorderRepo(*s.Store.GetRepository("records")) + return 
&r +} + +func (s *Store) NewAlertsDataRepository() alerts.DataRepository { + r := alertsDataRepo(*s.Store.GetRepository("deviceData")) + return &r +} diff --git a/data/store/mongo/mongo_alerts.go b/data/store/mongo/mongo_alerts.go index 489db755fe..ba37b52f2b 100644 --- a/data/store/mongo/mongo_alerts.go +++ b/data/store/mongo/mongo_alerts.go @@ -3,12 +3,16 @@ package mongo import ( "context" "fmt" + "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose/continuous" + "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/errors" structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" ) @@ -17,9 +21,21 @@ import ( type alertsRepo structuredmongo.Repository // Upsert will create or update the given Config. +// +// Once set, UploadID, UserID, and FollowedUserID cannot be changed. This is to prevent a +// user from granting themselves access to another data set. 
func (r *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { opts := options.Update().SetUpsert(true) - _, err := r.UpdateOne(ctx, r.filter(conf), bson.M{"$set": conf}, opts) + filter := bson.D{ + {Key: "userId", Value: conf.UserID}, + {Key: "followedUserId", Value: conf.FollowedUserID}, + {Key: "uploadId", Value: conf.UploadID}, + } + doc := bson.M{ + "$set": conf.Alerts, + "$setOnInsert": filter, + } + _, err := r.UpdateOne(ctx, filter, doc, opts) if err != nil { return fmt.Errorf("upserting alerts.Config: %w", err) } @@ -85,8 +101,60 @@ func (r *alertsRepo) EnsureIndexes() error { } func (r *alertsRepo) filter(cfg *alerts.Config) interface{} { - return &alerts.Config{ - UserID: cfg.UserID, - FollowedUserID: cfg.FollowedUserID, + return bson.D{ + {Key: "userId", Value: cfg.UserID}, + {Key: "followedUserId", Value: cfg.FollowedUserID}, + } +} + +type alertsDataRepo structuredmongo.Repository + +func (d *alertsDataRepo) GetAlertableData(ctx context.Context, + params alerts.GetAlertableDataParams) (*alerts.GetAlertableDataResponse, error) { + + if params.End.IsZero() { + params.End = time.Now() + } + + cursor, err := d.getAlertableData(ctx, params, dosingdecision.Type) + if err != nil { + return nil, err + } + dosingDecisions := []*dosingdecision.DosingDecision{} + if err := cursor.All(ctx, &dosingDecisions); err != nil { + return nil, errors.Wrap(err, "Unable to load alertable dosing documents") + } + cursor, err = d.getAlertableData(ctx, params, continuous.Type) + if err != nil { + return nil, err + } + glucoseData := []*glucose.Glucose{} + if err := cursor.All(ctx, &glucoseData); err != nil { + return nil, errors.Wrap(err, "Unable to load alertable glucose documents") + } + response := &alerts.GetAlertableDataResponse{ + DosingDecisions: dosingDecisions, + Glucose: glucoseData, + } + + return response, nil +} + +func (d *alertsDataRepo) getAlertableData(ctx context.Context, + params alerts.GetAlertableDataParams, typ string) (*mongo.Cursor, 
error) { + + selector := bson.M{ + "_active": true, + "uploadId": params.UploadID, + "type": typ, + "_userId": params.UserID, + "time": bson.M{"$gte": params.Start, "$lte": params.End}, + } + findOptions := options.Find().SetSort(bson.D{{Key: "time", Value: -1}}) + cursor, err := d.Find(ctx, selector, findOptions) + if err != nil { + format := "Unable to find alertable %s data in dataset %s" + return nil, errors.Wrapf(err, format, typ, params.UploadID) } + return cursor, nil } diff --git a/data/store/mongo/mongo_data.go b/data/store/mongo/mongo_data.go index 4076a968af..c827999bc9 100644 --- a/data/store/mongo/mongo_data.go +++ b/data/store/mongo/mongo_data.go @@ -195,10 +195,6 @@ func (d *DataRepository) DestroyDataForUserByID(ctx context.Context, userID stri return nil } -func (d *DataRepository) mongoClient() *mongo.Client { - return d.DatumRepository.Database().Client() -} - func isTypeUpload(typ []string) bool { return slices.Contains(typ, strings.ToLower(upload.Type)) } diff --git a/data/store/mongo/mongo_datum.go b/data/store/mongo/mongo_datum.go index 977c6f813b..159c7ec93f 100644 --- a/data/store/mongo/mongo_datum.go +++ b/data/store/mongo/mongo_datum.go @@ -11,14 +11,9 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/data" - "github.com/tidepool-org/platform/data/store" "github.com/tidepool-org/platform/data/summary/types" baseDatum "github.com/tidepool-org/platform/data/types" - "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/data/types/blood/glucose/continuous" - "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/data/types/upload" - platerrors "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" structureValidator "github.com/tidepool-org/platform/structure/validator" @@ -646,56 +641,6 @@ func (d *DatumRepository) 
GetDataRange(ctx context.Context, userId string, typ [ return cursor, nil } -func (d *DatumRepository) GetAlertableData(ctx context.Context, - params store.AlertableParams) (*store.AlertableResponse, error) { - - if params.End.IsZero() { - params.End = time.Now() - } - - cursor, err := d.getAlertableData(ctx, params, dosingdecision.Type) - if err != nil { - return nil, err - } - dosingDecisions := []*dosingdecision.DosingDecision{} - if err := cursor.All(ctx, &dosingDecisions); err != nil { - return nil, platerrors.Wrap(err, "Unable to load alertable dosing documents") - } - cursor, err = d.getAlertableData(ctx, params, continuous.Type) - if err != nil { - return nil, err - } - glucoseData := []*glucose.Glucose{} - if err := cursor.All(ctx, &glucoseData); err != nil { - return nil, platerrors.Wrap(err, "Unable to load alertable glucose documents") - } - response := &store.AlertableResponse{ - DosingDecisions: dosingDecisions, - Glucose: glucoseData, - } - - return response, nil -} - -func (d *DatumRepository) getAlertableData(ctx context.Context, - params store.AlertableParams, typ string) (*mongo.Cursor, error) { - - selector := bson.M{ - "_active": true, - "uploadId": params.UploadID, - "type": typ, - "_userId": params.UserID, - "time": bson.M{"$gte": params.Start, "$lte": params.End}, - } - findOptions := options.Find().SetSort(bson.D{{Key: "time", Value: -1}}) - cursor, err := d.Find(ctx, selector, findOptions) - if err != nil { - format := "Unable to find alertable %s data in dataset %s" - return nil, platerrors.Wrapf(err, format, typ, params.UploadID) - } - return cursor, nil -} - func (d *DatumRepository) getTimeRange(ctx context.Context, userId string, typ []string, status *data.UserDataStatus) (err error) { timestamp := time.Now().UTC() futureCutoff := timestamp.AddDate(0, 0, 1) diff --git a/data/store/mongo/mongo_recorder.go b/data/store/mongo/mongo_recorder.go new file mode 100644 index 0000000000..fa3000f8bd --- /dev/null +++ 
b/data/store/mongo/mongo_recorder.go @@ -0,0 +1,74 @@ +package mongo + +import ( + "context" + "fmt" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" + structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" +) + +// recorderRepo implements RecorderRepository, writing data to a MongoDB collection. +type recorderRepo structuredmongo.Repository + +func (r *recorderRepo) RecordReceivedDeviceData(ctx context.Context, + lastComm alerts.LastCommunication) error { + + opts := options.Update().SetUpsert(true) + _, err := r.UpdateOne(ctx, r.filter(lastComm), bson.M{"$set": lastComm}, opts) + if err != nil { + return fmt.Errorf("upserting alerts.LastCommunication: %w", err) + } + return nil +} + +func (r *recorderRepo) EnsureIndexes() error { + repo := structuredmongo.Repository(*r) + return (&repo).CreateAllIndexes(context.Background(), []mongo.IndexModel{ + { + Keys: bson.D{ + {Key: "lastReceivedDeviceData", Value: 1}, + }, + Options: options.Index(). + SetName("LastReceivedDeviceData"), + }, + { + Keys: bson.D{ + {Key: "dataSetId", Value: 1}, + }, + Options: options.Index(). + SetUnique(true). 
+ SetName("DataSetIdUnique"), + }, + }) +} + +func (r *recorderRepo) filter(lastComm alerts.LastCommunication) map[string]any { + return map[string]any{ + "userId": lastComm.UserID, + "dataSetId": lastComm.DataSetID, + } +} + +func (r *recorderRepo) UsersWithoutCommunication(ctx context.Context) ([]alerts.LastCommunication, error) { + start := time.Now().Add(-5 * time.Minute) + selector := bson.M{ + "lastReceivedDeviceData": bson.M{"$lte": start}, + } + findOptions := options.Find().SetSort(bson.D{{Key: "lastReceivedDeviceData", Value: 1}}) + cursor, err := r.Find(ctx, selector, findOptions) + if err != nil { + return nil, errors.Wrap(err, "Unable to list users without communication") + } + records := []alerts.LastCommunication{} + if err := cursor.All(ctx, &records); err != nil { + return nil, errors.Wrap(err, "Unable to iterate users without communication cursor") + } + return records, nil +} diff --git a/data/store/mongo/mongo_test.go b/data/store/mongo/mongo_test.go index 7b0a0fd2a4..a241cf3c0b 100644 --- a/data/store/mongo/mongo_test.go +++ b/data/store/mongo/mongo_test.go @@ -2,6 +2,7 @@ package mongo_test import ( "context" + "encoding/json" "fmt" "math/rand" "sync" @@ -17,6 +18,7 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data" + "github.com/tidepool-org/platform/data/service/api/v1/mocks" dataStore "github.com/tidepool-org/platform/data/store" dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" dataTest "github.com/tidepool-org/platform/data/test" @@ -238,8 +240,10 @@ func DataSetDatumAsInterface(dataSetDatum data.Datum) interface{} { var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { var repository dataStore.DataRepository + var alertsDataRepository alerts.DataRepository var summaryRepository dataStore.SummaryRepository var alertsRepository alerts.Repository + var recordsRepository alerts.RecordsRepository var logger = logTest.NewLogger() var store *dataStoreMongo.Store 
@@ -266,6 +270,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { var dataSetCollection *mongo.Collection var summaryCollection *mongo.Collection var alertsCollection *mongo.Collection + var recordsCollection *mongo.Collection var collectionsOnce sync.Once BeforeEach(func() { @@ -274,6 +279,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { dataSetCollection = store.GetCollection("deviceDataSets") summaryCollection = store.GetCollection("summary") alertsCollection = store.GetCollection("alerts") + recordsCollection = store.GetCollection("records") }) }) @@ -289,6 +295,8 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(err).To(Succeed()) _, err = alertsCollection.DeleteMany(ctx, all) Expect(err).To(Succeed()) + _, err = recordsCollection.DeleteMany(ctx, all) + Expect(err).To(Succeed()) }) Context("EnsureIndexes", func() { @@ -445,14 +453,24 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { }) }) + Context("NewRecordsRepository", func() { + It("returns a new repository", func() { + recordsRepository = store.NewRecorderRepository() + Expect(recordsRepository).ToNot(BeNil()) + }) + }) + Context("with a new repository", func() { BeforeEach(func() { repository = store.NewDataRepository() summaryRepository = store.NewSummaryRepository() alertsRepository = store.NewAlertsRepository() + alertsDataRepository = store.NewAlertsDataRepository() + recordsRepository = store.NewRecorderRepository() Expect(repository).ToNot(BeNil()) Expect(summaryRepository).ToNot(BeNil()) Expect(alertsRepository).ToNot(BeNil()) + Expect(alertsDataRepository).ToNot(BeNil()) }) Context("with persisted data sets", func() { @@ -2409,13 +2427,15 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(repository.CreateDataSet(ctx, testSet)).To(Succeed()) testSetData := testDataSetData(testSet) Expect(repository.CreateDataSetData(ctx, testSet, 
testSetData)).To(Succeed()) + alertsDataRepository = store.NewAlertsDataRepository() + Expect(alertsDataRepository).ToNot(BeNil()) - params := dataStore.AlertableParams{ + params := alerts.GetAlertableDataParams{ Start: time.Now().Add(-time.Hour), UserID: testUserID, UploadID: *testSet.UploadID, } - resp, err := repository.GetAlertableData(ctx, params) + resp, err := alertsDataRepository.GetAlertableData(ctx, params) Expect(err).To(Succeed()) Expect(resp).ToNot(BeNil()) @@ -2427,6 +2447,11 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Context("alerts", func() { BeforeEach(func() { + var err error + ctx := context.Background() + all := bson.D{} + _, err = alertsCollection.DeleteMany(ctx, all) + Expect(err).To(Succeed()) alertsRepository = store.NewAlertsRepository() Expect(alertsRepository).ToNot(BeNil()) }) @@ -2471,10 +2496,69 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { res := store.GetCollection("alerts").FindOne(ctx, filter) Expect(res.Err()).To(Succeed()) Expect(res.Decode(doc)).To(Succeed()) - Expect(doc.Low).ToNot(BeNil()) + jsonOut, _ := json.Marshal(doc) + Expect(doc.Low).ToNot(BeNil(), string(jsonOut)) Expect(doc.Low.Base.Enabled).To(Equal(true)) }) + It("sets userId, followedUserId, and uploadId only on creation", func() { + ctx, cfg, filter := prep(false) + cfg.UploadID = "something" + + Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) + doc := &alerts.Config{} + res := store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.UserID).To(Equal("user-id")) + Expect(doc.FollowedUserID).To(Equal("followed-user-id")) + Expect(doc.UploadID).To(Equal("something")) + + testDelay := 42 * time.Minute + doc.Alerts.Low = &alerts.LowAlert{} + doc.Alerts.Low.Delay = alerts.DurationMinutes(testDelay) + doc.UploadID = "something else" + doc.UserID = "new junk" + doc.FollowedUserID = "this shouldn't be" + + 
Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) + res = store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.UploadID).To(Equal("something")) + Expect(doc.FollowedUserID).To(Equal("followed-user-id")) + Expect(doc.UserID).To(Equal("user-id")) + Expect(doc.Low.Delay.Duration()).To(Equal(testDelay)) + }) + + It("updates the Config's Activity", func() { + ctx, cfg, filter := prep(true) + testTriggered := time.Now().Add(-5 * time.Minute) + testSent := time.Now().Add(-3 * time.Minute) + cfg.Low = &alerts.LowAlert{ + Base: alerts.Base{ + Enabled: true, + Activity: alerts.Activity{ + Triggered: testTriggered, + Sent: testSent, + // Resolved is unset, so it should be a zero value. + }, + }, + } + + err := alertsRepository.Upsert(ctx, cfg) + Expect(err).To(Succeed()) + + doc := &alerts.Config{} + res := store.GetCollection("alerts").FindOne(ctx, filter) + Expect(res.Err()).To(Succeed()) + Expect(res.Decode(doc)).To(Succeed()) + Expect(doc.Low).ToNot(BeNil()) + Expect(doc.Low.Base.Enabled).To(Equal(true)) + Expect(doc.Low.Triggered).To(BeTemporally("~", testTriggered, time.Millisecond)) + Expect(doc.Low.Sent).To(BeTemporally("~", testSent, time.Millisecond)) + Expect(doc.Low.Resolved).To(Equal(time.Time{})) + }) }) Describe("Get", func() { @@ -2493,10 +2577,13 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { UserID: "879d5cb2-f70d-4b05-8d38-fb6d88ef2ea9", FollowedUserID: "d2ee01db-3458-42ac-95d2-ac2fc571a21d", Alerts: alerts.Alerts{ - High: &alerts.HighAlert{ - Base: alerts.Base{Enabled: true}, + DataAlerts: alerts.DataAlerts{ + High: &alerts.HighAlert{ + Base: alerts.Base{Enabled: true}, + }, }, - }} + }, + } Expect(alertsRepository.Upsert(ctx, other)).To(Succeed()) cfg.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} err := alertsRepository.Upsert(ctx, cfg) @@ -2523,6 +2610,71 @@ var _ = Describe("Mongo", Label("mongodb", "slow", 
"integration"), func() { Expect(res.Err()).To(MatchError(mongo.ErrNoDocuments)) }) }) + + Describe("List", func() { + It("lists only matching configs", func() { + ctx, cfg, _ := prep(true) + cfg2 := &alerts.Config{ + FollowedUserID: "followed-user-id-2", + UserID: "user-id", + } + Expect(alertsRepository.Upsert(ctx, cfg2)).To(Succeed()) + cfg3 := &alerts.Config{ + FollowedUserID: "followed-user-id", + UserID: "user-id-2", + } + Expect(alertsRepository.Upsert(ctx, cfg3)).To(Succeed()) + + got, err := alertsRepository.List(ctx, cfg.FollowedUserID) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(2)) + }) + }) + }) + + Context("recorder", func() { + + BeforeEach(func() { + recordsRepository = store.NewRecorderRepository() + Expect(recordsRepository).ToNot(BeNil()) + }) + + Describe("UsersWithoutCommunication", func() { + It("retrieves matching records", func() { + ctx := context.Background() + got, err := recordsRepository.UsersWithoutCommunication(ctx) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(0)) + }) + + It("retrieves matching records2", func() { + ctx := context.Background() + testLastComm := alerts.LastCommunication{ + UserID: testUserID, + DataSetID: testDataSetID, + LastReceivedDeviceData: time.Unix(123, 456), + } + Expect(recordsRepository.RecordReceivedDeviceData(ctx, testLastComm)).To(Succeed()) + testLastComm2 := alerts.LastCommunication{ + UserID: testUserID + "2", + DataSetID: testDataSetID + "2", + LastReceivedDeviceData: time.Now(), + } + Expect(recordsRepository.RecordReceivedDeviceData(ctx, testLastComm2)).To(Succeed()) + + got, err := recordsRepository.UsersWithoutCommunication(ctx) + Expect(err).To(Succeed()) + Expect(len(got)).To(Equal(1)) + }) + + It("is true", func() { + Expect(true).To(BeTrue()) + }) + }) }) }) }) + +var testUserID = mocks.TestUserID1 + +const testDataSetID = "blah" diff --git a/data/store/store.go b/data/store/store.go index 7410d76c88..0c33141f1c 100644 --- a/data/store/store.go +++ b/data/store/store.go 
@@ -7,9 +7,6 @@ import ( "go.mongodb.org/mongo-driver/mongo" "github.com/tidepool-org/platform/alerts" - "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/data/types/dosingdecision" - "github.com/tidepool-org/platform/data" "github.com/tidepool-org/platform/data/types/upload" "github.com/tidepool-org/platform/page" @@ -23,6 +20,7 @@ type Store interface { NewDataRepository() DataRepository NewSummaryRepository() SummaryRepository NewAlertsRepository() alerts.Repository + NewRecorderRepository() alerts.RecordsRepository } // DataSetRepository is the interface for interacting and modifying @@ -65,9 +63,6 @@ type DatumRepository interface { GetDataRange(ctx context.Context, userId string, typ []string, status *data.UserDataStatus) (*mongo.Cursor, error) GetLastUpdatedForUser(ctx context.Context, userId string, typ []string, lastUpdated time.Time) (*data.UserDataStatus, error) DistinctUserIDs(ctx context.Context, typ []string) ([]string, error) - - // GetAlertableData queries for the data used to evaluate alerts configurations. - GetAlertableData(ctx context.Context, params AlertableParams) (*AlertableResponse, error) } // DataRepository is the combined interface of DataSetRepository and @@ -98,19 +93,3 @@ type SummaryRepository interface { GetStore() *storeStructuredMongo.Repository } - -type AlertableParams struct { - // UserID of the user that owns the data. - UserID string - // UploadID of the device data set to query. - UploadID string - // Start limits the data to those recorded after this time. - Start time.Time - // End limits the data to those recorded before this time. 
- End time.Time -} - -type AlertableResponse struct { - Glucose []*glucose.Glucose - DosingDecisions []*dosingdecision.DosingDecision -} diff --git a/data/store/test/data_repository.go b/data/store/test/data_repository.go index ff5a857bcd..e1dd86df33 100644 --- a/data/store/test/data_repository.go +++ b/data/store/test/data_repository.go @@ -8,6 +8,7 @@ import ( "github.com/onsi/gomega" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data" dataStore "github.com/tidepool-org/platform/data/store" "github.com/tidepool-org/platform/data/types/upload" @@ -182,11 +183,11 @@ type DistinctUserIDsOutput struct { type GetAlertableDataInput struct { Context context.Context - Params dataStore.AlertableParams + Params alerts.GetAlertableDataParams } type GetAlertableDataOutput struct { - Response *dataStore.AlertableResponse + Response *alerts.GetAlertableDataResponse Error error } @@ -528,7 +529,7 @@ func (d *DataRepository) DistinctUserIDs(ctx context.Context, typ []string) ([]s return output.UserIDs, output.Error } -func (d *DataRepository) GetAlertableData(ctx context.Context, params dataStore.AlertableParams) (*dataStore.AlertableResponse, error) { +func (d *DataRepository) GetAlertableData(ctx context.Context, params alerts.GetAlertableDataParams) (*alerts.GetAlertableDataResponse, error) { d.GetAlertableDataInvocations++ d.GetAlertableDataInputs = append(d.GetAlertableDataInputs, GetAlertableDataInput{Context: ctx, Params: params}) diff --git a/log/gocommon_adapter.go b/log/gocommon_adapter.go deleted file mode 100644 index 14a35cbfaa..0000000000 --- a/log/gocommon_adapter.go +++ /dev/null @@ -1,54 +0,0 @@ -package log - -import ( - "context" - "fmt" - "log/slog" -) - -// GoCommonAdapter implements gocommon's asyncevents.Logger interface. -// -// It adapts a Logger for the purpose. 
-type GoCommonAdapter struct { - Logger Logger -} - -func (a *GoCommonAdapter) Log(ctx context.Context, level slog.Level, msg string, args ...any) { - logger := a.Logger - if fields := a.fieldsFromArgs(args); len(fields) > 0 { - logger = logger.WithFields(fields) - } - logger.Log(SlogLevelToLevel[level], msg) -} - -// fieldsFromArgs builds a Fields following the same rules as slog.Log. -// -// As Fields is a map instead of a slice, !BADKEY becomes !BADKEY[x] where -// x is the index counter of the value. See the godoc for slog.Log for -// details. -func (a *GoCommonAdapter) fieldsFromArgs(args []any) Fields { - fields := Fields{} - for i := 0; i < len(args); i++ { - switch v := args[i].(type) { - case slog.Attr: - fields[v.Key] = v.Value - case string: - if i+1 < len(args) { - fields[v] = args[i+1] - i++ - } else { - fields[fmt.Sprintf("!BADKEY[%d]", i)] = v - } - default: - fields[fmt.Sprintf("!BADKEY[%d]", i)] = v - } - } - return fields -} - -var SlogLevelToLevel = map[slog.Level]Level{ - slog.LevelDebug: DebugLevel, - slog.LevelInfo: InfoLevel, - slog.LevelWarn: WarnLevel, - slog.LevelError: ErrorLevel, -} diff --git a/log/sarama_test.go b/log/sarama_test.go new file mode 100644 index 0000000000..6fccd5e256 --- /dev/null +++ b/log/sarama_test.go @@ -0,0 +1,47 @@ +package log_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("NewSarama", func() { + It("initializes a new sarama log adapter", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + }) + + It("implements Print", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Print("testing 1 2 3") + + testLog.AssertInfo("testing 1 2 3") + }) + + It("implements Printf", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Printf("testing %s", "4 5 6") + + testLog.AssertInfo("testing 4 5 6") + }) + + It("implements Println", func() { + testLog := logtest.NewLogger() + saramaLog := log.NewSarama(testLog) + Expect(saramaLog).ToNot(Equal(nil)) + + saramaLog.Println("testing 7 8 9") + + testLog.AssertInfo("testing 7 8 9") + }) +}) diff --git a/push/logpush_test.go b/push/logpush_test.go new file mode 100644 index 0000000000..f8f8611237 --- /dev/null +++ b/push/logpush_test.go @@ -0,0 +1,50 @@ +package push + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("NewLogPusher", func() { + It("succeeds", func() { + testLog := logtest.NewLogger() + + Expect(NewLogPusher(testLog)).ToNot(Equal(nil)) + }) + + It("implements Push by logging a message", func() { + testLog := logtest.NewLogger() + ctx := context.Background() + testToken := &devicetokens.DeviceToken{} + testNotification := &Notification{} + + pusher := NewLogPusher(testLog) + Expect(pusher).ToNot(Equal(nil)) + + Expect(pusher.Push(ctx, testToken, testNotification)).To(Succeed()) + testFields := log.Fields{ + "deviceToken": testToken, + "notification": testNotification, + } + testLog.AssertInfo("logging push notification", testFields) + }) + + It("handles being passed a nil logger", func() { + ctx := context.Background() + testToken := &devicetokens.DeviceToken{} + testNotification := &Notification{} + + pusher := NewLogPusher(nil) + Expect(pusher).ToNot(Equal(nil)) + + Expect(func() { + Expect(pusher.Push(ctx, testToken, testNotification)).To(Succeed()) + }).ToNot(Panic()) + }) +}) diff --git a/push/push.go b/push/push.go index 92d5a28eaa..d865e4e3ea 100644 --- a/push/push.go +++ b/push/push.go @@ -11,7 +11,6 @@ import ( "github.com/sideshow/apns2/payload" "github.com/sideshow/apns2/token" - "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" @@ -27,12 +26,6 @@ func (n Notification) String() string { return n.Message } -func FromAlertsNotification(notification *alerts.Notification) *Notification { - return &Notification{ - Message: notification.Message, - } -} - // APNSPusher implements push notifications via Apple APNs. 
type APNSPusher struct { BundleID string diff --git a/push/push_test.go b/push/push_test.go index 5922f85e25..11496ffabc 100644 --- a/push/push_test.go +++ b/push/push_test.go @@ -49,6 +49,50 @@ func testDeps() (context.Context, *APNSPusher, *pushTestDeps) { } var _ = Describe("APNSPusher", func() { + Describe("NewAPNSPusherFromKeyData", func() { + It("errors if key data is empty or blank", func() { + _, err := NewAPNSPusherFromKeyData([]byte(""), "key", "team", "bundle") + Expect(err).To(MatchError(ContainSubstring("APNs signing key is blank"))) + + _, err = NewAPNSPusherFromKeyData(nil, "key", "team", "bundle") + Expect(err).To(MatchError(ContainSubstring("APNs signing key is blank"))) + }) + + It("errors if key data is invalid", func() { + _, err := NewAPNSPusherFromKeyData([]byte("foo"), "key", "team", "bundle") + Expect(err).To(MatchError(ContainSubstring("AuthKey must be a valid .p8 PEM file"))) + }) + + It("errors if bundleID is blank", func() { + _, err := NewAPNSPusherFromKeyData([]byte("hi"), "key", "team", "") + Expect(err).To(MatchError(ContainSubstring("bundleID is blank"))) + }) + + It("errors if teamID is blank", func() { + _, err := NewAPNSPusherFromKeyData([]byte("hi"), "key", "", "bundle") + Expect(err).To(MatchError(ContainSubstring("teamID is blank"))) + }) + + It("errors if keyID is blank", func() { + _, err := NewAPNSPusherFromKeyData([]byte("hi"), "", "team", "bundle") + Expect(err).To(MatchError(ContainSubstring("keyID is blank"))) + }) + + It("succeeds", func() { + // random private key for testing + data := []byte(`-----BEGIN PRIVATE KEY----- +MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDDNrXT9ZRWPUAAg38Qi +Z553y7sGqOgMxUCG36eCIcRCy1QiTJBgGDxIhWvkE8Sx4N6hZANiAATrsRyRXLa0 +Tgczq8tmFomMP212HdkPF3gFEl/CkqGHUodR2EdZBW1zVcmuLjIN4zvqVVXMJm/U +eHZz9xAZ95y3irAfkMuOD/Bw88UYvhKnipOHBeS8BwqyfFQ+NRB6xYU= +-----END PRIVATE KEY----- +`) + pusher, err := NewAPNSPusherFromKeyData(data, "key", "team", "bundle") + Expect(err).To(Succeed()) + 
Expect(pusher).ToNot(Equal(nil)) + }) + }) + Describe("Push", func() { It("requires an Apple token", func() { ctx, pusher, deps := testDeps() diff --git a/task/carepartner.go b/task/carepartner.go new file mode 100644 index 0000000000..31b5b6bc58 --- /dev/null +++ b/task/carepartner.go @@ -0,0 +1,18 @@ +package task + +import ( + "time" + + "github.com/tidepool-org/platform/pointer" +) + +const CarePartnerType = "org.tidepool.carepartner" + +func NewCarePartnerTaskCreate() *TaskCreate { + return &TaskCreate{ + Name: pointer.FromAny(CarePartnerType), + Type: CarePartnerType, + AvailableTime: &time.Time{}, + Data: map[string]interface{}{}, + } +} diff --git a/task/carepartner_test.go b/task/carepartner_test.go new file mode 100644 index 0000000000..b6f3f4478b --- /dev/null +++ b/task/carepartner_test.go @@ -0,0 +1,14 @@ +package task + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("NewCarePartnerTaskCreate", func() { + It("succeeds", func() { + Expect(func() { + Expect(NewCarePartnerTaskCreate()).ToNot(Equal(nil)) + }).ToNot(Panic()) + }) +}) diff --git a/task/service/service/service.go b/task/service/service/service.go index 40ed741686..f4a09b5b66 100644 --- a/task/service/service/service.go +++ b/task/service/service/service.go @@ -3,21 +3,27 @@ package service import ( "context" - "github.com/tidepool-org/platform/clinics" - "github.com/tidepool-org/platform/ehr/reconcile" - "github.com/tidepool-org/platform/ehr/sync" + "github.com/kelseyhightower/envconfig" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/application" "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/clinics" dataClient "github.com/tidepool-org/platform/data/client" + "github.com/tidepool-org/platform/data/events" dataSource "github.com/tidepool-org/platform/data/source" dataSourceClient "github.com/tidepool-org/platform/data/source/client" "github.com/tidepool-org/platform/dexcom" 
dexcomClient "github.com/tidepool-org/platform/dexcom/client" dexcomFetch "github.com/tidepool-org/platform/dexcom/fetch" dexcomProvider "github.com/tidepool-org/platform/dexcom/provider" + "github.com/tidepool-org/platform/ehr/reconcile" + "github.com/tidepool-org/platform/ehr/sync" "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/permission" + permissionClient "github.com/tidepool-org/platform/permission/client" "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/push" serviceService "github.com/tidepool-org/platform/service/service" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" "github.com/tidepool-org/platform/task" @@ -39,6 +45,9 @@ type Service struct { dexcomClient dexcom.Client taskQueue queue.Queue clinicsClient clinics.Client + alertsClient *alerts.Client + pusher events.Pusher + permissionClient permission.Client } func New() *Service { @@ -70,6 +79,15 @@ func (s *Service) Initialize(provider application.Provider) error { if err := s.initializeClinicsClient(); err != nil { return err } + if err := s.initializeAlertsClient(); err != nil { + return err + } + if err := s.initializePusher(); err != nil { + return err + } + if err := s.initializePermissionClient(); err != nil { + return err + } if err := s.initializeTaskQueue(); err != nil { return err } @@ -346,6 +364,16 @@ func (s *Service) initializeTaskQueue() error { } runners = append(runners, ehrSyncRnnr) + if s.alertsClient == nil { + s.Logger().Info("alerts client is nil; care partner tasks will not run successfully") + } + carePartnerRunner, err := alerts.NewCarePartnerRunner(s.Logger(), s.alertsClient, + s.AuthClient(), s.pusher, s.permissionClient, s.AuthClient()) + if err != nil { + return errors.Wrap(err, "unable to create care partner runner") + } + runners = append(runners, carePartnerRunner) + for _, r := range runners { r := r if err := taskQueue.RegisterRunner(r); err != nil { @@ -359,6 +387,75 @@ func 
(s *Service) initializeTaskQueue() error { return nil } +func (s *Service) initializeAlertsClient() error { + s.Logger().Debug("initializing alerts client") + + platformConfig := platform.NewConfig() + platformConfig.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("data", "client") + loader := platform.NewConfigReporterLoader(reporter) + if err := platformConfig.Load(loader); err != nil { + return errors.Wrap(err, "Unable to load alerts client config") + } + + s.Logger().Debug("Creating alerts client") + + platformClient, err := platform.NewClient(platformConfig, platform.AuthorizeAsService) + if err != nil { + return errors.Wrap(err, "Unable to create platform client for use in alerts client") + } + s.alertsClient = alerts.NewClient(platformClient, s.Logger()) + + return nil +} + +func (s *Service) initializePusher() error { + var err error + + apns2Config := &struct { + SigningKey []byte `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_SIGNING_KEY"` + KeyID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_KEY_ID"` + BundleID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_BUNDLE_ID"` + TeamID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_TEAM_ID"` + }{} + if err := envconfig.Process("", apns2Config); err != nil { + return errors.Wrap(err, "Unable to process APNs pusher config") + } + + var pusher events.Pusher + pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, + apns2Config.TeamID, apns2Config.BundleID) + if err != nil { + s.Logger().WithError(err).Warn("falling back to logging of push notifications") + pusher = push.NewLogPusher(s.Logger()) + } + s.pusher = pusher + + return nil +} + +func (s *Service) initializePermissionClient() error { + s.Logger().Debug("Loading permission client config") + + cfg := platform.NewConfig() + cfg.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("permission", "client") + loader := platform.NewConfigReporterLoader(reporter) + if err := 
cfg.Load(loader); err != nil { + return errors.Wrap(err, "unable to load permission client config") + } + + s.Logger().Debug("Creating permission client") + + clnt, err := permissionClient.New(cfg, platform.AuthorizeAsService) + if err != nil { + return errors.Wrap(err, "unable to create permission client") + } + s.permissionClient = clnt + + return nil +} + func (s *Service) terminateTaskQueue() { if s.taskQueue != nil { s.Logger().Debug("Stopping task queue") diff --git a/task/service/service/service_test.go b/task/service/service/service_test.go index 8fe529cca5..71314e3c63 100644 --- a/task/service/service/service_test.go +++ b/task/service/service/service_test.go @@ -35,12 +35,12 @@ var _ = Describe("Service", func() { var dataClientConfig map[string]interface{} var dataSourceClientConfig map[string]interface{} var taskStoreConfig map[string]interface{} + var permissionClientConfig map[string]interface{} var taskServiceConfig map[string]interface{} var service *taskServiceService.Service BeforeEach(func() { provider = applicationTest.NewProviderWithDefaults() - serverSecret = authTest.NewServiceSecret() sessionToken = authTest.NewSessionToken() server = NewServer() @@ -69,6 +69,9 @@ var _ = Describe("Service", func() { "address": server.URL(), "server_token_secret": authTest.NewServiceSecret(), } + permissionClientConfig = map[string]interface{}{ + "address": server.URL(), + } taskStoreConfig = map[string]interface{}{ "addresses": os.Getenv("TIDEPOOL_STORE_ADDRESSES"), "database": test.RandomStringFromRangeAndCharset(4, 8, test.CharsetLowercase), @@ -88,6 +91,9 @@ var _ = Describe("Service", func() { "task": map[string]interface{}{ "store": taskStoreConfig, }, + "permission": map[string]interface{}{ + "client": permissionClientConfig, + }, "secret": authTest.NewServiceSecret(), "server": map[string]interface{}{ "address": testHttp.NewAddress(), diff --git a/task/store/mongo/mongo.go b/task/store/mongo/mongo.go index c36cbf1105..9d5041a4d0 100644 --- 
a/task/store/mongo/mongo.go +++ b/task/store/mongo/mongo.go @@ -81,6 +81,7 @@ func (s *Store) EnsureDefaultTasks() error { repository.EnsureSummaryBackfillTask, repository.EnsureSummaryMigrationTask, repository.EnsureEHRReconcileTask, + repository.EnsureCarePartnerTask, } for _, f := range fs { @@ -176,6 +177,11 @@ func (t *TaskRepository) EnsureEHRReconcileTask(ctx context.Context) error { return t.ensureTask(ctx, create) } +func (t *TaskRepository) EnsureCarePartnerTask(ctx context.Context) error { + create := task.NewCarePartnerTaskCreate() + return t.ensureTask(ctx, create) +} + func (t *TaskRepository) ensureTask(ctx context.Context, create *task.TaskCreate) error { tsk, err := task.NewTask(ctx, create) if err != nil { diff --git a/task/store/mongo/mongo_test.go b/task/store/mongo/mongo_test.go index 756cc33ed0..9c16d09aab 100644 --- a/task/store/mongo/mongo_test.go +++ b/task/store/mongo/mongo_test.go @@ -8,7 +8,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" - "github.com/prometheus/client_golang/prometheus/testutil" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" From 3f2bf06c347a4e38e961ae0971cb9944a0eaab67 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Thu, 12 Dec 2024 10:27:47 -0700 Subject: [PATCH 27/54] evaluate not looping conditions part 1 I've identified that there are some big changes that will need to happen in order to manage marking things sent and resolved. Those will come in a future commit. BACK-2559 --- alerts/config.go | 62 +++++++++++-- alerts/config_test.go | 199 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 256 insertions(+), 5 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index 56fcd80ca6..46457717bb 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -335,18 +335,70 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { } // Evaluate if the device is looping. 
-func (a NotLoopingAlert) Evaluate(ctx context.Context, - decisions []*dosingdecision.DosingDecision) ( - notifcation *NotificationWithHook, _ bool) { +func (a *NotLoopingAlert) Evaluate(ctx context.Context, + decisions []*dosingdecision.DosingDecision) (_ *NotificationWithHook, changed bool) { - // TODO will be implemented in the near future. - return nil, false + defer func() { logNotLoopingEvaluation(ctx, changed, a.IsActive()) }() + + lastLooped := time.Time{} + for _, decision := range decisions { + if decision.Reason == nil || *decision.Reason != DosingDecisionReasonLoop { + continue + } + if decision.Time == nil { + continue + } + if decision.Time.After(lastLooped) { + lastLooped = *decision.Time + } + } + + delay := DefaultNotLoopingDelay + if a.Delay.Duration() != 0 { + delay = a.Delay.Duration() + } + if time.Since(lastLooped) < delay { + if a.IsActive() { + a.Resolved = time.Now() + return nil, true + } + return nil, false + } + + if a.IsActive() { + if time.Since(a.Sent) > NotLoopingRepeat { + notification := a.withHook(&Notification{Message: NotLoopingMessage}) + return notification, false + } + return nil, false + } else { + a.Triggered = time.Now() + notification := a.withHook(&Notification{Message: NotLoopingMessage}) + return notification, true + } +} + +// DefaultNotLoopingDelay is used when the delay has a Zero value (its default). +const DefaultNotLoopingDelay = 30 * time.Minute + +func logNotLoopingEvaluation(ctx context.Context, changed, isAlerting bool) { + fields := log.Fields{ + "changed": changed, + "isAlerting?": isAlerting, + } + lgr := log.LoggerFromContext(ctx) + lgr.WithFields(fields).Info("not looping") } +const NotLoopingMessage = "Loop is not able to loop" + // DosingDecisionReasonLoop is specified in a [dosingdecision.DosingDecision] to indicate // that the decision is part of a loop adjustment (as opposed to bolus or something else). 
const DosingDecisionReasonLoop string = "loop" +// NotLoopingRepeat is the interval between sending notifications when not looping. +const NotLoopingRepeat = 5 * time.Minute + // NoCommunicationAlert is configured to send notifications when no data is received. // // It differs fundamentally from DataAlerts in that it is polled instead of being triggered diff --git a/alerts/config_test.go b/alerts/config_test.go index fcb2e972d3..c63ea9238f 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -15,6 +15,7 @@ import ( "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood" "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/log" logTest "github.com/tidepool-org/platform/log/test" "github.com/tidepool-org/platform/pointer" @@ -311,6 +312,14 @@ var _ = Describe("Config", func() { }, } } + var testDosingDecision = func(d time.Duration) *dosingdecision.DosingDecision { + return &dosingdecision.DosingDecision{ + Base: types.Base{ + Time: pointer.FromAny(time.Now().Add(d)), + }, + Reason: pointer.FromAny(DosingDecisionReasonLoop), + } + } Context("UrgentLowAlert", func() { Context("Threshold", func() { @@ -1016,6 +1025,14 @@ var _ = Describe("Config", func() { }) Context("NotLoopingAlert", func() { + + var decisionsOld = []*dosingdecision.DosingDecision{ + testDosingDecision(-30 * time.Hour), + } + var decisionsRecent = []*dosingdecision.DosingDecision{ + testDosingDecision(-15 * time.Second), + } + Context("Delay", func() { It("accepts values between 0 and 2 hours (inclusive)", func() { val := validator.New(logTest.NewLogger()) @@ -1038,7 +1055,189 @@ var _ = Describe("Config", func() { b.Validate(val) Expect(val.Error()).To(MatchError("value 2h0m1s is not between 0s and 2h0m0s")) }) + }) + + Context("Evaluate", func() { + testNotLooping := func() *NotLoopingAlert { + return &NotLoopingAlert{ + Base: Base{}, + 
Delay: 0, + } + } + + It("uses a default delay of 30 minutes", func() { + ctx := contextWithTestLogger() + decisionsNoAlert := []*dosingdecision.DosingDecision{ + testDosingDecision(-29 * time.Minute), + } + decisionsWithAlert := []*dosingdecision.DosingDecision{ + testDosingDecision(-30 * time.Minute), + } + + alert := testNotLooping() + + notification, _ := alert.Evaluate(ctx, decisionsNoAlert) + Expect(notification).To(BeNil()) + notification, _ = alert.Evaluate(ctx, decisionsWithAlert) + Expect(notification).ToNot(BeNil()) + Expect(notification.Message).To(ContainSubstring("not able to loop")) + }) + + It("respects custom delays", func() { + ctx := contextWithTestLogger() + decisionsNoAlert := []*dosingdecision.DosingDecision{ + testDosingDecision(-14 * time.Minute), + } + decisionsWithAlert := []*dosingdecision.DosingDecision{ + testDosingDecision(-15 * time.Minute), + } + + alert := testNotLooping() + alert.Delay = DurationMinutes(15 * time.Minute) + + notification, _ := alert.Evaluate(ctx, decisionsNoAlert) + Expect(notification).To(BeNil()) + notification, _ = alert.Evaluate(ctx, decisionsWithAlert) + Expect(notification).ToNot(BeNil()) + Expect(notification.Message).To(ContainSubstring("not able to loop")) + }) + + It("handles being passed empty data", func() { + ctx := contextWithTestLogger() + var notification *NotificationWithHook + + alert := testNotLooping() + + Expect(func() { + notification, _ = alert.Evaluate(ctx, []*dosingdecision.DosingDecision{}) + }).ToNot(Panic()) + Expect(notification.Message).To(ContainSubstring("Loop is not able to loop")) + Expect(func() { + notification, _ = alert.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(notification.Message).To(ContainSubstring("Loop is not able to loop")) + }) + + It("logs evaluation results", func() { + ctx := contextWithTestLogger() + decisions := []*dosingdecision.DosingDecision{ + testDosingDecision(-30 * time.Second), + } + + alert := testNotLooping() + + Expect(func() { + 
alert.Evaluate(ctx, decisions) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logTest.Logger) + lgr.AssertInfo("not looping", log.Fields{ + "changed": false, + "isAlerting?": false, + }) + }).ToNot(Panic()) + }) + + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testNotLooping() + Expect(func() { + alert.Evaluate(ctx, decisionsOld) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, decisionsRecent) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testNotLooping() + + Expect(func() { + alert.Evaluate(ctx, decisionsOld) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, decisionsRecent) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + was := alert.Resolved + Expect(func() { + alert.Evaluate(ctx, decisionsRecent) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + + alert := testNotLooping() + + Expect(func() { + alert.Evaluate(ctx, decisionsRecent) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, decisionsOld) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) + + It("observes NotLoopingRepeat between notifications", func() { + ctx := contextWithTestLogger() + noRepeat := time.Now().Add(-4 * time.Minute) + triggersRepeat := noRepeat.Add(-NotLoopingRepeat) + + alert := testNotLooping() + alert.Sent = noRepeat + alert.Triggered = noRepeat + + notification, _ := alert.Evaluate(ctx, decisionsOld) + Expect(notification).To(BeNil()) + + alert.Sent = triggersRepeat + notification, _ = alert.Evaluate(ctx, decisionsOld) + 
Expect(notification).ToNot(BeNil()) + }) + + It("ignores decisions without a reason", func() { + ctx := contextWithTestLogger() + + alert := testNotLooping() + noReason := testDosingDecision(time.Second) + noReason.Reason = nil + decisions := []*dosingdecision.DosingDecision{ + testDosingDecision(-time.Hour), + noReason, + } + + notification, _ := alert.Evaluate(ctx, decisions) + Expect(notification).ToNot(BeNil()) + }) + + It("ignores decisions without a time", func() { + ctx := contextWithTestLogger() + + alert := testNotLooping() + noTime := testDosingDecision(time.Second) + noTime.Time = nil + decisions := []*dosingdecision.DosingDecision{ + testDosingDecision(-time.Hour), + noTime, + } + + notification, _ := alert.Evaluate(ctx, decisions) + Expect(notification).ToNot(BeNil()) + }) }) }) From e105d0b7d3084f2d411d01f02c5f4731253cda07 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 21 Jan 2025 12:30:09 -0700 Subject: [PATCH 28/54] re-working to handle alert resolution and sent tracking The above turned out to be a lot more complicated than I had imagined they'd be. BACK-2559 --- Makefile | 2 +- alerts/config.go | 626 ++++++------ alerts/config_test.go | 1600 +++++++++++++----------------- alerts/evaluator.go | 105 +- alerts/evaluator_test.go | 198 ++-- alerts/tasks.go | 58 +- alerts/tasks_test.go | 155 ++- data/blood/glucose/glucose.go | 4 + data/events/alerts.go | 67 +- data/events/alerts_test.go | 53 +- data/service/service/standard.go | 20 +- data/store/mongo/mongo_alerts.go | 2 +- data/store/mongo/mongo_test.go | 52 +- devicetokens/devicetokens.go | 9 + log/devlog/devlog.go | 16 +- task/service/service/service.go | 1 + 16 files changed, 1411 insertions(+), 1557 deletions(-) diff --git a/Makefile b/Makefile index 8712a40cc7..9bb1eb3c66 100644 --- a/Makefile +++ b/Makefile @@ -236,7 +236,7 @@ ci-test-watch: ginkgo go-test: . 
./env.test.sh && $(TIMING_CMD) go test $(GOTEST_FLAGS) $(GOTEST_PKGS) -go-ci-test: GOTEST_FLAGS += -count=1 -race -shuffle=on -cover +go-ci-test: override GOTEST_FLAGS += -count=1 -race -shuffle=on -cover go-ci-test: GOTEST_PKGS = ./... go-ci-test: go-test diff --git a/alerts/config.go b/alerts/config.go index 46457717bb..91b46fb6ff 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "encoding/json" + "os" "slices" "time" @@ -15,6 +16,7 @@ import ( "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" "github.com/tidepool-org/platform/structure" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/user" @@ -36,30 +38,46 @@ type Config struct { UploadID string `json:"uploadId" bson:"uploadId,omitempty"` // Alerts collects the user settings for each type of alert, and tracks their statuses. - Alerts `bson:",inline,omitempty"` + Alerts `bson:"alerts,omitempty"` + + Activity `bson:"activity,omitempty" json:"activity,omitempty"` } // Alerts is a wrapper to collect the user-modifiable parts of a Config. type Alerts struct { - DataAlerts `bson:",inline,omitempty"` - *NoCommunicationAlert `bson:"noCommunication,omitempty" json:"noCommunication,omitempty"` + UrgentLow *UrgentLowAlert `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` + Low *LowAlert `json:"low,omitempty" bson:"low,omitempty"` + High *HighAlert `json:"high,omitempty" bson:"high,omitempty"` + NotLooping *NotLoopingAlert `json:"notLooping,omitempty" bson:"notLooping,omitempty"` + NoCommunication *NoCommunicationAlert `bson:"noCommunication,omitempty" json:"noCommunication,omitempty"` } -// DataAlerts models alerts triggered by incoming data. 
-type DataAlerts struct { - UrgentLow *UrgentLowAlert `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` - Low *LowAlert `json:"low,omitempty" bson:"low,omitempty"` - High *HighAlert `json:"high,omitempty" bson:"high,omitempty"` - NotLooping *NotLoopingAlert `json:"notLooping,omitempty" bson:"notLooping,omitempty"` +type Activity struct { + UrgentLow AlertActivity `json:"urgentLow,omitempty" bson:"urgentLow,omitempty"` + Low AlertActivity `json:"low,omitempty" bson:"low,omitempty"` + High AlertActivity `json:"high,omitempty" bson:"high,omitempty"` + NotLooping AlertActivity `json:"notLooping,omitempty" bson:"notLooping,omitempty"` + NoCommunication AlertActivity `json:"noCommunication,omitempty" bson:"noCommunication,omitempty"` } func (c Config) Validate(validator structure.Validator) { validator.String("userID", &c.UserID).Using(user.IDValidator) validator.String("followedUserID", &c.FollowedUserID).Using(user.IDValidator) validator.String("uploadID", &c.UploadID).Exists().Using(data.SetIDValidator) - c.DataAlerts.Validate(validator) - if c.NoCommunicationAlert != nil { - c.NoCommunicationAlert.Validate(validator) + if c.Alerts.UrgentLow != nil { + c.Alerts.UrgentLow.Validate(validator) + } + if c.Alerts.Low != nil { + c.Alerts.Low.Validate(validator) + } + if c.Alerts.High != nil { + c.Alerts.High.Validate(validator) + } + if c.Alerts.NotLooping != nil { + c.Alerts.NotLooping.Validate(validator) + } + if c.Alerts.NoCommunication != nil { + c.Alerts.NoCommunication.Validate(validator) } } @@ -68,55 +86,136 @@ func (c Config) Validate(validator structure.Validator) { // While this method, or the methods it calls, can fail, there's no point in returning an // error. Instead errors are logged before continuing. This is to ensure that any possible // alert that should be triggered, will be triggered. 
-func (c *Config) EvaluateData(ctx context.Context, gd []*glucose.Glucose, - dd []*dosingdecision.DosingDecision) (*NotificationWithHook, bool) { +func (c *Config) EvaluateData(ctx context.Context, gd []*Glucose, + dd []*DosingDecision) (*Notification, bool) { - notification, changed := c.DataAlerts.Evaluate(ctx, gd, dd) - if notification != nil { - notification.FollowedUserID = c.FollowedUserID - notification.RecipientUserID = c.UserID + var n *Notification + var needsUpsert bool + + ul, low, high, nl := EvalResult{}, EvalResult{}, EvalResult{}, EvalResult{} + if c.Alerts.UrgentLow != nil && c.Alerts.UrgentLow.Enabled { + ul = c.Alerts.UrgentLow.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.UrgentLow.Update(ul.OutOfRange) + } + if c.Alerts.Low != nil && c.Alerts.Low.Enabled { + low = c.Alerts.Low.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.Low.Update(low.OutOfRange) + } + if c.Alerts.High != nil && c.Alerts.High.Enabled { + high = c.Alerts.High.Evaluate(ctx, gd) + needsUpsert = needsUpsert || c.Activity.High.Update(high.OutOfRange) + } + if c.Alerts.NotLooping != nil && c.Alerts.NotLooping.Enabled { + nl = c.Alerts.NotLooping.Evaluate(ctx, dd) + needsUpsert = needsUpsert || c.Activity.NotLooping.Update(nl.OutOfRange) } - return notification, changed + if ul.OutOfRange { + if isReEval(c.Activity.UrgentLow.Sent, ul.NewestTime) { + return nil, needsUpsert + } + msg := genGlucoseThresholdMessage("below urgent low") + return c.newNotification(msg, &c.Activity.UrgentLow), needsUpsert + } + if low.OutOfRange { + if isReEval(c.Activity.Low.Sent, low.NewestTime) { + return nil, needsUpsert + } + delay := c.Alerts.Low.Delay.Duration() + + if time.Since(low.Started) > delay { + repeat := c.Alerts.Low.Repeat + if !c.Activity.Low.IsSent() || mayRepeat(repeat, c.Activity.Low.Sent) { + msg := genGlucoseThresholdMessage("below low") + return c.newNotification(msg, &c.Activity.Low), needsUpsert + + } + } + return nil, needsUpsert + } + if 
high.OutOfRange { + if isReEval(c.Activity.High.Sent, high.NewestTime) { + return nil, needsUpsert + } + delay := c.Alerts.High.Delay.Duration() + if time.Since(high.Started) > delay { + repeat := c.Alerts.High.Repeat + if !c.Activity.High.IsSent() || mayRepeat(repeat, c.Activity.High.Sent) { + msg := genGlucoseThresholdMessage("above high") + return c.newNotification(msg, &c.Activity.High), needsUpsert + } + } + } + if nl.OutOfRange { + // Because not looping doesn't use a threshold, re-evaluations aren't treated any + // differently. + delay := c.Alerts.NotLooping.Delay.Duration() + if delay == 0 { + delay = NotLoopingRepeat + } + if time.Since(c.Activity.NotLooping.Sent) > delay { + return c.newNotification(NotLoopingMessage, &c.Activity.NotLooping), needsUpsert + } + } + + return n, needsUpsert } -// SentFunc allows [Activity] to be updated in response to a notification being sent. -type SentFunc func(time.Time) +func mayRepeat(repeat DurationMinutes, lastSent time.Time) bool { + return repeat.Duration() > 0 && time.Since(lastSent) > repeat.Duration() +} -// NotificationWithHook wraps a Notification with a SentFunc. -// -// This separates the responsibilities of the individual alerts (e.g. [LowAlert]), which -// create notifications and track when those notifications were sent, from those types which -// trigger the alerts, (e.g. task service's CarePartnerRunner, or data/events' Kafka -// connector). 
-type NotificationWithHook struct { - Sent SentFunc - *Notification +func (c *Config) newNotification(msg string, act *AlertActivity) *Notification { + return &Notification{ + FollowedUserID: c.FollowedUserID, + RecipientUserID: c.UserID, + Message: msg, + Sent: func(t time.Time) { + if t.After(act.Sent) { + act.Sent = t + } + }, + } +} + +func (c Config) LoggerWithFields(lgr log.Logger) log.Logger { + return lgr.WithFields(log.Fields{ + "userID": c.UserID, + "followedUserID": c.FollowedUserID, + "dataSetID": c.UploadID, + }) } +func isReEval(t1, t2 time.Time) bool { + return t1.After(t2) +} + +// TODO pass in a logger func (c *Config) EvaluateNoCommunication(ctx context.Context, last time.Time) ( - *NotificationWithHook, bool) { + *Notification, bool) { - if c.NoCommunicationAlert == nil { + if c.Alerts.NoCommunication == nil || !c.Alerts.NoCommunication.Enabled { return nil, false } - lgr := log.LoggerFromContext(ctx).WithFields(log.Fields{ - "UserID": c.UserID, - "DataSetID": c.UploadID, - "FollowedUserID": c.FollowedUserID, - }) + lgr := c.LoggerWithFields(log.LoggerFromContext(ctx)) ctx = log.NewContextWithLogger(ctx, lgr) - notification, changed := c.NoCommunicationAlert.Evaluate(ctx, last) - if notification != nil { - notification.FollowedUserID = c.FollowedUserID - notification.RecipientUserID = c.UserID + nc := c.Alerts.NoCommunication.Evaluate(ctx, last) + needsUpsert := c.Activity.NoCommunication.Update(nc.OutOfRange) + // TODO check re-eval? I don't think so + delay := c.Alerts.NoCommunication.Delay.Duration() + if delay == 0 { + delay = DefaultNoCommunicationDelay } - return notification, changed + if time.Since(nc.Started) > delay && time.Since(c.Activity.NoCommunication.Sent) > delay { + n := c.newNotification(NoCommunicationMessage, &c.Activity.NoCommunication) + return n, needsUpsert + } + return nil, needsUpsert } // LongestDelay of the delays set on enabled alerts. 
-func (a DataAlerts) LongestDelay() time.Duration { +func (a Alerts) LongestDelay() time.Duration { delays := []time.Duration{} if a.Low != nil && a.Low.Enabled { delays = append(delays, a.Low.Delay.Duration()) @@ -133,97 +232,33 @@ func (a DataAlerts) LongestDelay() time.Duration { return slices.Max(delays) } -func (a DataAlerts) Validate(validator structure.Validator) { - if a.UrgentLow != nil { - a.UrgentLow.Validate(validator) - } - if a.Low != nil { - a.Low.Validate(validator) - } - if a.High != nil { - a.High.Validate(validator) - } - if a.NotLooping != nil { - a.NotLooping.Validate(validator) - } -} - -// Evaluate to determine if notifications are indicated. -// -// Evaluations are performed according to priority. The process is "short-circuited" at the -// first indicated notification. -func (a DataAlerts) Evaluate(ctx context.Context, - gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) (*NotificationWithHook, bool) { - - changed := false - if a.UrgentLow != nil && a.UrgentLow.Enabled { - if n, c := a.UrgentLow.Evaluate(ctx, gd); n != nil { - return n, c - } else { - changed = changed || c - } - } - if a.Low != nil && a.Low.Enabled { - if n, c := a.Low.Evaluate(ctx, gd); n != nil { - return n, changed || c - } else { - changed = changed || c - } - } - if a.High != nil && a.High.Enabled { - if n, c := a.High.Evaluate(ctx, gd); n != nil { - return n, changed || c - } else { - changed = changed || c - } - } - if a.NotLooping != nil && a.NotLooping.Enabled { - if n, c := a.NotLooping.Evaluate(ctx, dd); n != nil { - return n, changed || c - } else { - changed = changed || c - } - } - return nil, changed -} - // Base describes the minimum specifics of a desired alert. type Base struct { // Enabled controls whether notifications should be sent for this alert. Enabled bool `json:"enabled" bson:"enabled"` - - // Activity tracks when events related to the alert occurred. 
- Activity `json:"activity" bson:"activity,omitempty"` } func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) } -func (b Base) Evaluate(ctx context.Context, data []*glucose.Glucose) *Notification { +func (b Base) Evaluate(ctx context.Context, data []*Glucose) *Notification { if lgr := log.LoggerFromContext(ctx); lgr != nil { lgr.Warn("alerts.Base.Evaluate called, this shouldn't happen!") } return nil } -// withHook wraps a *Notification with a SentFunc that updates its Sent. -func (b *Base) withHook(n *Notification) *NotificationWithHook { - if n == nil { - return nil - } - return &NotificationWithHook{ - Notification: n, - Sent: func(at time.Time) { - if at.Before(b.Activity.Sent) { - return - } - b.Activity.Sent = at - }, +func (b Base) lgr(ctx context.Context) log.Logger { + var lgr log.Logger = log.LoggerFromContext(ctx) + if lgr == nil { + // NewLogger can only fail if os.Stderr is nil. + lgr, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) } + return lgr } -type Activity struct { +type AlertActivity struct { // Triggered records the last time this alert was triggered. Triggered time.Time `json:"triggered" bson:"triggered"` // Sent records the last time this alert was sent. @@ -232,14 +267,26 @@ type Activity struct { Resolved time.Time `json:"resolved" bson:"resolved"` } -func (a Activity) IsActive() bool { +func (a AlertActivity) IsActive() bool { return a.Triggered.After(a.Resolved) } -func (a Activity) IsSent() bool { +func (a AlertActivity) IsSent() bool { return a.Sent.After(a.Triggered) } +func (a *AlertActivity) Update(outOfRange bool) bool { + changed := false + if outOfRange && !a.IsActive() { + a.Triggered = time.Now() + changed = true + } else if !outOfRange && a.IsActive() { + a.Resolved = time.Now() + changed = true + } + return changed +} + const ( // RepeatMin is the minimum duration for a repeat setting (if not 0). 
RepeatMin = 15 * time.Minute @@ -274,52 +321,110 @@ func (a UrgentLowAlert) Validate(validator structure.Validator) { a.Threshold.Validate(validator) } -// Evaluate urgent low condition. -// -// Assumes data is pre-sorted in descending order by Time. -func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) ( - notification *NotificationWithHook, _ bool) { +type EvalResult struct { + Name string + Started time.Time + Threshold float64 + NewestTime time.Time + NewestValue float64 + Evaluator func(dv, tv float64) bool `json:"-"` + OutOfRange bool +} - lgr := log.LoggerFromContext(ctx) - if len(data) == 0 { - lgr.Debug("no data to evaluate for urgent low") - return nil, false - } - datum := data[0] - okDatum, okThreshold, err := validateGlucoseAlertDatum(datum, a.Threshold) +func (r EvalResult) String() string { + b, err := json.Marshal(r) if err != nil { - lgr.WithError(err).Warn("Unable to evaluate urgent low") - return nil, false + return "" } - defer func() { - logGlucoseAlertEvaluation(lgr, "urgent low", notification, okDatum, okThreshold) - }() - active := okDatum < okThreshold - changed := false - if !active { - if a.IsActive() { - a.Resolved = time.Now() - changed = true + return string(b) +} + +func (r *EvalResult) Process(ctx context.Context, t Threshold, data []*Glucose) { + for _, datum := range data { + dv, tv, err := normalizeUnits(datum, t) + if err != nil { + r.lgr(ctx).WithError(err).Info("Unable to normalize datum") + continue + } + + if datum.Time == nil { + r.lgr(ctx).Warn("Unable to process: Time == nil; that shouldn't be possible") + continue + } + + outOfRange := r.Evaluator(dv, tv) + + if r.NewestValue == 0 { + r.NewestValue = dv + r.NewestTime = *datum.Time + r.OutOfRange = outOfRange + r.Threshold = tv + r.logGlucoseEval(ctx) + } + + if !outOfRange { + break + } + + if datum.Time != nil && (r.Started.IsZero() || datum.Time.Before(r.Started)) { + r.Started = *datum.Time } - return nil, changed } - if !a.IsActive() { - 
a.Triggered = time.Now() - changed = true +} + +// Evaluate urgent low condition. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := EvalResult{ + Name: "urgent low", + Evaluator: func(dv, tv float64) bool { return dv < tv }, } - n := &Notification{Message: genGlucoseThresholdMessage("below urgent low")} - return a.withHook(n), changed + er.Process(ctx, a.Threshold, data) + return er } -func validateGlucoseAlertDatum(datum *glucose.Glucose, t Threshold) (float64, float64, error) { - if datum.Blood.Units == nil || datum.Blood.Value == nil || datum.Blood.Time == nil { - return 0, 0, errors.Newf("Unable to evaluate datum: Units, Value, or Time is nil") +func (r EvalResult) logGlucoseEval(ctx context.Context) { + fields := log.Fields{ + "isAlerting?": r.Evaluator(r.NewestValue, r.Threshold), + "threshold": r.Threshold, + "value": r.NewestValue, } - threshold := nontypesglucose.NormalizeValueForUnits(&t.Value, datum.Blood.Units) - if threshold == nil { - return 0, 0, errors.Newf("Unable to normalize threshold units: normalized to nil") + r.lgr(ctx).WithFields(fields).Info(r.Name) +} + +func (r EvalResult) lgr(ctx context.Context) log.Logger { + var lgr log.Logger = log.LoggerFromContext(ctx) + if lgr == nil { + // NewLogger can only fail if os.Stderr is nil. + lgr, _ = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) } - return *datum.Blood.Value, *threshold, nil + return lgr +} + +func normalizeUnits(datum *Glucose, t Threshold) (float64, float64, error) { + if datum == nil || datum.Blood.Units == nil || datum.Blood.Value == nil { + return 0, 0, errors.Newf("Unable to evaluate datum: Units or Value is nil") + } + + // Both units are the same, no need to convert either. + if t.Units == *datum.Blood.Units { + return *datum.Blood.Value, t.Value, nil + } + + // The units don't match. 
There exists a known good function that converts to MmolL, so + // we'll convert whichever value isn't in MmolL to MmolL. + + if nontypesglucose.IsMmolL(t.Units) { + n := nontypesglucose.NormalizeValueForUnits(datum.Blood.Value, datum.Blood.Units) + return *n, t.Value, nil + } else if nontypesglucose.IsMmolL(*datum.Blood.Units) { + n := nontypesglucose.NormalizeValueForUnits(&t.Value, &t.Units) + return *datum.Blood.Value, *n, nil + } + + // This shouldn't happen. It indicates a new, third glucose unit is in use. + return 0, 0, errors.New("Unable to handle unit conversion, neither is MmolL") } // NotLoopingAlert extends Base with a delay. @@ -335,64 +440,46 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { } // Evaluate if the device is looping. -func (a *NotLoopingAlert) Evaluate(ctx context.Context, - decisions []*dosingdecision.DosingDecision) (_ *NotificationWithHook, changed bool) { - - defer func() { logNotLoopingEvaluation(ctx, changed, a.IsActive()) }() - - lastLooped := time.Time{} +func (a *NotLoopingAlert) Evaluate(ctx context.Context, decisions []*DosingDecision) EvalResult { + er := EvalResult{} for _, decision := range decisions { if decision.Reason == nil || *decision.Reason != DosingDecisionReasonLoop { continue } if decision.Time == nil { + a.lgr(ctx).Warn("Unable to process: Time == nil; that shouldn't be possible") continue } - if decision.Time.After(lastLooped) { - lastLooped = *decision.Time + if !decision.Time.IsZero() { + er.NewestTime = *decision.Time + break } } - - delay := DefaultNotLoopingDelay - if a.Delay.Duration() != 0 { - delay = a.Delay.Duration() - } - if time.Since(lastLooped) < delay { - if a.IsActive() { - a.Resolved = time.Now() - return nil, true - } - return nil, false + delay := a.Delay.Duration() + if delay == 0 { + delay = DefaultNotLoopingDelay } + er.OutOfRange = time.Since(er.NewestTime) > delay + logNotLoopingEvaluation(a.lgr(ctx), er.OutOfRange, time.Since(er.NewestTime), delay) - if a.IsActive() 
{ - if time.Since(a.Sent) > NotLoopingRepeat { - notification := a.withHook(&Notification{Message: NotLoopingMessage}) - return notification, false - } - return nil, false - } else { - a.Triggered = time.Now() - notification := a.withHook(&Notification{Message: NotLoopingMessage}) - return notification, true - } + return er } // DefaultNotLoopingDelay is used when the delay has a Zero value (its default). const DefaultNotLoopingDelay = 30 * time.Minute -func logNotLoopingEvaluation(ctx context.Context, changed, isAlerting bool) { +func logNotLoopingEvaluation(lgr log.Logger, isAlerting bool, since, threshold time.Duration) { fields := log.Fields{ - "changed": changed, "isAlerting?": isAlerting, + "value": since, + "threshold": threshold, } - lgr := log.LoggerFromContext(ctx) lgr.WithFields(fields).Info("not looping") } const NotLoopingMessage = "Loop is not able to loop" -// DosingDecisionReasonLoop is specified in a [dosingdecision.DosingDecision] to indicate +// DosingDecisionReasonLoop is specified in a [DosingDecision] to indicate // that the decision is part of a loop adjustment (as opposed to bolus or something else). const DosingDecisionReasonLoop string = "loop" @@ -418,51 +505,28 @@ func (a NoCommunicationAlert) Validate(validator structure.Validator) { } // Evaluate if the time since data was last received warrants a notification. 
-func (a *NoCommunicationAlert) Evaluate(ctx context.Context, - lastReceived time.Time) (_ *NotificationWithHook, changed bool) { +func (a *NoCommunicationAlert) Evaluate(ctx context.Context, lastReceived time.Time) EvalResult { + er := EvalResult{} - lgr := log.LoggerFromContext(ctx) if lastReceived.IsZero() { - err := errors.Newf("Unable to evaluate no communication: time is Zero") - lgr.WithError(err).Debug("Unable to evaluate no communication") - return nil, false + a.lgr(ctx).Info("Unable to evaluate no communication: time is Zero") + return er } - defer func() { - logNoCommunicationEvaluation(lgr, changed, a.IsActive()) - }() - delay := DefaultNoCommunicationDelay - if a.Delay.Duration() > 0 { - delay = a.Delay.Duration() + delay := a.Delay.Duration() + if delay == 0 { + delay = DefaultNoCommunicationDelay } + er.OutOfRange = time.Since(lastReceived) > delay + er.Started = lastReceived + er.NewestTime = lastReceived + a.lgr(ctx).WithField("isAlerting?", er.OutOfRange).Info("no communication") - if time.Since(lastReceived) < delay { - if a.IsActive() { - a.Resolved = time.Now() - return nil, true - } - return nil, false - } - if !a.IsActive() { - a.Triggered = time.Now() - return a.withHook(&Notification{Message: NoCommunicationMessage}), true - } - if time.Since(a.Sent) > DefaultNoCommunicationDelay { - return a.withHook(&Notification{Message: NoCommunicationMessage}), false - } - return nil, false + return er } const DefaultNoCommunicationDelay = 5 * time.Minute -func logNoCommunicationEvaluation(lgr log.Logger, changed, isAlerting bool) { - fields := log.Fields{ - "changed": changed, - "isAlerting?": isAlerting, - } - lgr.WithFields(fields).Info("no communication") -} - const NoCommunicationMessage = "Tidepool is unable to communicate with a user's device" // LowAlert extends Base with threshold and a delay. @@ -490,50 +554,13 @@ func (a LowAlert) Validate(validator structure.Validator) { // Evaluate the given data to determine if an alert should be sent. 
// // Assumes data is pre-sorted in descending order by Time. -func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) ( - notification *NotificationWithHook, _ bool) { - - lgr := log.LoggerFromContext(ctx) - if len(data) == 0 { - lgr.Debug("no data to evaluate for low") - return nil, false - } - var eventBegan time.Time - var okDatum, okThreshold float64 - var err error - defer func() { - logGlucoseAlertEvaluation(lgr, "low", notification, okDatum, okThreshold) - }() - for _, datum := range data { - okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) - if err != nil { - lgr.WithError(err).Debug("Skipping low alert datum evaluation") - continue - } - active := okDatum < okThreshold - if !active { - break - } - if (*datum.Time).Before(eventBegan) || eventBegan.IsZero() { - eventBegan = *datum.Time - } +func (a *LowAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := EvalResult{ + Name: "low", + Evaluator: func(dv, tv float64) bool { return dv < tv }, } - changed := false - if eventBegan.IsZero() { - if a.IsActive() { - a.Resolved = time.Now() - changed = true - } - return nil, changed - } - if !a.IsActive() { - if time.Since(eventBegan) > a.Delay.Duration() { - a.Triggered = time.Now() - changed = true - } - } - n := &Notification{Message: genGlucoseThresholdMessage("below low")} - return a.withHook(n), changed + er.Process(ctx, a.Threshold, data) + return er } func genGlucoseThresholdMessage(alertType string) string { @@ -565,63 +592,13 @@ func (a HighAlert) Validate(validator structure.Validator) { // Evaluate the given data to determine if an alert should be sent. // // Assumes data is pre-sorted in descending order by Time. 
-func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) ( - notification *NotificationWithHook, _ bool) { - - lgr := log.LoggerFromContext(ctx) - if len(data) == 0 { - lgr.Debug("no data to evaluate for high") - return nil, false - } - var eventBegan time.Time - var okDatum, okThreshold float64 - var err error - defer func() { - logGlucoseAlertEvaluation(lgr, "high", notification, okDatum, okThreshold) - }() - for _, datum := range data { - okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) - if err != nil { - lgr.WithError(err).Debug("Skipping high alert datum evaluation") - continue - } - active := okDatum > okThreshold - if !active { - break - } - if (*datum.Time).Before(eventBegan) || eventBegan.IsZero() { - eventBegan = *datum.Time - } - } - changed := false - if eventBegan.IsZero() { - if a.IsActive() { - a.Resolved = time.Now() - changed = true - } - return nil, changed - } - if !a.IsActive() { - if time.Since(eventBegan) > a.Delay.Duration() { - a.Triggered = time.Now() - changed = true - } +func (a *HighAlert) Evaluate(ctx context.Context, data []*Glucose) EvalResult { + er := &EvalResult{ + Name: "high", + Evaluator: func(dv, tv float64) bool { return dv > tv }, } - n := &Notification{Message: genGlucoseThresholdMessage("above high")} - return a.withHook(n), changed -} - -// logGlucoseAlertEvaluation is called during each glucose-based evaluation for -// record-keeping. -func logGlucoseAlertEvaluation(lgr log.Logger, alertType string, - notification *NotificationWithHook, value, threshold float64) { - - fields := log.Fields{ - "isAlerting?": notification != nil, - "threshold": threshold, - "value": value, - } - lgr.WithFields(fields).Info(alertType) + er.Process(ctx, a.Threshold, data) + return *er } // DurationMinutes reads a JSON integer and converts it to a time.Duration. 
@@ -700,6 +677,7 @@ type Notification struct { Message string RecipientUserID string FollowedUserID string + Sent func(time.Time) } // RecordsRepository encapsulates queries of the records collection for use with alerts. @@ -711,3 +689,9 @@ type RecordsRepository interface { EnsureIndexes() error } + +// DosingDecision is an alias of convenience. +type DosingDecision = dosingdecision.DosingDecision + +// Glucose is an alias of convenience. +type Glucose = glucose.Glucose diff --git a/alerts/config_test.go b/alerts/config_test.go index c63ea9238f..0a8558f8aa 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -3,6 +3,7 @@ package alerts import ( "bytes" "context" + "encoding/json" "fmt" "strings" "testing" @@ -14,8 +15,6 @@ import ( nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood" - "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/log" logTest "github.com/tidepool-org/platform/log/test" "github.com/tidepool-org/platform/pointer" @@ -29,9 +28,10 @@ func TestSuite(t *testing.T) { } const ( - mockUserID1 = "008c7f79-6545-4466-95fb-34e3ba728d38" - mockUserID2 = "b1880201-30d5-4190-92bb-6afcf08ca15e" - mockDataSetID = "4d3b1abc280511ef9f41abf13a093b64" + mockUserID1 = "11111111-7357-7357-7357-111111111111" + mockUserID2 = "22222222-7357-7357-7357-222222222222" + mockUserID3 = "33333333-7357-7357-7357-333333333333" + mockDataSetID = "73577357735773577357735773577357" ) var _ = Describe("Config", func() { @@ -74,252 +74,350 @@ var _ = Describe("Config", func() { "delay": 6 } }`, mockUserID1, mockUserID2, mockDataSetID) - conf := &Config{} - err := request.DecodeObject(context.Background(), nil, buf, conf) + cfg := &Config{} + err := request.DecodeObject(context.Background(), nil, buf, cfg) Expect(err).ToNot(HaveOccurred()) - 
Expect(conf.UserID).To(Equal(mockUserID1)) - Expect(conf.FollowedUserID).To(Equal(mockUserID2)) - Expect(conf.UploadID).To(Equal(mockDataSetID)) - Expect(conf.High.Enabled).To(Equal(false)) - Expect(conf.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) - Expect(conf.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) - Expect(conf.High.Threshold.Value).To(Equal(10.0)) - Expect(conf.High.Threshold.Units).To(Equal(nontypesglucose.MmolL)) - Expect(conf.Low.Enabled).To(Equal(true)) - Expect(conf.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) - Expect(conf.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) - Expect(conf.Low.Threshold.Value).To(Equal(80.0)) - Expect(conf.Low.Threshold.Units).To(Equal(nontypesglucose.MgdL)) - Expect(conf.UrgentLow.Enabled).To(Equal(false)) - Expect(conf.UrgentLow.Threshold.Value).To(Equal(47.5)) - Expect(conf.UrgentLow.Threshold.Units).To(Equal(nontypesglucose.MgdL)) - Expect(conf.NotLooping.Enabled).To(Equal(true)) - Expect(conf.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) - // Expect(conf.NoCommunication.Enabled).To(Equal(true)) - // Expect(conf.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) + Expect(cfg.UserID).To(Equal(mockUserID1)) + Expect(cfg.FollowedUserID).To(Equal(mockUserID2)) + Expect(cfg.UploadID).To(Equal(mockDataSetID)) + Expect(cfg.Alerts.High.Enabled).To(Equal(false)) + Expect(cfg.Alerts.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) + Expect(cfg.Alerts.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) + Expect(cfg.Alerts.High.Threshold.Value).To(Equal(10.0)) + Expect(cfg.Alerts.High.Threshold.Units).To(Equal(nontypesglucose.MmolL)) + Expect(cfg.Alerts.Low.Enabled).To(Equal(true)) + Expect(cfg.Alerts.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) + Expect(cfg.Alerts.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) + Expect(cfg.Alerts.Low.Threshold.Value).To(Equal(80.0)) + 
Expect(cfg.Alerts.Low.Threshold.Units).To(Equal(nontypesglucose.MgdL)) + Expect(cfg.Alerts.UrgentLow.Enabled).To(Equal(false)) + Expect(cfg.Alerts.UrgentLow.Threshold.Value).To(Equal(47.5)) + Expect(cfg.Alerts.UrgentLow.Threshold.Units).To(Equal(nontypesglucose.MgdL)) + Expect(cfg.Alerts.NotLooping.Enabled).To(Equal(true)) + Expect(cfg.Alerts.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) + // Expect(conf.Alerts.NoCommunication.Enabled).To(Equal(true)) + // Expect(conf.Alerts.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) }) Context("validations", func() { - testConfig := func() Config { - return Config{ - UserID: mockUserID1, - FollowedUserID: mockUserID2, - UploadID: mockDataSetID, - } - } - It("requires an UploadID", func() { - c := testConfig() - c.UploadID = "" + cfg := testConfig() + cfg.UploadID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) It("requires an FollowedUserID", func() { - c := testConfig() - c.FollowedUserID = "" + cfg := testConfig() + cfg.FollowedUserID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) It("requires an UserID", func() { - c := testConfig() - c.UserID = "" + cfg := testConfig() + cfg.UserID = "" val := validator.New(logTest.NewLogger()) - c.Validate(val) + cfg.Validate(val) Expect(val.Error()).To(MatchError(ContainSubstring("value is empty"))) }) }) - Describe("EvaluateData", func() { - Context("when a notification is returned", func() { - It("injects the userIDs", func() { - ctx := contextWithTestLogger() - mockGlucoseData := []*glucose.Glucose{ - { - Blood: blood.Blood{ - Base: types.Base{ - Time: pointer.FromAny(time.Now()), - }, - Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(0.0), - }, - }, - } - conf := Config{ - UserID: mockUserID1, - 
FollowedUserID: mockUserID2, - Alerts: Alerts{ - DataAlerts: DataAlerts{ - UrgentLow: &UrgentLowAlert{ - Base: Base{Enabled: true}, - Threshold: Threshold{ - Value: 10, - Units: nontypesglucose.MmolL, - }, - }, - }, - }, - } + Context("when a notification is returned", func() { + Describe("EvaluateNoCommunication", func() { + It("injects user ids", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NoCommunication.Enabled = true - notification, _ := conf.EvaluateData(ctx, mockGlucoseData, nil) + when := time.Now().Add(-(DefaultNoCommunicationDelay + time.Second)) + n, _ := cfg.EvaluateNoCommunication(ctx, when) - Expect(notification).ToNot(BeNil()) - Expect(notification.RecipientUserID).To(Equal(mockUserID1)) - Expect(notification.FollowedUserID).To(Equal(mockUserID2)) + Expect(n).ToNot(BeNil()) + Expect(n.RecipientUserID).To(Equal(mockUserID1)) + Expect(n.FollowedUserID).To(Equal(mockUserID2)) }) }) }) - Describe("EvaluateNoCommunication", func() { - Context("when a notification is returned", func() { - It("injects the userIDs", func() { - ctx := contextWithTestLogger() - conf := Config{ - UserID: mockUserID1, - FollowedUserID: mockUserID2, - Alerts: Alerts{ - NoCommunicationAlert: &NoCommunicationAlert{ - Base: Base{ - Enabled: true, - }, - }, - }, - } + Describe("EvaluateData", func() { + var okGlucose = []*Glucose{testInRangeDatum()} + var okDosing = []*DosingDecision{testDosingDecision(time.Second)} + + type evalTest struct { + Name string + Activity func(*Config) *AlertActivity + Glucose []*Glucose + Dosing []*DosingDecision + } - when := time.Now().Add(-time.Second + -DefaultNoCommunicationDelay) - notification, _ := conf.EvaluateNoCommunication(ctx, when) + tests := []evalTest{ + {"UrgentLow", func(c *Config) *AlertActivity { return &c.Activity.UrgentLow }, + []*Glucose{testUrgentLowDatum()}, nil}, + {"Low", func(c *Config) *AlertActivity { return &c.Activity.Low }, + []*Glucose{testLowDatum()}, nil}, + {"High", func(c *Config) *AlertActivity { return 
&c.Activity.High }, + []*Glucose{testHighDatum()}, nil}, + {"NotLooping", func(c *Config) *AlertActivity { return &c.Activity.NotLooping }, + nil, []*DosingDecision{testDosingDecision(-30 * time.Hour)}}, + } + for _, test := range tests { + Context(test.Name, func() { + It("is triggered", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + cfg.EvaluateData(ctx, okGlucose, okDosing) + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).ToNot(BeZero()) + }) - Expect(notification).ToNot(BeNil()) - Expect(notification.RecipientUserID).To(Equal(mockUserID1)) - Expect(notification.FollowedUserID).To(Equal(mockUserID2)) - }) - }) - }) + It("doesn't update its triggered time", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + cfg.EvaluateData(ctx, okGlucose, okDosing) + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).ToNot(BeZero()) + prev := test.Activity(cfg).Triggered + n, _ = cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Triggered).To(Equal(prev)) + }) - Context("Base", func() { - Context("Activity", func() { - Context("IsActive()", func() { - It("is true", func() { - triggered := time.Now() - resolved := triggered.Add(-time.Nanosecond) - a := Activity{ - Triggered: triggered, - Resolved: resolved, - } - Expect(a.IsActive()).To(BeTrue()) + It("is resolved", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + Expect(test.Activity(cfg).Resolved).To(BeZero()) + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + Expect(test.Activity(cfg).Resolved).To(BeTemporally("~", time.Now())) }) - It("is false", func() { - triggered := time.Now() - resolved := 
triggered.Add(time.Nanosecond) - a := Activity{ - Triggered: triggered, - Resolved: resolved, - } - Expect(a.IsActive()).To(BeFalse()) + It("doesn't update its resolved time", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + n, _ := cfg.EvaluateData(ctx, test.Glucose, test.Dosing) + Expect(n).ToNot(BeNil()) + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + prev := test.Activity(cfg).Resolved + n, _ = cfg.EvaluateData(ctx, okGlucose, okDosing) + Expect(n).To(BeNil()) + Expect(test.Activity(cfg).Resolved).To(Equal(prev)) }) }) + } - Context("IsSent()", func() { - It("is true", func() { - triggered := time.Now() - sent := triggered.Add(time.Nanosecond) - a := Activity{ - Triggered: triggered, - Sent: sent, - } - Expect(a.IsSent()).To(BeTrue()) - }) + type logTest struct { + Name string + Msg string + Fields log.Fields + } - It("is false", func() { - triggered := time.Now() - notified := triggered.Add(-time.Nanosecond) - a := Activity{ - Triggered: triggered, - Sent: notified, - } - Expect(a.IsSent()).To(BeFalse()) - }) + logTests := []logTest{ + {"UrgentLow", "urgent low", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 3.0}}, + {"Low", "low", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 4.0}}, + {"High", "high", log.Fields{ + "isAlerting?": false, "value": 6.0, "threshold": 10.0}}, + {"NotLooping", "not looping", log.Fields{ + "isAlerting?": false, + // "value" is time-dependent, and would require a lot of work to mock. This + // should be close enough. 
+ "threshold": DefaultNotLoopingDelay, + }}, + } + for _, test := range logTests { + It(test.Name+" logs evaluations", func() { + ctx, lgr, cfg := newConfigTest() + cfg.Alerts.NotLooping.Base.Enabled = true + glucose := []*Glucose{testInRangeDatum()} + dosing := []*DosingDecision{testDosingDecision(-1)} + cfg.EvaluateData(ctx, glucose, dosing) + + Expect(func() { + lgr.AssertLog(log.InfoLevel, test.Msg, test.Fields) + }).ToNot(Panic(), quickJSON(map[string]any{ + "got": lgr.SerializedFields, + "expected": map[string]any{"message": test.Msg, "fields": test.Fields}, + })) + }) + } + + It("injects user IDs into the returned Notification", func() { + ctx, _, cfg := newConfigTest() + mockGlucoseData := []*Glucose{testUrgentLowDatum()} + + n, _ := cfg.EvaluateData(ctx, mockGlucoseData, nil) + + Expect(n).ToNot(BeNil()) + Expect(n.RecipientUserID).To(Equal(mockUserID1)) + Expect(n.FollowedUserID).To(Equal(mockUserID2)) + }) + + It("ripples the needs upsert value (from urgent low)", func() { + ctx, _, cfg := newConfigTest() + + // Generate an urgent low notification. + n, _ := cfg.EvaluateData(ctx, []*Glucose{testUrgentLowDatum()}, nil) + Expect(n).ToNot(Equal(nil)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. + n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + It("ripples the needs upsert value (from low)", func() { + ctx, _, cfg := newConfigTest() + + // Generate a low notification. + n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testLowDatum()}, nil) + Expect(n).ToNot(BeNil()) + Expect(needsUpsert).To(Equal(true)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. 
+ n, needsUpsert = cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + It("ripples the needs upsert value (form high)", func() { + ctx, _, cfg := newConfigTest() + + // Generate a high notification. + n, needsUpsert := cfg.EvaluateData(ctx, []*Glucose{testHighDatum()}, nil) + Expect(n).ToNot(BeNil()) + Expect(needsUpsert).To(Equal(true)) + // Now resolve the alert, resulting in changed being true, but without a + // notification. + n, needsUpsert = cfg.EvaluateData(ctx, []*Glucose{testInRangeDatum()}, nil) + Expect(n).To(BeNil()) + Expect(needsUpsert).To(Equal(true)) + }) + + Describe("Repeat", func() { + It("Low is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.Low.Repeat = DurationMinutes(10 * time.Minute) + cfg.Alerts.Low.Delay = DurationMinutes(1 * time.Nanosecond) + cfg.Activity.Low.Triggered = time.Now().Add(-time.Hour) + cfg.Activity.Low.Sent = time.Now().Add((-10 * time.Minute) + time.Second) + testData := []*Glucose{testLowDatum()} + + n, _ := cfg.EvaluateData(ctx, testData, nil) + Expect(n).To(BeNil()) + + cfg.Activity.Low.Sent = time.Now().Add((-10 * time.Minute) - time.Second) + + n, _ = cfg.EvaluateData(ctx, testData, nil) + Expect(n).ToNot(BeNil()) + }) + + It("High is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.High.Repeat = DurationMinutes(10 * time.Minute) + cfg.Alerts.High.Delay = DurationMinutes(1 * time.Nanosecond) + cfg.Activity.High.Triggered = time.Now().Add(-time.Hour) + cfg.Activity.High.Sent = time.Now().Add((-10 * time.Minute) + time.Second) + delayed := []*Glucose{testHighDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + cfg.Activity.High.Sent = time.Now().Add((-10 * time.Minute) - time.Second) + + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) }) }) - }) - Context("DataAlerts", func() { - Describe("Evaluate", func() { - var ctxAndData = func() (context.Context, 
*DataAlerts) { - return contextWithTestLogger(), &DataAlerts{ - UrgentLow: testUrgentLowAlert(), - Low: testLowAlert(), - High: testHighAlert(), - } - } + Describe("Delay", func() { + It("Low is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.Low.Delay = DurationMinutes(5 * time.Minute) + cfg.Alerts.Low.Repeat = DurationMinutes(1 * time.Nanosecond) + delayed := []*Glucose{testLowDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + delayed[0].Time = pointer.FromAny(time.Now().Add(-5 * time.Minute)) - It("ripples changed value (from urgent low)", func() { - ctx, dataAlerts := ctxAndData() - - // Generate an urgent low notification. - notification, changed := dataAlerts.Evaluate(ctx, []*glucose.Glucose{testUrgentLowDatum}, nil) - Expect(notification).ToNot(BeNil()) - Expect(changed).To(Equal(true)) - // Now resolve the alert, resulting in changed being true, but without a - // notification. - notification, changed = dataAlerts.Evaluate(ctx, []*glucose.Glucose{testInRangeDatum}, nil) - Expect(notification).To(BeNil()) - Expect(changed).To(Equal(true)) + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) }) - It("ripples changed value (from low)", func() { - ctx, dataAlerts := ctxAndData() - - // Generate a low notification. - notification, changed := dataAlerts.Evaluate(ctx, []*glucose.Glucose{testLowDatum}, nil) - Expect(notification).ToNot(BeNil()) - Expect(changed).To(Equal(true)) - // Now resolve the alert, resulting in changed being true, but without a - // notification. 
- notification, changed = dataAlerts.Evaluate(ctx, []*glucose.Glucose{testInRangeDatum}, nil) - Expect(notification).To(BeNil()) - Expect(changed).To(Equal(true)) + It("High is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.High.Delay = DurationMinutes(5 * time.Minute) + cfg.Alerts.High.Repeat = DurationMinutes(1 * time.Nanosecond) + delayed := []*Glucose{testHighDatum()} + + n, _ := cfg.EvaluateData(ctx, delayed, nil) + Expect(n).To(BeNil()) + + delayed[0].Time = pointer.FromAny(time.Now().Add(-5 * time.Minute)) + + n, _ = cfg.EvaluateData(ctx, delayed, nil) + Expect(n).ToNot(BeNil()) }) - It("ripples changed value (form high)", func() { - ctx, dataAlerts := ctxAndData() - - // Generate a high notification. - notification, changed := dataAlerts.Evaluate(ctx, []*glucose.Glucose{testHighDatum}, nil) - Expect(notification).ToNot(BeNil()) - Expect(changed).To(Equal(true)) - // Now resolve the alert, resulting in changed being true, but without a - // notification. - notification, changed = dataAlerts.Evaluate(ctx, []*glucose.Glucose{testInRangeDatum}, nil) - Expect(notification).To(BeNil()) - Expect(changed).To(Equal(true)) + It("NotLooping is respected", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping.Enabled = true + delay := 10 * time.Minute + lessThanDelay := delay - time.Second + cfg.Alerts.NotLooping.Delay = DurationMinutes(delay) + delayed := []*DosingDecision{testDosingDecision(-lessThanDelay)} + + n, _ := cfg.EvaluateData(ctx, nil, delayed) + Expect(n).To(BeNil()) + + moreThanDelay := delay + time.Second + delayed[0].Time = pointer.FromAny(time.Now().Add(-moreThanDelay)) + + n, _ = cfg.EvaluateData(ctx, nil, delayed) + Expect(n).ToNot(BeNil()) + }) + + It("NotLooping uses its default", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping.Enabled = true + cfg.Alerts.NotLooping.Delay = 0 + lessThanDelay := DefaultNotLoopingDelay - time.Second + delayed := []*DosingDecision{testDosingDecision(-lessThanDelay)} + 
+ n, _ := cfg.EvaluateData(ctx, nil, delayed) + Expect(n).To(BeNil()) + + moreThanDelay := DefaultNotLoopingDelay + time.Second + delayed[0].Time = pointer.FromAny(time.Now().Add(-moreThanDelay)) + + n, _ = cfg.EvaluateData(ctx, nil, delayed) + Expect(n).ToNot(BeNil()) }) }) }) - var testGlucoseDatum = func(v float64) *glucose.Glucose { - return &glucose.Glucose{ - Blood: blood.Blood{ - Base: types.Base{ - Time: pointer.FromAny(time.Now()), - }, - Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(v), - }, - } - } - var testDosingDecision = func(d time.Duration) *dosingdecision.DosingDecision { - return &dosingdecision.DosingDecision{ - Base: types.Base{ - Time: pointer.FromAny(time.Now().Add(d)), - }, - Reason: pointer.FromAny(DosingDecisionReasonLoop), - } - } + It("observes NotLoopingRepeat between notifications", func() { + ctx, _, cfg := newConfigTest() + cfg.Alerts.NotLooping = testNotLooping() + yesterday := []*DosingDecision{testDosingDecision(-24 * time.Hour)} + + cfg.Activity.NotLooping.Sent = time.Now() + n, _ := cfg.EvaluateData(ctx, nil, yesterday) + Expect(n).To(BeNil()) + + cfg.Activity.NotLooping.Sent = time.Now().Add(-(1 + NotLoopingRepeat)) + n, _ = cfg.EvaluateData(ctx, nil, yesterday) + Expect(n).ToNot(BeNil()) + }) Context("UrgentLowAlert", func() { Context("Threshold", func() { @@ -347,133 +445,52 @@ var _ = Describe("Config", func() { }) Context("Evaluate", func() { - testUrgentLow := func() *UrgentLowAlert { - return &UrgentLowAlert{ - Threshold: Threshold{ - Value: 4.0, - Units: nontypesglucose.MmolL, - }, - } - } - It("handles being passed empty data", func() { - ctx := contextWithTestLogger() - var notification *NotificationWithHook - - alert := testUrgentLow() - - Expect(func() { - notification, _ = alert.Evaluate(ctx, []*glucose.Glucose{}) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) - Expect(func() { - notification, _ = alert.Evaluate(ctx, nil) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) - }) - 
- It("logs evaluation results", func() { - ctx := contextWithTestLogger() - data := []*glucose.Glucose{testGlucoseDatum(1.1)} - - alert := testUrgentLow() - - Expect(func() { - alert.Evaluate(ctx, data) - }).ToNot(Panic()) - Expect(func() { - lgr := log.LoggerFromContext(ctx).(*logTest.Logger) - lgr.AssertLog(log.InfoLevel, "urgent low", log.Fields{ - "threshold": 4.0, - "value": 1.1, - "isAlerting?": true, - }) - }).ToNot(Panic()) - }) - - Context("when currently active", func() { - It("marks itself resolved", func() { - ctx := contextWithTestLogger() - - alert := testUrgentLow() - - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - }) - }) - - Context("when currently INactive", func() { - It("doesn't re-mark itself resolved", func() { - ctx := contextWithTestLogger() - - alert := testUrgentLow() - - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - was := alert.Resolved - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(Equal(was)) - }) - }) - - It("marks itself triggered", func() { - ctx := contextWithTestLogger() - - alert := testUrgentLow() + ctx, _, cfg := newConfigTest() + ul := cfg.Alerts.UrgentLow + er := EvalResult{} Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + er = ul.Evaluate(ctx, []*Glucose{}) }).ToNot(Panic()) - Expect(alert.Triggered).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + er = ul.Evaluate(ctx, nil) }).ToNot(Panic()) 
- Expect(alert.Triggered).ToNot(BeZero()) + Expect(er.OutOfRange).To(Equal(false)) }) It("validates glucose data", func() { - ctx := contextWithTestLogger() - var notification *NotificationWithHook + ctx, _, cfg := newConfigTest() + ul := cfg.Alerts.UrgentLow + er := EvalResult{} Expect(func() { - notification, _ = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + er = ul.Evaluate(ctx, []*Glucose{testUrgentLowDatum()}) }).ToNot(Panic()) - Expect(notification).ToNot(BeNil()) + Expect(er.OutOfRange).To(Equal(true)) - badUnits := testGlucoseDatum(1) + badUnits := testInRangeDatum() badUnits.Units = nil Expect(func() { - notification, _ = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + er = ul.Evaluate(ctx, []*Glucose{badUnits}) }).ToNot(Panic()) - Expect(notification).To(BeNil()) + Expect(er.OutOfRange).To(Equal(false)) - badValue := testGlucoseDatum(1) + badValue := testInRangeDatum() badValue.Value = nil Expect(func() { - notification, _ = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + er = ul.Evaluate(ctx, []*Glucose{badValue}) }).ToNot(Panic()) - Expect(notification).To(BeNil()) + Expect(er.OutOfRange).To(Equal(false)) - badTime := testGlucoseDatum(1) - badTime.Time = nil - Expect(func() { - notification, _ = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badTime}) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) + // TODO is this still useful? 
+ // + // badTime := testGlucoseDatum(1) + // badTime.Time = nil + // Expect(func() { + // notification, _ = testUrgentLow().Evaluate(ctx, []*Glucose{badTime}) + // }).ToNot(Panic()) + // Expect(notification).To(BeNil()) }) }) @@ -481,146 +498,57 @@ var _ = Describe("Config", func() { Context("NoCommunicationAlert", func() { Context("Evaluate", func() { - testNoCommunication := func() *NoCommunicationAlert { - return &NoCommunicationAlert{} - } It("handles being passed a Zero time.Time value", func() { - ctx := contextWithTestLogger() - - alert := testNoCommunication() + ctx, _, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication Expect(func() { - alert.Evaluate(ctx, time.Time{}) + nc.Evaluate(ctx, time.Time{}) }).ToNot(Panic()) }) It("logs evaluation results", func() { - ctx := contextWithTestLogger() - alert := testNoCommunication() + ctx, lgr, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication Expect(func() { - alert.Evaluate(ctx, time.Now().Add(-12*time.Hour)) + nc.Evaluate(ctx, time.Now().Add(-12*time.Hour)) }).ToNot(Panic()) Expect(func() { - lgr := log.LoggerFromContext(ctx).(*logTest.Logger) lgr.AssertLog(log.InfoLevel, "no communication", log.Fields{ - "changed": true, "isAlerting?": true, }) }).ToNot(Panic()) }) It("honors non-Zero Delay values", func() { - ctx := contextWithTestLogger() - wontTrigger := time.Now().Add(-6 * time.Minute) - willTrigger := time.Now().Add(-12 * time.Hour) - - alert := testNoCommunication() - alert.Delay = DurationMinutes(10 * time.Minute) - - Expect(func() { - alert.Evaluate(ctx, wontTrigger) - }).ToNot(Panic()) - Expect(alert.IsActive()).To(Equal(false)) - Expect(func() { - alert.Evaluate(ctx, willTrigger) - }).ToNot(Panic()) - Expect(alert.IsActive()).To(Equal(true)) - }) - - Context("when currently active", func() { - It("marks itself resolved", func() { - ctx := contextWithTestLogger() - willTrigger := time.Now().Add(-12 * time.Hour) - willResolve := time.Now() - - alert := testNoCommunication() - - 
Expect(func() { - alert.Evaluate(ctx, willTrigger) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, willResolve) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - }) - - It("doesn't re-send before delay", func() { - ctx := contextWithTestLogger() - willTrigger := time.Now().Add(-12 * time.Hour) - - alert := testNoCommunication() - - notification, _ := alert.Evaluate(ctx, willTrigger) - Expect(notification).ToNot(BeNil()) - sentAt := time.Now() - notification.Sent(sentAt) - Expect(alert.Sent).ToNot(BeZero()) - - notification, _ = alert.Evaluate(ctx, willTrigger) - Expect(notification).To(BeNil()) - Expect(alert.Sent).To(BeTemporally("~", sentAt)) - }) - }) - - Context("when currently INactive", func() { - It("doesn't re-mark itself resolved", func() { - ctx := contextWithTestLogger() - willTrigger := time.Now().Add(-12 * time.Hour) - willResolve := time.Now() - - alert := testNoCommunication() - - Expect(func() { - alert.Evaluate(ctx, willTrigger) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, willResolve) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - was := alert.Resolved - Expect(func() { - alert.Evaluate(ctx, willTrigger) - }).ToNot(Panic()) - Expect(alert.Resolved).To(Equal(was)) - }) - }) - - It("marks itself triggered", func() { - ctx := contextWithTestLogger() - willTrigger := time.Now().Add(-10*time.Minute + -DefaultNoCommunicationDelay) - willResolve := time.Now() - - alert := testNoCommunication() - - Expect(func() { - alert.Evaluate(ctx, willResolve) - }).ToNot(Panic()) - Expect(alert.Triggered).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, willTrigger) - }).ToNot(Panic()) - Expect(alert.Triggered).ToNot(BeZero()) + ctx, _, cfg := newConfigTest() + nc := cfg.Alerts.NoCommunication + nc.Enabled = true + nc.Delay = DurationMinutes(10 * time.Minute) + + wontTrigger := time.Now().Add(-(nc.Delay.Duration() - time.Second)) + 
er := nc.Evaluate(ctx, wontTrigger) + Expect(er.OutOfRange).To(Equal(false)) + + willTrigger := time.Now().Add(-(nc.Delay.Duration() + time.Second)) + er = nc.Evaluate(ctx, willTrigger) + Expect(er.OutOfRange).To(Equal(true)) }) It("validates the time at which data was last received", func() { - ctx := contextWithTestLogger() + ctx, _, cfg := newConfigTest() validLastReceived := time.Now().Add(-10*time.Minute + -DefaultNoCommunicationDelay) invalidLastReceived := time.Time{} - var notification *NotificationWithHook + er := EvalResult{} + nc := cfg.Alerts.NoCommunication - Expect(func() { - notification, _ = testNoCommunication().Evaluate(ctx, validLastReceived) - }).ToNot(Panic()) - Expect(notification).ToNot(BeNil()) + er = nc.Evaluate(ctx, validLastReceived) + Expect(er.OutOfRange).To(Equal(true)) - Expect(func() { - notification, _ = testNoCommunication().Evaluate(ctx, invalidLastReceived) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) + er = nc.Evaluate(ctx, invalidLastReceived) + Expect(er.OutOfRange).To(Equal(false)) }) }) }) @@ -684,133 +612,53 @@ var _ = Describe("Config", func() { }) Context("Evaluate", func() { - testLow := func() *LowAlert { - return &LowAlert{ - Threshold: Threshold{ - Value: 4.0, - Units: nontypesglucose.MmolL, - }, - } - } - It("handles being passed empty data", func() { - ctx := contextWithTestLogger() - var notification *NotificationWithHook - - alert := testLow() - - Expect(func() { - notification, _ = alert.Evaluate(ctx, []*glucose.Glucose{}) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) - Expect(func() { - notification, _ = alert.Evaluate(ctx, nil) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) - }) - - It("logs evaluation results", func() { - ctx := contextWithTestLogger() - data := []*glucose.Glucose{testGlucoseDatum(1.1)} - - alert := testLow() - - Expect(func() { - alert.Evaluate(ctx, data) - }).ToNot(Panic()) - Expect(func() { - lgr := log.LoggerFromContext(ctx).(*logTest.Logger) - 
lgr.AssertLog(log.InfoLevel, "low", log.Fields{ - "threshold": 4.0, - "value": 1.1, - "isAlerting?": true, - }) - }).ToNot(Panic()) - }) - - Context("when currently active", func() { - It("marks itself resolved", func() { - ctx := contextWithTestLogger() - - alert := testLow() - - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - }) - }) - - Context("when currently INactive", func() { - It("doesn't re-mark itself resolved", func() { - ctx := contextWithTestLogger() - - alert := testLow() - - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - was := alert.Resolved - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(Equal(was)) - }) - }) - - It("marks itself triggered", func() { - ctx := contextWithTestLogger() - - alert := testLow() + ctx, _, cfg := newConfigTest() + er := EvalResult{} + low := cfg.Alerts.Low Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + er = low.Evaluate(ctx, []*Glucose{}) }).ToNot(Panic()) - Expect(alert.Triggered).To(BeZero()) + Expect(er.OutOfRange).To(Equal(false)) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + er = low.Evaluate(ctx, nil) }).ToNot(Panic()) - Expect(alert.Triggered).ToNot(BeZero()) + Expect(er.OutOfRange).To(Equal(false)) }) It("validates glucose data", func() { - ctx := contextWithTestLogger() - var notification *NotificationWithHook + ctx, _, cfg := newConfigTest() + er := EvalResult{} + low := cfg.Alerts.Low 
Expect(func() { - notification, _ = testLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + er = low.Evaluate(ctx, []*Glucose{testUrgentLowDatum()}) }).ToNot(Panic()) - Expect(notification).ToNot(BeNil()) + Expect(er.OutOfRange).ToNot(Equal(false)) - badUnits := testGlucoseDatum(1) + badUnits := testUrgentLowDatum() badUnits.Units = nil Expect(func() { - notification, _ = testLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + er = low.Evaluate(ctx, []*Glucose{badUnits}) }).ToNot(Panic()) - Expect(notification).To(BeNil()) + Expect(er.OutOfRange).To(Equal(false)) - badValue := testGlucoseDatum(1) + badValue := testUrgentLowDatum() badValue.Value = nil Expect(func() { - notification, _ = testLow().Evaluate(ctx, []*glucose.Glucose{badValue}) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) - - badTime := testGlucoseDatum(1) - badTime.Time = nil - Expect(func() { - notification, _ = testLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + er = low.Evaluate(ctx, []*Glucose{badValue}) }).ToNot(Panic()) - Expect(notification).To(BeNil()) + Expect(er.OutOfRange).To(Equal(false)) + + // TODO is this useful? 
+ // + // badTime := testGlucoseDatum(1) + // badTime.Time = nil + // Expect(func() { + // notification, _ = low.Evaluate(ctx, []*Glucose{badTime}) + // }).ToNot(Panic()) + // Expect(notification).To(BeNil()) }) }) }) @@ -867,133 +715,53 @@ var _ = Describe("Config", func() { }) Context("Evaluate", func() { - testHigh := func() *HighAlert { - return &HighAlert{ - Threshold: Threshold{ - Value: 20.0, - Units: nontypesglucose.MmolL, - }, - } - } It("handles being passed empty data", func() { - ctx := contextWithTestLogger() - var notification *NotificationWithHook - - alert := testHigh() - - Expect(func() { - notification, _ = alert.Evaluate(ctx, []*glucose.Glucose{}) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) - Expect(func() { - notification, _ = alert.Evaluate(ctx, nil) - }).ToNot(Panic()) - Expect(notification).To(BeNil()) - }) - - It("logs evaluation results", func() { - ctx := contextWithTestLogger() - data := []*glucose.Glucose{testGlucoseDatum(21.1)} - - alert := testHigh() + ctx, _, cfg := newConfigTest() + er := EvalResult{} + high := cfg.Alerts.High Expect(func() { - alert.Evaluate(ctx, data) + er = high.Evaluate(ctx, []*Glucose{}) }).ToNot(Panic()) + Expect(er.OutOfRange).To(Equal(false)) Expect(func() { - lgr := log.LoggerFromContext(ctx).(*logTest.Logger) - lgr.AssertLog(log.InfoLevel, "high", log.Fields{ - "threshold": 20.0, - "value": 21.1, - "isAlerting?": true, - }) + er = high.Evaluate(ctx, nil) }).ToNot(Panic()) - }) - - Context("when currently active", func() { - It("marks itself resolved", func() { - ctx := contextWithTestLogger() - - alert := testHigh() - - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - }) - }) - - Context("when currently INactive", func() { - It("doesn't re-mark itself resolved", 
func() { - ctx := contextWithTestLogger() - - alert := testHigh() - - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - was := alert.Resolved - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Resolved).To(Equal(was)) - }) - }) - - It("marks itself triggered", func() { - ctx := contextWithTestLogger() - - alert := testHigh() - - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) - }).ToNot(Panic()) - Expect(alert.Triggered).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) - }).ToNot(Panic()) - Expect(alert.Triggered).ToNot(BeZero()) + Expect(er.OutOfRange).To(Equal(false)) }) It("validates glucose data", func() { - ctx := contextWithTestLogger() - var notification *NotificationWithHook + ctx, _, cfg := newConfigTest() + er := EvalResult{} + high := cfg.Alerts.High Expect(func() { - notification, _ = testHigh().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21)}) + er = high.Evaluate(ctx, []*Glucose{testHighDatum()}) }).ToNot(Panic()) - Expect(notification).ToNot(BeNil()) + Expect(er.OutOfRange).To(Equal(true)) - badUnits := testGlucoseDatum(1) + badUnits := testInRangeDatum() badUnits.Units = nil Expect(func() { - notification, _ = testHigh().Evaluate(ctx, []*glucose.Glucose{badUnits}) + er = high.Evaluate(ctx, []*Glucose{badUnits}) }).ToNot(Panic()) - Expect(notification).To(BeNil()) + Expect(er.OutOfRange).To(Equal(false)) - badValue := testGlucoseDatum(1) + badValue := testInRangeDatum() badValue.Value = nil Expect(func() { - notification, _ = testHigh().Evaluate(ctx, []*glucose.Glucose{badValue}) + er = high.Evaluate(ctx, []*Glucose{badValue}) }).ToNot(Panic()) - 
Expect(notification).To(BeNil()) + Expect(er.OutOfRange).To(Equal(false)) - badTime := testGlucoseDatum(1) + // TODO is this still useful? + badTime := testInRangeDatum() badTime.Time = nil Expect(func() { - notification, _ = testHigh().Evaluate(ctx, []*glucose.Glucose{badTime}) + er = high.Evaluate(ctx, []*Glucose{badTime}) }).ToNot(Panic()) - Expect(notification).To(BeNil()) + Expect(er.OutOfRange).To(Equal(false)) }) }) }) @@ -1026,13 +794,6 @@ var _ = Describe("Config", func() { Context("NotLoopingAlert", func() { - var decisionsOld = []*dosingdecision.DosingDecision{ - testDosingDecision(-30 * time.Hour), - } - var decisionsRecent = []*dosingdecision.DosingDecision{ - testDosingDecision(-15 * time.Second), - } - Context("Delay", func() { It("accepts values between 0 and 2 hours (inclusive)", func() { val := validator.New(logTest.NewLogger()) @@ -1058,185 +819,83 @@ var _ = Describe("Config", func() { }) Context("Evaluate", func() { - testNotLooping := func() *NotLoopingAlert { - return &NotLoopingAlert{ - Base: Base{}, - Delay: 0, - } - } It("uses a default delay of 30 minutes", func() { - ctx := contextWithTestLogger() - decisionsNoAlert := []*dosingdecision.DosingDecision{ + ctx, _, cfg := newConfigTest() + decisionsNoAlert := []*DosingDecision{ testDosingDecision(-29 * time.Minute), } - decisionsWithAlert := []*dosingdecision.DosingDecision{ + decisionsWithAlert := []*DosingDecision{ testDosingDecision(-30 * time.Minute), } + nl := cfg.Alerts.NotLooping - alert := testNotLooping() - - notification, _ := alert.Evaluate(ctx, decisionsNoAlert) - Expect(notification).To(BeNil()) - notification, _ = alert.Evaluate(ctx, decisionsWithAlert) - Expect(notification).ToNot(BeNil()) - Expect(notification.Message).To(ContainSubstring("not able to loop")) + er := nl.Evaluate(ctx, decisionsNoAlert) + Expect(er.OutOfRange).To(Equal(false), er.String()) + er = nl.Evaluate(ctx, decisionsWithAlert) + Expect(er.OutOfRange).To(Equal(true)) }) It("respects custom delays", func() 
{ - ctx := contextWithTestLogger() - decisionsNoAlert := []*dosingdecision.DosingDecision{ + ctx, _, cfg := newConfigTest() + decisionsNoAlert := []*DosingDecision{ testDosingDecision(-14 * time.Minute), } - decisionsWithAlert := []*dosingdecision.DosingDecision{ + decisionsWithAlert := []*DosingDecision{ testDosingDecision(-15 * time.Minute), } + nl := cfg.Alerts.NotLooping + nl.Delay = DurationMinutes(15 * time.Minute) - alert := testNotLooping() - alert.Delay = DurationMinutes(15 * time.Minute) - - notification, _ := alert.Evaluate(ctx, decisionsNoAlert) - Expect(notification).To(BeNil()) - notification, _ = alert.Evaluate(ctx, decisionsWithAlert) - Expect(notification).ToNot(BeNil()) - Expect(notification.Message).To(ContainSubstring("not able to loop")) + er := nl.Evaluate(ctx, decisionsNoAlert) + Expect(er.OutOfRange).To(Equal(false)) + er = nl.Evaluate(ctx, decisionsWithAlert) + Expect(er.OutOfRange).To(Equal(true)) }) It("handles being passed empty data", func() { - ctx := contextWithTestLogger() - var notification *NotificationWithHook - - alert := testNotLooping() - - Expect(func() { - notification, _ = alert.Evaluate(ctx, []*dosingdecision.DosingDecision{}) - }).ToNot(Panic()) - Expect(notification.Message).To(ContainSubstring("Loop is not able to loop")) - Expect(func() { - notification, _ = alert.Evaluate(ctx, nil) - }).ToNot(Panic()) - Expect(notification.Message).To(ContainSubstring("Loop is not able to loop")) - }) - - It("logs evaluation results", func() { - ctx := contextWithTestLogger() - decisions := []*dosingdecision.DosingDecision{ - testDosingDecision(-30 * time.Second), - } - - alert := testNotLooping() - - Expect(func() { - alert.Evaluate(ctx, decisions) - }).ToNot(Panic()) - Expect(func() { - lgr := log.LoggerFromContext(ctx).(*logTest.Logger) - lgr.AssertInfo("not looping", log.Fields{ - "changed": false, - "isAlerting?": false, - }) - }).ToNot(Panic()) - }) - - Context("when currently active", func() { - It("marks itself resolved", 
func() { - ctx := contextWithTestLogger() + ctx, _, cfg := newConfigTest() + er := EvalResult{} - alert := testNotLooping() - - Expect(func() { - alert.Evaluate(ctx, decisionsOld) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, decisionsRecent) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - }) - }) - - Context("when currently INactive", func() { - It("doesn't re-mark itself resolved", func() { - ctx := contextWithTestLogger() - - alert := testNotLooping() - - Expect(func() { - alert.Evaluate(ctx, decisionsOld) - }).ToNot(Panic()) - Expect(alert.Resolved).To(BeZero()) - Expect(func() { - alert.Evaluate(ctx, decisionsRecent) - }).ToNot(Panic()) - Expect(alert.Resolved).ToNot(BeZero()) - was := alert.Resolved - Expect(func() { - alert.Evaluate(ctx, decisionsRecent) - }).ToNot(Panic()) - Expect(alert.Resolved).To(Equal(was)) - }) - }) - - It("marks itself triggered", func() { - ctx := contextWithTestLogger() - - alert := testNotLooping() + nl := cfg.Alerts.NotLooping Expect(func() { - alert.Evaluate(ctx, decisionsRecent) + er = nl.Evaluate(ctx, []*DosingDecision{}) }).ToNot(Panic()) - Expect(alert.Triggered).To(BeZero()) + Expect(er.OutOfRange).To(Equal(true)) Expect(func() { - alert.Evaluate(ctx, decisionsOld) + er = nl.Evaluate(ctx, nil) }).ToNot(Panic()) - Expect(alert.Triggered).ToNot(BeZero()) - }) - - It("observes NotLoopingRepeat between notifications", func() { - ctx := contextWithTestLogger() - noRepeat := time.Now().Add(-4 * time.Minute) - triggersRepeat := noRepeat.Add(-NotLoopingRepeat) - - alert := testNotLooping() - alert.Sent = noRepeat - alert.Triggered = noRepeat - - notification, _ := alert.Evaluate(ctx, decisionsOld) - Expect(notification).To(BeNil()) - - alert.Sent = triggersRepeat - notification, _ = alert.Evaluate(ctx, decisionsOld) - Expect(notification).ToNot(BeNil()) + Expect(er.OutOfRange).To(Equal(true)) }) It("ignores decisions without a reason", func() { - ctx := 
contextWithTestLogger() - - alert := testNotLooping() + ctx, _, cfg := newConfigTest() + nl := cfg.Alerts.NotLooping noReason := testDosingDecision(time.Second) noReason.Reason = nil - decisions := []*dosingdecision.DosingDecision{ + decisions := []*DosingDecision{ testDosingDecision(-time.Hour), noReason, } - notification, _ := alert.Evaluate(ctx, decisions) - Expect(notification).ToNot(BeNil()) + er := nl.Evaluate(ctx, decisions) + Expect(er.OutOfRange).To(Equal(true)) }) It("ignores decisions without a time", func() { - ctx := contextWithTestLogger() + ctx, _, cfg := newConfigTest() - alert := testNotLooping() + nl := cfg.Alerts.NotLooping noTime := testDosingDecision(time.Second) noTime.Time = nil - decisions := []*dosingdecision.DosingDecision{ + decisions := []*DosingDecision{ testDosingDecision(-time.Hour), noTime, } - notification, _ := alert.Evaluate(ctx, decisions) - Expect(notification).ToNot(BeNil()) + er := nl.Evaluate(ctx, decisions) + Expect(er.OutOfRange).To(Equal(true)) }) }) }) @@ -1308,7 +967,7 @@ var _ = Describe("Config", func() { conf := &Config{} err := request.DecodeObject(context.Background(), nil, buf, conf) Expect(err).To(Succeed()) - Expect(conf.Low.Repeat).To(Equal(DurationMinutes(0))) + Expect(conf.Alerts.Low.Repeat).To(Equal(DurationMinutes(0))) }) }) It("validates repeat minutes (negative)", func() { @@ -1349,77 +1008,6 @@ var _ = Describe("Config", func() { }) }) -var ( - testLowAlert = func() *LowAlert { - return &LowAlert{ - Base: Base{Enabled: true}, - Threshold: Threshold{ - Value: 4, - Units: nontypesglucose.MmolL, - }, - } - } - testHighAlert = func() *HighAlert { - return &HighAlert{ - Base: Base{Enabled: true}, - Threshold: Threshold{ - Value: 10, - Units: nontypesglucose.MmolL, - }, - } - } - testUrgentLowAlert = func() *UrgentLowAlert { - return &UrgentLowAlert{ - Base: Base{Enabled: true}, - Threshold: Threshold{ - Value: 3, - Units: nontypesglucose.MmolL, - }, - } - } - testNotLoopingAlert = func() *NotLoopingAlert { - 
return &NotLoopingAlert{ - Base: Base{Enabled: true}, - } - } - testHighDatum = &glucose.Glucose{ - Blood: blood.Blood{ - Base: types.Base{ - Time: pointer.FromAny(time.Now()), - }, - Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(11.0), - }, - } - testLowDatum = &glucose.Glucose{ - Blood: blood.Blood{ - Base: types.Base{ - Time: pointer.FromAny(time.Now()), - }, - Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(3.9), - }, - } - testUrgentLowDatum = &glucose.Glucose{ - Blood: blood.Blood{ - Base: types.Base{ - Time: pointer.FromAny(time.Now()), - }, - Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(2.9), - }, - } - testInRangeDatum = &glucose.Glucose{ - Blood: blood.Blood{ - Base: types.Base{ - Time: pointer.FromAny(time.Now()), - }, - Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(6.0), - }, - } -) - var _ = Describe("Alerts", func() { Describe("LongestDelay", func() { It("does what it says", func() { @@ -1430,7 +1018,7 @@ var _ = Describe("Alerts", func() { notLooping := testNotLoopingAlert() notLooping.Delay = DurationMinutes(5 * time.Minute) - a := DataAlerts{ + a := Alerts{ Low: low, High: high, NotLooping: notLooping, @@ -1449,7 +1037,7 @@ var _ = Describe("Alerts", func() { notLooping := testNotLoopingAlert() notLooping.Delay = DurationMinutes(5 * time.Minute) - a := DataAlerts{ + a := Alerts{ Low: low, High: high, NotLooping: notLooping, @@ -1461,7 +1049,7 @@ var _ = Describe("Alerts", func() { }) It("returns a Zero Duration when no alerts are set", func() { - a := DataAlerts{ + a := Alerts{ Low: nil, High: nil, NotLooping: nil, @@ -1474,49 +1062,42 @@ var _ = Describe("Alerts", func() { }) Describe("Evaluate", func() { - It("logs decisions", func() { - Skip("TODO logAlertEvaluation") + + It("detects urgent low data", func() { + ctx, _, cfg := newConfigTest() + data := []*Glucose{testUrgentLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) + + 
Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below urgent low threshold")) }) It("detects low data", func() { - ctx := contextWithTestLogger() - data := []*glucose.Glucose{testLowDatum} - a := DataAlerts{ - Low: testLowAlert(), - } - - notification, _ := a.Evaluate(ctx, data, nil) + ctx, _, cfg := newConfigTest() + data := []*Glucose{testLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) - Expect(notification).ToNot(BeNil()) - Expect(notification.Message).To(ContainSubstring("below low threshold")) + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below low threshold")) }) It("detects high data", func() { - ctx := contextWithTestLogger() - data := []*glucose.Glucose{testHighDatum} - a := DataAlerts{ - High: testHighAlert(), - } + ctx, _, cfg := newConfigTest() + data := []*Glucose{testHighDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) - notification, _ := a.Evaluate(ctx, data, nil) - - Expect(notification).ToNot(BeNil()) - Expect(notification.Message).To(ContainSubstring("above high threshold")) + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("above high threshold")) }) Context("with both low and urgent low alerts detected", func() { It("prefers urgent low", func() { - ctx := contextWithTestLogger() - data := []*glucose.Glucose{testUrgentLowDatum} - a := DataAlerts{ - Low: testLowAlert(), - UrgentLow: testUrgentLowAlert(), - } - - notification, _ := a.Evaluate(ctx, data, nil) + ctx, _, cfg := newConfigTest() + data := []*Glucose{testUrgentLowDatum()} + n, _ := cfg.EvaluateData(ctx, data, nil) - Expect(notification).ToNot(BeNil()) - Expect(notification.Message).To(ContainSubstring("below urgent low threshold")) + Expect(n).ToNot(BeNil()) + Expect(n.Message).To(ContainSubstring("below urgent low threshold")) }) }) }) @@ -1591,12 +1172,265 @@ var _ = Describe("Threshold", func() { }) +var _ = Describe("AlertActivity", func() { + Describe("IsActive()", func() { + It("is true", func() { + triggered := 
time.Now() + resolved := triggered.Add(-time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + resolved := triggered.Add(time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeFalse()) + }) + }) + + Describe("IsSent()", func() { + It("is true", func() { + triggered := time.Now() + sent := triggered.Add(time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Sent: sent, + } + Expect(a.IsSent()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + notified := triggered.Add(-time.Nanosecond) + a := AlertActivity{ + Triggered: triggered, + Sent: notified, + } + Expect(a.IsSent()).To(BeFalse()) + }) + }) + + Describe("normalizeUnits", func() { + Context("given the same units", func() { + It("doesn't alter them at all", func() { + d := testUrgentLowDatum() + t := Threshold{ + Value: 5.0, + Units: nontypesglucose.MmolL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + Expect(dv).To(Equal(2.9)) + + d = testUrgentLowDatum() + d.Blood.Units = pointer.FromAny(nontypesglucose.MgdL) + t = Threshold{ + Value: 5.0, + Units: nontypesglucose.MgdL, + } + dv, tv, err = normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + Expect(dv).To(Equal(2.9)) + }) + }) + + Context("value in Mmol/L & threshold in mg/dL", func() { + It("normalizes to Mmol/L", func() { + d := testUrgentLowDatum() + d.Blood.Units = pointer.FromAny(nontypesglucose.MmolL) + t := Threshold{ + Value: 90.0, + Units: nontypesglucose.MgdL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(4.99567)) + Expect(dv).To(Equal(2.9)) + }) + }) + + Context("value in mg/dL & threshold in Mmol/L", func() { + It("normalizes to Mmol/L", func() { + d := testUrgentLowDatum() + d.Blood.Value = 
pointer.FromAny(90.0) + d.Blood.Units = pointer.FromAny(nontypesglucose.MgdL) + t := Threshold{ + Value: 5.0, + Units: nontypesglucose.MmolL, + } + dv, tv, err := normalizeUnits(d, t) + Expect(err).To(Succeed()) + Expect(tv).To(Equal(5.0)) + Expect(dv).To(Equal(4.99567)) + }) + }) + }) +}) + // buff is a helper for generating a JSON []byte representation. func buff(format string, args ...interface{}) *bytes.Buffer { return bytes.NewBufferString(fmt.Sprintf(format, args...)) } -func contextWithTestLogger() context.Context { +func testDosingDecision(d time.Duration) *DosingDecision { + return &DosingDecision{ + Base: types.Base{ + Time: pointer.FromAny(time.Now().Add(d)), + }, + Reason: pointer.FromAny(DosingDecisionReasonLoop), + } +} + +func testConfig() Config { + return Config{ + UserID: mockUserID1, + FollowedUserID: mockUserID2, + UploadID: mockDataSetID, + } +} + +func testUrgentLowDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(2.9), + }, + } +} + +func testHighDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(11.0), + }, + } +} + +func testLowDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(3.9), + }, + } +} + +func testInRangeDatum() *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(6.0), + }, + } +} + +func testNoCommunication() *NoCommunicationAlert { + return &NoCommunicationAlert{ + Base: Base{Enabled: true}, + } +} + +func testNoCommunicationDisabled() *NoCommunicationAlert { + nc := 
testNoCommunication() + nc.Enabled = false + return nc +} + +func testNotLoopingDisabled() *NotLoopingAlert { + nl := testNotLooping() + nl.Enabled = false + return nl +} + +func testNotLooping() *NotLoopingAlert { + return &NotLoopingAlert{ + Base: Base{Enabled: true}, + Delay: 0, + } +} + +func testAlertsActivity() Activity { + return Activity{} +} + +func testLowAlert() *LowAlert { + return &LowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 4, + Units: nontypesglucose.MmolL, + }, + } +} +func testHighAlert() *HighAlert { + return &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10, + Units: nontypesglucose.MmolL, + }, + } +} +func testUrgentLowAlert() *UrgentLowAlert { + return &UrgentLowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 3, + Units: nontypesglucose.MmolL, + }, + } +} +func testNotLoopingAlert() *NotLoopingAlert { + return &NotLoopingAlert{ + Base: Base{Enabled: true}, + } +} + +func newConfigTest() (context.Context, *logTest.Logger, *Config) { lgr := logTest.NewLogger() - return log.NewContextWithLogger(context.Background(), lgr) + ctx := log.NewContextWithLogger(context.Background(), lgr) + cfg := &Config{ + UserID: mockUserID1, + FollowedUserID: mockUserID2, + UploadID: mockDataSetID, + Alerts: Alerts{ + UrgentLow: testUrgentLowAlert(), + Low: testLowAlert(), + High: testHighAlert(), + NotLooping: testNotLoopingDisabled(), // NOTE: disabled + NoCommunication: testNoCommunicationDisabled(), // NOTE: disabled + }, + Activity: testAlertsActivity(), + } + return ctx, lgr, cfg +} + +func quickJSON(v any) string { + b, err := json.MarshalIndent(v, "", " ") + if err != nil { + return fmt.Sprintf("", v) + } + return string(b) } diff --git a/alerts/evaluator.go b/alerts/evaluator.go index 371f42bdd8..9608c7f243 100644 --- a/alerts/evaluator.go +++ b/alerts/evaluator.go @@ -6,6 +6,7 @@ import ( "slices" "time" + "github.com/tidepool-org/platform/auth" 
"github.com/tidepool-org/platform/data/types/blood/glucose" "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/log" @@ -37,26 +38,28 @@ type GetAlertableDataResponse struct { } type Evaluator struct { - Alerts Repository - Data DataRepository - Logger log.Logger - Permissions permission.Client + Alerts Repository + Data DataRepository + Logger log.Logger + Permissions permission.Client + TokenProvider auth.ServerSessionTokenProvider } func NewEvaluator(alerts Repository, dataRepo DataRepository, permissions permission.Client, - logger log.Logger) *Evaluator { + logger log.Logger, tokenProvider auth.ServerSessionTokenProvider) *Evaluator { return &Evaluator{ - Alerts: alerts, - Data: dataRepo, - Logger: logger, - Permissions: permissions, + Alerts: alerts, + Data: dataRepo, + Logger: logger, + Permissions: permissions, + TokenProvider: tokenProvider, } } // EvaluateData generates alert notifications in response to a user uploading data. func (e *Evaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( - []*NotificationWithHook, error) { + []*Notification, error) { configs, err := e.gatherConfigs(ctx, followedUserID, dataSetID) if err != nil { @@ -65,18 +68,40 @@ func (e *Evaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID configsByDataSetID := e.mapConfigsByDataSetID(configs) - notifications := []*NotificationWithHook{} - for dsID, cfgs := range configsByDataSetID { - resp, err := e.gatherData(ctx, followedUserID, dsID, cfgs) + notifications := []*Notification{} + for dsID, configs := range configsByDataSetID { + resp, err := e.gatherData(ctx, followedUserID, dsID, configs) if err != nil { return nil, err } - notifications = slices.Concat(notifications, e.generateNotes(ctx, cfgs, resp)) + for _, config := range configs { + lgr := config.LoggerWithFields(e.Logger) + notification, needsUpsert := e.genNotificationForConfig(ctx, lgr, config, resp) + if notification != nil { + 
notifications = append(notifications, notification) + } + if needsUpsert { + err := e.Alerts.Upsert(ctx, config) + if err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } + } } return notifications, nil } +func (e *Evaluator) genNotificationForConfig(ctx context.Context, lgr log.Logger, + config *Config, resp *GetAlertableDataResponse) (*Notification, bool) { + + notification, needsUpsert := config.EvaluateData(ctx, resp.Glucose, resp.DosingDecisions) + if notification != nil { + notification.Sent = e.wrapWithUpsert(ctx, lgr, config, notification.Sent) + } + return notification, needsUpsert +} + func (e *Evaluator) mapConfigsByDataSetID(cfgs []*Config) map[string][]*Config { mapped := map[string][]*Config{} for _, cfg := range cfgs { @@ -117,6 +142,7 @@ func (e *Evaluator) authDenied(ctx context.Context) func(*Config) bool { "userID": c.UserID, "followedUserID": c.FollowedUserID, }) + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, e.TokenProvider) perms, err := e.Permissions.GetUserPermissions(ctx, c.UserID, c.FollowedUserID) if err != nil { logger.WithError(err).Warn("Unable to confirm permissions; skipping") @@ -151,53 +177,24 @@ func (e *Evaluator) gatherData(ctx context.Context, followedUserID, dataSetID st return nil, err } - return resp, nil -} + resp.Glucose = slices.DeleteFunc(resp.Glucose, + func(g *glucose.Glucose) bool { return g.Time == nil }) + resp.DosingDecisions = slices.DeleteFunc(resp.DosingDecisions, + func(d *dosingdecision.DosingDecision) bool { return d.Time == nil }) -func (e *Evaluator) generateNotes(ctx context.Context, configs []*Config, - resp *GetAlertableDataResponse) []*NotificationWithHook { - - if len(configs) == 0 { - return nil - } - - notifications := []*NotificationWithHook{} - for _, config := range configs { - lgr := e.Logger.WithFields(log.Fields{ - "userID": config.UserID, - "followedUserID": config.FollowedUserID, - "uploadID": config.UploadID, - }) - evalCtx := 
log.NewContextWithLogger(ctx, lgr) - notification, changed := config.EvaluateData(evalCtx, resp.Glucose, resp.DosingDecisions) - if notification != nil { - if notification.Sent != nil { - notification.Sent = e.wrapWithUpsert(evalCtx, lgr, config, notification.Sent) - } - notifications = append(notifications, notification) - continue - } else if changed { - // No notification was generated, so no further changes are expected. However, - // there were activity changes that need persisting. - err := e.Alerts.Upsert(ctx, config) - if err != nil { - lgr.WithError(err).Error("Unable to save changed alerts config") - continue - } - } - } - - return notifications + return resp, nil } // wrapWithUpsert to upsert the Config that triggered the Notification after it's sent. -func (e *Evaluator) wrapWithUpsert(ctx context.Context, - lgr log.Logger, config *Config, original SentFunc) SentFunc { +func (e *Evaluator) wrapWithUpsert(ctx context.Context, lgr log.Logger, config *Config, + original func(time.Time)) func(time.Time) { return func(at time.Time) { - original(at) + if original != nil { + original(at) + } if err := e.Alerts.Upsert(ctx, config); err != nil { - lgr.WithError(err).Error("Unable to save changed alerts config") + lgr.WithError(err).Error("Unable to upsert changed alerts config") } } } diff --git a/alerts/evaluator_test.go b/alerts/evaluator_test.go index 52a1449756..a7882626ca 100644 --- a/alerts/evaluator_test.go +++ b/alerts/evaluator_test.go @@ -22,7 +22,7 @@ var _ = Describe("Evaluator", func() { ctx, lgr := contextWithNullLoggerDeluxe() alertsRepo := newMockAlertsClient() - evaluator := NewEvaluator(alertsRepo, nil, nil, lgr) + evaluator := NewEvaluator(alertsRepo, nil, nil, lgr, nil) notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) Expect(notifications).To(BeEmpty()) @@ -42,7 +42,7 @@ var _ = Describe("Evaluator", func() { dataRepo := newMockDataRepo() perms := newMockPermissionClient() - evaluator := 
NewEvaluator(alertsRepo, dataRepo, perms, lgr) + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) Expect(notifications).To(BeEmpty()) @@ -60,13 +60,11 @@ var _ = Describe("Evaluator", func() { FollowedUserID: testFollowedUserID, UploadID: testDataSetID, Alerts: Alerts{ - DataAlerts: DataAlerts{ - High: &HighAlert{ - Base: Base{Enabled: true}, - Threshold: Threshold{ - Value: 10.0, - Units: nontypesglucose.MmolL, - }, + High: &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10.0, + Units: nontypesglucose.MmolL, }, }, }, @@ -76,13 +74,11 @@ var _ = Describe("Evaluator", func() { FollowedUserID: testFollowedUserID, UploadID: testDataSetID, Alerts: Alerts{ - DataAlerts: DataAlerts{ - High: &HighAlert{ - Base: Base{Enabled: true}, - Threshold: Threshold{ - Value: 10.0, - Units: nontypesglucose.MmolL, - }, + High: &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10.0, + Units: nontypesglucose.MmolL, }, }, }, @@ -91,15 +87,15 @@ var _ = Describe("Evaluator", func() { dataRepo := newMockDataRepo() dataRepo.AlertableData = []*GetAlertableDataResponse{ { - Glucose: []*glucose.Glucose{testHighDatum}, + Glucose: []*glucose.Glucose{testHighDatum()}, }, } perms := newMockPermissionClient() - perms.Allow(testUserID, permission.Follow, testFollowedUserID) + perms.Allow(testUserID, testFollowedUserID, permission.Follow) // This user still has a config, but has had their follow permission revoked. 
- perms.Allow(testUserID+"-2", permission.Read, testFollowedUserID) + perms.Allow(testUserID+"-2", testFollowedUserID, permission.Read) - evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) Expect(err).To(Succeed()) @@ -109,23 +105,17 @@ var _ = Describe("Evaluator", func() { }) It("handles data queries that return empty results (no data)", func() { - ctx, lgr := contextWithNullLoggerDeluxe() + ctx, lgr, cfg := newConfigTest() alertsRepo := newMockAlertsClient() - alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ - { - UserID: testUserID, - FollowedUserID: testFollowedUserID, - UploadID: testDataSetID, - }, - }) + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{cfg}) dataRepo := newMockDataRepo() perms := newMockPermissionClient() perms.AlwaysAllow = true - evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) - notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + e := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := e.EvaluateData(ctx, mockUserID2, mockDataSetID) - Expect(notifications).To(BeEmpty()) + Expect(ns).To(BeEmpty()) Expect(err).To(Succeed()) }) @@ -138,28 +128,20 @@ var _ = Describe("Evaluator", func() { FollowedUserID: testFollowedUserID, UploadID: testDataSetID, Alerts: Alerts{ - DataAlerts: DataAlerts{ - UrgentLow: &UrgentLowAlert{ - Base: Base{Enabled: true}, - Threshold: Threshold{ - Value: 3.0, - Units: nontypesglucose.MmolL, - }, - }, - }, + UrgentLow: testUrgentLowAlert(), }, }, }) dataRepo := newMockDataRepo() dataRepo.AlertableData = []*GetAlertableDataResponse{ { - Glucose: []*glucose.Glucose{testUrgentLowDatum}, + Glucose: []*glucose.Glucose{testUrgentLowDatum()}, }, } perms := newMockPermissionClient() perms.AlwaysAllow = true - evaluator := NewEvaluator(alertsRepo, dataRepo, perms, 
lgr) + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) if Expect(notifications).To(HaveLen(1)) { @@ -172,22 +154,15 @@ var _ = Describe("Evaluator", func() { It("queries data based on the longest delay", func() { ctx, lgr := contextWithNullLoggerDeluxe() alertsRepo := newMockAlertsClient() + longerDelay := testHighAlert() + longerDelay.Delay = DurationMinutes(3) alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ { UserID: testUserID + "-2", FollowedUserID: testFollowedUserID, UploadID: testDataSetID, Alerts: Alerts{ - DataAlerts: DataAlerts{ - High: &HighAlert{ - Base: Base{Enabled: true}, - Delay: DurationMinutes(6), - Threshold: Threshold{ - Value: 10.0, - Units: nontypesglucose.MmolL, - }, - }, - }, + High: testHighAlert(), }, }, { @@ -195,20 +170,11 @@ var _ = Describe("Evaluator", func() { FollowedUserID: testFollowedUserID, UploadID: testDataSetID, Alerts: Alerts{ - DataAlerts: DataAlerts{ - High: &HighAlert{ - Base: Base{Enabled: true}, - Delay: DurationMinutes(3), - Threshold: Threshold{ - Value: 10.0, - Units: nontypesglucose.MmolL, - }, - }, - }, + High: longerDelay, }, }, }) - highDatum := testHighDatum + highDatum := testHighDatum() highDatum.Blood.Base.Time = pointer.FromAny(time.Now().Add(-10 * time.Minute)) dataRepo := newMockDataRepo() dataRepo.AlertableData = []*GetAlertableDataResponse{ @@ -219,62 +185,37 @@ var _ = Describe("Evaluator", func() { perms := newMockPermissionClient() perms.AlwaysAllow = true - evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) - + Expect(err).To(Succeed()) if Expect(notifications).To(HaveLen(2)) { msgFound := strings.Contains(notifications[0].Message, "above high") - Expect(msgFound).To(BeTrue()) + Expect(msgFound).To(BeTrue(), 
notifications[0].Message) } - Expect(err).To(Succeed()) }) - It("wraps notifications so that changes are persisted when notifications are pushed", func() { - ctx, lgr := contextWithNullLoggerDeluxe() + It("wraps notifications so that changes are persisted when pushed", func() { + ctx, lgr, cfg := newConfigTest() startOfTest := time.Now() alertsRepo := newMockAlertsClient() - alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{ - { - UserID: testUserID, - FollowedUserID: testFollowedUserID, - UploadID: testDataSetID, - Alerts: Alerts{ - DataAlerts: DataAlerts{ - UrgentLow: &UrgentLowAlert{ - Base: Base{ - Enabled: true, - Activity: Activity{ - Triggered: time.Now().Add(-10 * time.Minute), - }, - }, - Threshold: Threshold{ - Value: 3.0, - Units: nontypesglucose.MmolL, - }, - }, - }, - }, - }, - }) + alertsRepo.ListResponses = append(alertsRepo.ListResponses, []*Config{cfg}) dataRepo := newMockDataRepo() dataRepo.AlertableData = []*GetAlertableDataResponse{ - { - Glucose: []*glucose.Glucose{testUrgentLowDatum}, - }, + {Glucose: []*glucose.Glucose{testUrgentLowDatum()}}, } perms := newMockPermissionClient() perms.AlwaysAllow = true - evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) - notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := evaluator.EvaluateData(ctx, mockUserID2, mockDataSetID) Expect(err).To(Succeed()) - for _, notification := range notifications { - Expect(func() { notification.Sent(time.Now()) }).ToNot(Panic()) + Expect(len(ns)).To(Equal(1)) + for _, n := range ns { + Expect(n.Sent).ToNot(BeNil()) + n.Sent(time.Now()) } - - Expect(len(notifications)).To(Equal(1)) - if Expect(len(alertsRepo.UpsertCalls)).To(Equal(1)) { - activity := alertsRepo.UpsertCalls[0].UrgentLow.Activity + if Expect(len(alertsRepo.UpsertCalls)).To(Equal(2)) { + activity := alertsRepo.UpsertCalls[1].Activity.UrgentLow 
Expect(activity.Sent).To(BeTemporally(">", startOfTest)) } }) @@ -291,19 +232,11 @@ var _ = Describe("Evaluator", func() { FollowedUserID: testFollowedUserID, UploadID: testDataSetID, Alerts: Alerts{ - DataAlerts: DataAlerts{ - UrgentLow: &UrgentLowAlert{ - Base: Base{ - Enabled: true, - Activity: Activity{ - Triggered: time.Now().Add(-10 * time.Minute), - }, - }, - Threshold: Threshold{ - Value: 3.0, - Units: nontypesglucose.MmolL, - }, - }, + UrgentLow: testUrgentLowAlert(), + }, + Activity: Activity{ + UrgentLow: AlertActivity{ + Triggered: time.Now().Add(-10 * time.Minute), }, }, }, @@ -311,19 +244,19 @@ var _ = Describe("Evaluator", func() { dataRepo := newMockDataRepo() dataRepo.AlertableData = []*GetAlertableDataResponse{ { - Glucose: []*glucose.Glucose{testInRangeDatum}, + Glucose: []*glucose.Glucose{testInRangeDatum()}, }, } perms := newMockPermissionClient() perms.AlwaysAllow = true - evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) - notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, nil) + ns, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) Expect(err).To(Succeed()) - Expect(len(notifications)).To(Equal(0)) + Expect(len(ns)).To(Equal(0)) if Expect(len(alertsRepo.UpsertCalls)).To(Equal(1)) { - activity := alertsRepo.UpsertCalls[0].UrgentLow.Activity + activity := alertsRepo.UpsertCalls[0].Activity.UrgentLow Expect(activity.Resolved).To(BeTemporally(">", startOfTest)) } }) @@ -338,18 +271,18 @@ var _ = Describe("Evaluator", func() { []*Config{resp1, resp2}) dataRepo := newMockDataRepo() dataRepo.AlertableData = []*GetAlertableDataResponse{ - {Glucose: []*glucose.Glucose{testUrgentLowDatum}}, + {Glucose: []*glucose.Glucose{testUrgentLowDatum()}}, } perms := newMockPermissionClient() perms.AlwaysAllow = true - evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr) + evaluator := NewEvaluator(alertsRepo, dataRepo, perms, lgr, 
nil) notifications, err := evaluator.EvaluateData(ctx, testFollowedUserID, testDataSetID) Expect(err).To(Succeed()) if Expect(len(notifications)).To(Equal(1)) { - recipientUserID := notifications[0].Notification.RecipientUserID + recipientUserID := notifications[0].RecipientUserID Expect(recipientUserID).To(Equal(testUserID)) } }) @@ -363,9 +296,7 @@ func newTestAlertsConfig(userID, dataSetID string) *Config { FollowedUserID: testFollowedUserID, UploadID: dataSetID, Alerts: Alerts{ - DataAlerts: DataAlerts{ - UrgentLow: testUrgentLowAlert(), - }, + UrgentLow: testUrgentLowAlert(), }, } } @@ -392,7 +323,12 @@ func (c *mockAlertsClient) Get(ctx context.Context, conf *Config) (*Config, erro } func (c *mockAlertsClient) Upsert(ctx context.Context, conf *Config) error { - c.UpsertCalls = append(c.UpsertCalls, conf) + if conf == nil { + c.UpsertCalls = append(c.UpsertCalls, nil) + } else { + copyConf := *conf + c.UpsertCalls = append(c.UpsertCalls, ©Conf) + } if c.UpsertError != nil { return c.UpsertError } @@ -486,12 +422,14 @@ func (c *mockPermissionClient) GetUserPermissions(ctx context.Context, } } -func (c *mockPermissionClient) Allow(requestUserID, perm, targetUserID string) { +func (c *mockPermissionClient) Allow(requestUserID, targetUserID string, perms ...string) { key := c.Key(requestUserID, targetUserID) if _, found := c.Perms[key]; !found { c.Perms[key] = permission.Permissions{} } - c.Perms[key][perm] = permission.Permission{} + for _, perm := range perms { + c.Perms[key][perm] = permission.Permission{} + } } func (c *mockPermissionClient) Key(requesterUserID, targetUserID string) string { diff --git a/alerts/tasks.go b/alerts/tasks.go index 77d8477a7b..498fcce7f5 100644 --- a/alerts/tasks.go +++ b/alerts/tasks.go @@ -85,7 +85,8 @@ func (r *CarePartnerRunner) evaluateLastComms(ctx context.Context) error { if err := r.evaluateLastComm(ctx, lastComm); err != nil { r.logger.WithError(err). WithField("followedUserID", lastComm.UserID). 
- Info("unable to evaluate no communication") + WithField("dataSetID", lastComm.DataSetID). + Info("Unable to evaluate no communication") continue } } @@ -96,41 +97,52 @@ func (r *CarePartnerRunner) evaluateLastComms(ctx context.Context) error { func (r *CarePartnerRunner) evaluateLastComm(ctx context.Context, lastComm LastCommunication) error { - alertsConfigs, err := r.alerts.List(ctx, lastComm.UserID) + configs, err := r.alerts.List(ctx, lastComm.UserID) if err != nil { return errors.Wrap(err, "listing follower alerts configs") } - alertsConfigs = slices.DeleteFunc(alertsConfigs, func(config *Config) bool { + + configs = slices.DeleteFunc(configs, r.authDenied(ctx)) + configs = slices.DeleteFunc(configs, func(config *Config) bool { return config.UploadID != lastComm.DataSetID }) - alertsConfigs = slices.DeleteFunc(alertsConfigs, r.authDenied(ctx)) - notifications := []*NotificationWithHook{} - toUpdate := map[*Config]struct{}{} - for _, alertsConfig := range alertsConfigs { + + notifications := []*Notification{} + for _, config := range configs { + lgr := config.LoggerWithFields(r.logger) lastData := lastComm.LastReceivedDeviceData - notification, changed := alertsConfig.EvaluateNoCommunication(ctx, lastData) + notification, needsUpsert := config.EvaluateNoCommunication(ctx, lastData) if notification != nil { + notification.Sent = r.wrapWithUpsert(ctx, lgr, config, notification.Sent) notifications = append(notifications, notification) } - if changed || notification != nil { - toUpdate[alertsConfig] = struct{}{} + if needsUpsert { + err := r.alerts.Upsert(ctx, config) + if err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } } } + r.pushNotifications(ctx, notifications) - // Only after notifications have been pushed should they be saved. The alerts configs - // could change during evaluation or in response to their notification being pushed. 
- for alertConfig := range toUpdate { - if err := r.alerts.Upsert(ctx, alertConfig); err != nil { - r.logger.WithError(err). - WithField("UserID", alertConfig.UserID). - WithField("FollowedUserID", alertConfig.FollowedUserID). - Info("Unable to upsert alerts config") - } - } return nil } +// wrapWithUpsert to upsert the Config that triggered the Notification after it's sent. +func (r *CarePartnerRunner) wrapWithUpsert(ctx context.Context, lgr log.Logger, config *Config, + original func(time.Time)) func(time.Time) { + + return func(at time.Time) { + if original != nil { + original(at) + } + if err := r.alerts.Upsert(ctx, config); err != nil { + lgr.WithError(err).Error("Unable to upsert changed alerts config") + } + } +} + func (r *CarePartnerRunner) authDenied(ctx context.Context) func(*Config) bool { return func(c *Config) bool { if c == nil { @@ -154,7 +166,7 @@ func (r *CarePartnerRunner) authDenied(ctx context.Context) func(*Config) bool { } func (r *CarePartnerRunner) pushNotifications(ctx context.Context, - notifications []*NotificationWithHook) { + notifications []*Notification) { for _, notification := range notifications { lgr := r.logger.WithField("recipientUserID", notification.RecipientUserID) @@ -165,9 +177,9 @@ func (r *CarePartnerRunner) pushNotifications(ctx context.Context, if len(tokens) == 0 { lgr.Debug("no device tokens found, won't push any notifications") } - pushNote := ToPushNotification(notification.Notification) + pushNotification := ToPushNotification(notification) for _, token := range tokens { - err := r.pusher.Push(ctx, token, pushNote) + err := r.pusher.Push(ctx, token, pushNotification) if err != nil { lgr.WithError(err).Info("unable to push notification") } else { diff --git a/alerts/tasks_test.go b/alerts/tasks_test.go index 13cf565ee4..ba4d7c0121 100644 --- a/alerts/tasks_test.go +++ b/alerts/tasks_test.go @@ -44,21 +44,30 @@ var _ = Describe("CarePartnerRunner", func() { runner.Run(test.Ctx, test.Task) - 
test.Logger.AssertInfo("unable to evaluate no communication", log.Fields{ - "followedUserID": testFollowedUserID, + Expect(func() { + test.Logger.AssertInfo("Unable to evaluate no communication", log.Fields{ + "followedUserID": mockUserID2, + }) + }).ToNot(Panic(), map[string]any{ + "got": quickJSON(test.Logger.SerializedFields), }) }) - It("upserting alerts configs", func() { + It("upsetting alerts configs", func() { runner, test := newCarePartnerRunnerTest() test.Alerts.UpsertError = fmt.Errorf("test error") runner.Run(test.Ctx, test.Task) - test.Logger.AssertInfo("Unable to upsert alerts config", log.Fields{ - "UserID": testUserID, - "FollowedUserID": testFollowedUserID, - }) + Expect(func() { + test.Logger.AssertError("Unable to upsert changed alerts config", log.Fields{ + "userID": mockUserID1, + "followedUserID": mockUserID2, + "dataSetID": mockDataSetID, + }) + }).ToNot(Panic(), quickJSON(map[string]any{ + "got": test.Logger.SerializedFields, + })) }) It("retrieving device tokens", func() { @@ -67,21 +76,29 @@ var _ = Describe("CarePartnerRunner", func() { runner.Run(test.Ctx, test.Task) - test.Logger.AssertInfo("unable to retrieve device tokens", log.Fields{ - "recipientUserID": testUserID, - }) + Expect(func() { + test.Logger.AssertInfo("unable to retrieve device tokens", log.Fields{ + "recipientUserID": mockUserID1, + }) + }, quickJSON(map[string]any{ + "got": test.Logger.SerializedFields, + })) }) - It("pushes notifications", func() { + It("pushing notifications", func() { runner, test := newCarePartnerRunnerTest() test.Pusher.PushErrors = append(test.Pusher.PushErrors, fmt.Errorf("test error")) runner.Run(test.Ctx, test.Task) Expect(len(test.Pusher.PushCalls)).To(Equal(1)) - test.Logger.AssertInfo("unable to push notification", log.Fields{ - "recipientUserID": testUserID, - }) + Expect(func() { + test.Logger.AssertInfo("unable to push notification", log.Fields{ + "recipientUserID": testUserID, + }) + }, quickJSON(map[string]any{ + "got": 
test.Logger.SerializedFields, + })) }) }) @@ -109,7 +126,7 @@ var _ = Describe("CarePartnerRunner", func() { Expect(len(test.Pusher.PushCalls)).To(Equal(2)) }) - It("pushes to each token, even if the first experiences an error", func() { + It("pushes to each token, continuing if any experience an error", func() { runner, test := newCarePartnerRunnerTest() test.Tokens.GetResponses[0] = append(test.Tokens.GetResponses[0], test.Tokens.GetResponses[0][0]) @@ -130,27 +147,74 @@ var _ = Describe("CarePartnerRunner", func() { // reset, add a user *with* perms, and check that it works runner, test = newCarePartnerRunnerTest() - userIDWithPerm := testUserID + "2" test.Permissions.AlwaysAllow = false - test.Permissions.Allow(userIDWithPerm, permission.Follow, testFollowedUserID) - test.Alerts.ListResponses[0] = append(test.Alerts.ListResponses[0], - &Config{ - UserID: userIDWithPerm, - FollowedUserID: testFollowedUserID, - UploadID: testDataSetID, - Alerts: Alerts{ - NoCommunicationAlert: &NoCommunicationAlert{}, - }, - }, - ) + test.Permissions.Allow(mockUserID3, mockUserID2, permission.Follow, permission.Read) + cfg := *test.Config + cfg.UserID = mockUserID3 + test.Alerts.ListResponses[0] = append(test.Alerts.ListResponses[0], &cfg) runner.Run(test.Ctx, test.Task) Expect(len(test.Pusher.PushCalls)).To(Equal(1)) }) + + It("upserts configs that need it", func() { + runner, test := newCarePartnerRunnerTest() + runner.Run(test.Ctx, test.Task) + + // One call from needsUpsert, another when the notification is sent. 
+ Expect(len(test.Alerts.UpsertCalls)).To(Equal(2)) + act0 := test.Alerts.UpsertCalls[0].Activity.NoCommunication + Expect(act0.Triggered).ToNot(BeZero()) + Expect(act0.Sent).To(BeZero()) + act1 := test.Alerts.UpsertCalls[1].Activity.NoCommunication + Expect(act1.Sent).ToNot(BeZero()) + }) + + It("upserts configs that need it, even without a notification", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + act.Triggered = time.Now().Add(-time.Hour) + act.Sent = time.Now().Add(-time.Hour) + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + test.Alerts.UsersWithoutCommsResponses[0][0].LastReceivedDeviceData = time.Now() + + runner.Run(test.Ctx, test.Task) + + // One call from needsUpsert, no call from sent (no notification to send) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(1)) + act0 := test.Alerts.UpsertCalls[0].Activity.NoCommunication + Expect(act0.Resolved).To(BeTemporally("~", time.Now())) + }) + + It("doesn't re-mark itself resolved", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + act.Triggered = time.Now().Add(-time.Hour) + act.Sent = time.Now().Add(-time.Hour) + act.Resolved = time.Now().Add(-time.Minute) + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + test.Alerts.UsersWithoutCommsResponses[0][0].LastReceivedDeviceData = time.Now() + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(0)) + }) + + It("doesn't re-send before delay", func() { + runner, test := newCarePartnerRunnerTest() + act := test.Alerts.ListResponses[0][0].Activity.NoCommunication + orig := time.Now().Add(-time.Minute) + act.Triggered = orig + act.Sent = orig + test.Alerts.ListResponses[0][0].Activity.NoCommunication = act + + runner.Run(test.Ctx, test.Task) + Expect(len(test.Alerts.UpsertCalls)).To(Equal(0)) + }) }) }) type carePartnerRunnerTest struct { Alerts 
*mockAlertsClient + Config *Config Ctx context.Context Logger *logtest.Logger Permissions *mockPermissionClient @@ -161,49 +225,36 @@ type carePartnerRunnerTest struct { func newCarePartnerRunnerTest() (*CarePartnerRunner, *carePartnerRunnerTest) { alerts := newMockAlertsClient() - lgr := logtest.NewLogger() - ctx := log.NewContextWithLogger(context.Background(), lgr) + ctx, lgr, cfg := newConfigTest() + cfg.Alerts.NoCommunication.Enabled = true pusher := newMockPusher() tsk := &task.Task{} tokens := newMockDeviceTokensClient() perms := newMockPermissionClient() - authClient := newMockAuthTokenProvider() perms.AlwaysAllow = true + authClient := newMockAuthTokenProvider() runner, err := NewCarePartnerRunner(lgr, alerts, tokens, pusher, perms, authClient) Expect(err).To(Succeed()) - alerts.UsersWithoutCommsResponses = [][]LastCommunication{ + last := time.Now().Add(-(DefaultNoCommunicationDelay + time.Second)) + alerts.UsersWithoutCommsResponses = [][]LastCommunication{{ { - { - UserID: testFollowedUserID, - DataSetID: testDataSetID, - LastReceivedDeviceData: time.Now().Add(-12 * time.Hour), - }, + UserID: mockUserID2, + DataSetID: mockDataSetID, + LastReceivedDeviceData: last, }, - } - alerts.ListResponses = [][]*Config{ - { - { - UserID: testUserID, - FollowedUserID: testFollowedUserID, - UploadID: testDataSetID, - Alerts: Alerts{ - NoCommunicationAlert: &NoCommunicationAlert{}, - }, - }, - }, - } + }} + alerts.ListResponses = [][]*Config{{cfg}} tokens.GetResponses = [][]*devicetokens.DeviceToken{ { - { - Apple: &devicetokens.AppleDeviceToken{}, - }, + {Apple: &devicetokens.AppleDeviceToken{}}, }, } return runner, &carePartnerRunnerTest{ Alerts: alerts, + Config: cfg, Ctx: ctx, Logger: lgr, Permissions: perms, diff --git a/data/blood/glucose/glucose.go b/data/blood/glucose/glucose.go index 32ca889dd9..ac1d7717a9 100644 --- a/data/blood/glucose/glucose.go +++ b/data/blood/glucose/glucose.go @@ -67,3 +67,7 @@ func NormalizeValueForUnits(value *float64, units *string) 
*float64 { } return value } + +func IsMmolL(units string) bool { + return units == MmolL || units == Mmoll +} diff --git a/data/events/alerts.go b/data/events/alerts.go index 2e7397dc4c..7c12cc0816 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -23,13 +23,14 @@ import ( ) type Consumer struct { - Alerts AlertsClient - Data alerts.DataRepository - DeviceTokens auth.DeviceTokensClient - Evaluator AlertsEvaluator - Permissions permission.Client - Pusher Pusher - Recorder EventsRecorder + Alerts AlertsClient + Data alerts.DataRepository + DeviceTokens auth.DeviceTokensClient + Evaluator AlertsEvaluator + Permissions permission.Client + Pusher Pusher + Recorder EventsRecorder + TokensProvider auth.ServerSessionTokenProvider Logger log.Logger } @@ -48,6 +49,8 @@ func (c *Consumer) Consume(ctx context.Context, return nil } + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, c.TokensProvider) + switch { case strings.Contains(msg.Topic, ".data.alerts"): return c.consumeAlertsConfigs(ctx, session, msg) @@ -65,13 +68,20 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { cfg := &alerts.Config{} - if err := unmarshalMessageValue(msg.Value, cfg); err != nil { + updatedFields, err := unmarshalMessageValue(msg.Value, cfg) + if err != nil { return err } lgr := c.logger(ctx) + if isActivityAndActivityOnly(updatedFields) { + lgr.WithField("updatedFields", updatedFields). 
+ Debug("alerts config is an activity update, will skip") + return nil + } + lgr.WithField("cfg", cfg).Info("consuming an alerts config message") - ctxLog := c.logger(ctx).WithField("followedUserID", cfg.FollowedUserID) + ctxLog := cfg.LoggerWithFields(c.logger(ctx)) ctx = log.NewContextWithLogger(ctx, ctxLog) notes, err := c.Evaluator.EvaluateData(ctx, cfg.FollowedUserID, cfg.UploadID) @@ -81,18 +91,30 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, } ctxLog.WithField("notes", notes).Debug("notes generated from alerts config") - c.pushNotes(ctx, notes) + c.pushNotifications(ctx, notes) session.MarkMessage(msg, "") lgr.WithField("message", msg).Debug("marked") return nil } +func isActivityAndActivityOnly(updatedFields []string) bool { + hasActivity := false + for _, field := range updatedFields { + if field == "activity" { + hasActivity = true + } else { + return false + } + } + return hasActivity +} + func (c *Consumer) consumeDeviceData(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { datum := &Glucose{} - if err := unmarshalMessageValue(msg.Value, datum); err != nil { + if _, err := unmarshalMessageValue(msg.Value, datum); err != nil { return err } lgr := c.logger(ctx) @@ -123,14 +145,14 @@ func (c *Consumer) consumeDeviceData(ctx context.Context, lgr.WithField("idx", idx).WithField("note", note).Debug("notes") } - c.pushNotes(ctx, notes) + c.pushNotifications(ctx, notes) session.MarkMessage(msg, "") lgr.WithField("message", msg).Debug("marked") return nil } -func (c *Consumer) pushNotes(ctx context.Context, notifications []*alerts.NotificationWithHook) { +func (c *Consumer) pushNotifications(ctx context.Context, notifications []*alerts.Notification) { lgr := c.logger(ctx) // Notes could be pushed into a Kafka topic to have a more durable retry, @@ -144,7 +166,7 @@ func (c *Consumer) pushNotes(ctx context.Context, notifications []*alerts.Notifi if len(tokens) == 0 { lgr.Debug("no device tokens found, 
won't push any notifications") } - pushNote := alerts.ToPushNotification(notification.Notification) + pushNote := alerts.ToPushNotification(notification) for _, token := range tokens { err := c.Pusher.Push(ctx, token, pushNote) if err != nil { @@ -176,18 +198,25 @@ func (c *Consumer) logger(ctx context.Context) log.Logger { type AlertsEvaluator interface { // EvaluateData to check if notifications should be sent in response to new data. - EvaluateData(ctx context.Context, followedUserID, dataSetID string) ([]*alerts.NotificationWithHook, error) + EvaluateData(ctx context.Context, followedUserID, dataSetID string) ([]*alerts.Notification, error) } -func unmarshalMessageValue[A any](b []byte, payload *A) error { +func unmarshalMessageValue[A any](b []byte, payload *A) ([]string, error) { wrapper := &struct { - FullDocument A `json:"fullDocument"` + FullDocument A `json:"fullDocument"` + UpdateDescription struct { + UpdatedFields map[string]any `json:"updatedFields"` + } `json:"updateDescription"` }{} if err := bson.UnmarshalExtJSON(b, false, wrapper); err != nil { - return errors.Wrap(err, "Unable to unmarshal ExtJSON") + return nil, errors.Wrap(err, "Unable to unmarshal ExtJSON") } *payload = wrapper.FullDocument - return nil + fields := []string{} + for k := range wrapper.UpdateDescription.UpdatedFields { + fields = append(fields, k) + } + return fields, nil } type AlertsClient interface { diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index 51e66863e9..5cfac5cc7f 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -47,14 +47,11 @@ var _ = Describe("Consumer", func() { UserID: testUserID, FollowedUserID: testFollowedUserID, Alerts: alerts.Alerts{ - DataAlerts: alerts.DataAlerts{ - Low: &alerts.LowAlert{ - Base: alerts.Base{ - Enabled: true}, - Threshold: alerts.Threshold{ - Value: 101.1, - Units: "mg/dL", - }, + Low: &alerts.LowAlert{ + Base: alerts.Base{Enabled: true}, + Threshold: alerts.Threshold{ + Value: 101.1, + 
Units: "mg/dL", }, }, }, @@ -119,14 +116,12 @@ var _ = Describe("Consumer", func() { eval := newMockEvaluator() eval.Evaluations[testFollowedUserID+testDataSetID] = []mockEvaluatorResponse{ { - Notifications: []*alerts.NotificationWithHook{ + Notifications: []*alerts.Notification{ { - Notification: &alerts.Notification{ - Message: "something", - RecipientUserID: testUserID, - FollowedUserID: testFollowedUserID, - }, - Sent: func(time.Time) {}, + Message: "something", + RecipientUserID: testUserID, + FollowedUserID: testFollowedUserID, + Sent: func(time.Time) {}, }, }, }, @@ -178,9 +173,7 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { { UserID: testUserID, FollowedUserID: testFollowedUserID, - Alerts: alerts.Alerts{ - DataAlerts: alerts.DataAlerts{}, - }, + Alerts: alerts.Alerts{}, }, }, nil) dataRepo := storetest.NewDataRepository() @@ -226,7 +219,7 @@ type mockEvaluator struct { } type mockEvaluatorResponse struct { - Notifications []*alerts.NotificationWithHook + Notifications []*alerts.Notification Error error } @@ -238,7 +231,7 @@ func newMockEvaluator() *mockEvaluator { } func (e *mockEvaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( - []*alerts.NotificationWithHook, error) { + []*alerts.Notification, error) { key := followedUserID + dataSetID if _, found := e.Evaluations[key]; !found { @@ -275,8 +268,8 @@ func newMockStaticEvaluator() *mockStaticEvaluator { return &mockStaticEvaluator{newMockEvaluator()} } -func (e *mockStaticEvaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID string) ( - []*alerts.NotificationWithHook, error) { +func (e *mockStaticEvaluator) EvaluateData(ctx context.Context, + followedUserID, dataSetID string) ([]*alerts.Notification, error) { e.EvaluateCalls[followedUserID] += 1 return nil, nil @@ -335,22 +328,6 @@ func newMockMongoCursor(docs []interface{}) *mongo.Cursor { return cur } -type mockPusher struct { - Pushes []string -} - -func newMockPusher() 
*mockPusher { - return &mockPusher{ - Pushes: []string{}, - } -} - -func (p *mockPusher) Push(ctx context.Context, - deviceToken *devicetokens.DeviceToken, notification *push.Notification) error { - p.Pushes = append(p.Pushes, notification.Message) - return nil -} - type mockAlertsConfigClient struct { Error error Configs []*alerts.Config diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 4988459148..e0deca8c54 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -498,17 +498,19 @@ func (s *Standard) initializeAlertsEventsHandler() error { dataRepo := s.dataStore.NewAlertsDataRepository() recorderRepo := s.dataStore.NewRecorderRepository() - alertsEvaluator := alerts.NewEvaluator(alertsRepo, dataRepo, s.permissionClient, s.Logger()) + alertsEvaluator := alerts.NewEvaluator(alertsRepo, dataRepo, s.PermissionClient(), + s.Logger(), s.AuthClient()) ec := &dataEvents.Consumer{ - Alerts: alertsRepo, - Evaluator: alertsEvaluator, - Data: dataRepo, - DeviceTokens: s.AuthClient(), - Logger: s.Logger(), - Permissions: s.permissionClient, - Pusher: s.pusher, - Recorder: dataEvents.NewRecorder(recorderRepo), + Alerts: alertsRepo, + Evaluator: alertsEvaluator, + Data: dataRepo, + DeviceTokens: s.AuthClient(), + Logger: s.Logger(), + Permissions: s.PermissionClient(), + Pusher: s.pusher, + Recorder: dataEvents.NewRecorder(recorderRepo), + TokensProvider: s.AuthClient(), } runnerCfg := dataEvents.SaramaRunnerConfig{ diff --git a/data/store/mongo/mongo_alerts.go b/data/store/mongo/mongo_alerts.go index ba37b52f2b..5887f8f834 100644 --- a/data/store/mongo/mongo_alerts.go +++ b/data/store/mongo/mongo_alerts.go @@ -32,7 +32,7 @@ func (r *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { {Key: "uploadId", Value: conf.UploadID}, } doc := bson.M{ - "$set": conf.Alerts, + "$set": bson.M{"alerts": conf.Alerts, "activity": conf.Activity}, "$setOnInsert": filter, } _, err := r.UpdateOne(ctx, filter, 
doc, opts) diff --git a/data/store/mongo/mongo_test.go b/data/store/mongo/mongo_test.go index a241cf3c0b..9031564938 100644 --- a/data/store/mongo/mongo_test.go +++ b/data/store/mongo/mongo_test.go @@ -2464,8 +2464,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { ctx := context.Background() filter := bson.M{} if upsertDoc { - Expect(alertsRepository.Upsert(ctx, cfg)). - To(Succeed()) + Expect(alertsRepository.Upsert(ctx, cfg)).To(Succeed()) filter["userId"] = cfg.UserID filter["followedUserId"] = cfg.FollowedUserID } @@ -2488,7 +2487,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { It("updates the existing document", func() { ctx, cfg, filter := prep(true) - cfg.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} + cfg.Alerts.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} err := alertsRepository.Upsert(ctx, cfg) Expect(err).To(Succeed()) @@ -2497,8 +2496,8 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(res.Err()).To(Succeed()) Expect(res.Decode(doc)).To(Succeed()) jsonOut, _ := json.Marshal(doc) - Expect(doc.Low).ToNot(BeNil(), string(jsonOut)) - Expect(doc.Low.Base.Enabled).To(Equal(true)) + Expect(doc.Alerts.Low).ToNot(BeNil(), string(jsonOut)) + Expect(doc.Alerts.Low.Base.Enabled).To(Equal(true)) }) It("sets userId, followedUserId, and uploadId only on creation", func() { @@ -2528,36 +2527,39 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(doc.UploadID).To(Equal("something")) Expect(doc.FollowedUserID).To(Equal("followed-user-id")) Expect(doc.UserID).To(Equal("user-id")) - Expect(doc.Low.Delay.Duration()).To(Equal(testDelay)) + Expect(doc.Alerts.Low.Delay.Duration()).To(Equal(testDelay)) }) It("updates the Config's Activity", func() { ctx, cfg, filter := prep(true) - testTriggered := time.Now().Add(-5 * time.Minute) testSent := time.Now().Add(-3 * time.Minute) - cfg.Low = &alerts.LowAlert{ + testTriggered := 
time.Now().Add(-5 * time.Minute) + cfg.Alerts.Low = &alerts.LowAlert{ Base: alerts.Base{ Enabled: true, - Activity: alerts.Activity{ - Triggered: testTriggered, - Sent: testSent, - // Resolved is unset, so it should be a zero value. - }, + // Activity: alerts.AlertActivity{ + // Triggered: testTriggered, + // Sent: testSent, + // // Resolved is unset, so it should be a zero value. + // }, }, } + cfg.Activity.Low.Sent = testSent + cfg.Activity.Low.Triggered = testTriggered err := alertsRepository.Upsert(ctx, cfg) Expect(err).To(Succeed()) doc := &alerts.Config{} + //raw := map[string]any{} res := store.GetCollection("alerts").FindOne(ctx, filter) Expect(res.Err()).To(Succeed()) Expect(res.Decode(doc)).To(Succeed()) - Expect(doc.Low).ToNot(BeNil()) - Expect(doc.Low.Base.Enabled).To(Equal(true)) - Expect(doc.Low.Triggered).To(BeTemporally("~", testTriggered, time.Millisecond)) - Expect(doc.Low.Sent).To(BeTemporally("~", testSent, time.Millisecond)) - Expect(doc.Low.Resolved).To(Equal(time.Time{})) + Expect(doc.Alerts.Low).ToNot(BeNil()) + Expect(doc.Alerts.Low.Base.Enabled).To(Equal(true)) + Expect(doc.Activity.Low.Triggered).To(BeTemporally("~", testTriggered, time.Millisecond)) + Expect(doc.Activity.Low.Sent).To(BeTemporally("~", testSent, time.Millisecond)) + Expect(doc.Activity.Low.Resolved).To(Equal(time.Time{})) }) }) @@ -2577,23 +2579,23 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { UserID: "879d5cb2-f70d-4b05-8d38-fb6d88ef2ea9", FollowedUserID: "d2ee01db-3458-42ac-95d2-ac2fc571a21d", Alerts: alerts.Alerts{ - DataAlerts: alerts.DataAlerts{ - High: &alerts.HighAlert{ - Base: alerts.Base{Enabled: true}, - }, + // DataAlerts: alerts.DataAlerts{ + High: &alerts.HighAlert{ + Base: alerts.Base{Enabled: true}, }, + // }, }, } Expect(alertsRepository.Upsert(ctx, other)).To(Succeed()) - cfg.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} + cfg.Alerts.Low = &alerts.LowAlert{Base: alerts.Base{Enabled: true}} err := 
alertsRepository.Upsert(ctx, cfg) Expect(err).To(Succeed()) got, err := alertsRepository.Get(ctx, cfg) Expect(err).To(Succeed()) Expect(got).ToNot(BeNil()) - Expect(got.Low).ToNot(BeNil()) - Expect(got.Low.Enabled).To(Equal(true)) + Expect(got.Alerts.Low).ToNot(BeNil()) + Expect(got.Alerts.Low.Enabled).To(Equal(true)) Expect(got.UserID).To(Equal(cfg.UserID)) Expect(got.FollowedUserID).To(Equal(cfg.FollowedUserID)) }) diff --git a/devicetokens/devicetokens.go b/devicetokens/devicetokens.go index 721f110653..fc901187f7 100644 --- a/devicetokens/devicetokens.go +++ b/devicetokens/devicetokens.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha256" "encoding/hex" + "encoding/json" "fmt" "github.com/tidepool-org/platform/structure" @@ -52,6 +53,14 @@ type DeviceToken struct { Apple *AppleDeviceToken `json:"apple,omitempty" bson:"apple,omitempty"` } +func (t DeviceToken) String() string { + b, err := json.Marshal(t) + if err != nil { + return "" + } + return string(b) +} + // key provides a unique string value to identify this device token. // // Intended to be used as part of a unique index for database indexes. diff --git a/log/devlog/devlog.go b/log/devlog/devlog.go index 2800cfd2a0..f1dc98193b 100644 --- a/log/devlog/devlog.go +++ b/log/devlog/devlog.go @@ -15,6 +15,7 @@ import ( "fmt" "io" stdlog "log" + "os" "sort" "strings" "time" @@ -83,7 +84,20 @@ func (s *serializer) Serialize(fields log.Fields) error { if len(pairs) > 0 { rest = ": " + strings.Join(pairs, " ") } - s.Logger.Printf(msgTime + " " + msgLevel + " " + msg + rest) + prefixes := []string{} + prefixes = append(prefixes, msgTime) + // HOSTNAME is set on Kubernetes pods and is useful for distinguishing logs from an + // outgoing Pod vs a newly created Pod. 
+ if h := os.Getenv("HOSTNAME"); h != "" { + pieces := strings.Split(h, "-") + if len(pieces) > 0 { + prefixes = append(prefixes, pieces[len(pieces)-1]) + } else { + prefixes = append(prefixes, h) + } + } + prefixes = append(prefixes, msgLevel) + s.Logger.Print(strings.Join(prefixes, " ") + " " + msg + rest) return nil } diff --git a/task/service/service/service.go b/task/service/service/service.go index f4a09b5b66..dbd5da9695 100644 --- a/task/service/service/service.go +++ b/task/service/service/service.go @@ -367,6 +367,7 @@ func (s *Service) initializeTaskQueue() error { if s.alertsClient == nil { s.Logger().Info("alerts client is nil; care partner tasks will not run successfully") } + carePartnerRunner, err := alerts.NewCarePartnerRunner(s.Logger(), s.alertsClient, s.AuthClient(), s.pusher, s.permissionClient, s.AuthClient()) if err != nil { From 63234624161623ba7f6c8be3e57d5c0d51906716 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 4 Feb 2025 14:55:13 -0700 Subject: [PATCH 29/54] reduce kafka topics for care partner alerts outside of production As requested in code review. https://github.com/tidepool-org/terraform-modules/pull/72#pullrequestreview-2593875609 BACK-2559 --- data/service/service/standard.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/data/service/service/standard.go b/data/service/service/standard.go index e0deca8c54..7e42bf6b1e 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -523,8 +523,12 @@ func (s *Standard) initializeAlertsEventsHandler() error { }, } - eventsRunner := dataEvents.NewCascadingSaramaEventsRunner(runnerCfg, s.Logger(), - []time.Duration{0, 1 * time.Second, 2 * time.Second, 3 * time.Second, 5 * time.Second}) + retryDelays := []time.Duration{0, 1 * time.Second} + if strings.Contains(commonConfig.KafkaTopicPrefix, "tidepool-prod") { + // Kafka topics/partitions aren't cheap, so minimize costs outside of production. 
+ retryDelays = append(retryDelays, 2*time.Second, 3*time.Second, 5*time.Second) + } + eventsRunner := dataEvents.NewCascadingSaramaEventsRunner(runnerCfg, s.Logger(), retryDelays) runner := dataEvents.NewSaramaRunner(eventsRunner) if err := runner.Initialize(); err != nil { return errors.Wrap(err, "Unable to initialize alerts events handler runner") From 80d09a90688a913175c94cb91759a295cad8ac1e Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 4 Feb 2025 15:40:30 -0700 Subject: [PATCH 30/54] bump go-common to get kafka CDC updates for CPA BACK-2559 BACK-2499 --- data/events/events.go | 20 +++++++++++++++++++- data/events/events_test.go | 2 +- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/data/events/events.go b/data/events/events.go index 54c73b2bc7..af74f9c655 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "log/slog" "os" "strconv" "sync" @@ -348,7 +349,8 @@ func (r *CascadingSaramaEventsRunner) buildConsumer(ctx context.Context, idx int Logger: r.Logger, } } - handler := asyncevents.NewSaramaConsumerGroupHandler(consumer, + aeLoggerAdapter := &asynceventsLoggerAdapter{r.Logger} + handler := asyncevents.NewSaramaConsumerGroupHandler(aeLoggerAdapter, consumer, AlertsEventConsumptionTimeout) topic := baseTopic if delay > 0 { @@ -553,3 +555,19 @@ type EventsRecorder interface { // of a given user. RecordReceivedDeviceData(context.Context, alerts.LastCommunication) error } + +// asynceventsLoggerAdapter adapts a [log.Logger] to [asyncevents.Logger]. 
+type asynceventsLoggerAdapter struct { + log.Logger +} + +var logLevels map[slog.Level]log.Level = map[slog.Level]log.Level{ + slog.LevelDebug: log.DebugLevel, + slog.LevelInfo: log.InfoLevel, + slog.LevelWarn: log.WarnLevel, + slog.LevelError: log.ErrorLevel, +} + +func (a *asynceventsLoggerAdapter) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + a.Logger.Log(logLevels[level], fmt.Sprintf(msg, args...)) +} diff --git a/data/events/events_test.go b/data/events/events_test.go index a3bb7de8b1..9e8036a54d 100644 --- a/data/events/events_test.go +++ b/data/events/events_test.go @@ -619,7 +619,7 @@ func (s *mockSaramaConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage } func (s *mockSaramaConsumerGroupSession) Context() context.Context { - panic("not implemented") // implement if needed + return context.Background() } type mockSaramaConsumerGroupClaim struct { From ca1dd3f8ee914ba3517c0f8f587b7847cb7d5cd2 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 10 Feb 2025 15:34:59 -0700 Subject: [PATCH 31/54] rename nontypesglucose -> dataBloodGlucose This maintains consistency. 
BACK-2499 BACK-2559 --- alerts/config.go | 24 +++++++++--------- alerts/config_test.go | 52 +++++++++++++++++++------------------- alerts/evaluator_test.go | 6 ++--- data/events/alerts_test.go | 4 +-- 4 files changed, 43 insertions(+), 43 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index 91b46fb6ff..b128e69e16 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -11,7 +11,7 @@ import ( "time" "github.com/tidepool-org/platform/data" - nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" "github.com/tidepool-org/platform/data/types/blood/glucose" "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/errors" @@ -415,11 +415,11 @@ func normalizeUnits(datum *Glucose, t Threshold) (float64, float64, error) { // The units don't match. There exists a known good function that converts to MmolL, so // we'll convert whichever value isn't in MmolL to MmolL. - if nontypesglucose.IsMmolL(t.Units) { - n := nontypesglucose.NormalizeValueForUnits(datum.Blood.Value, datum.Blood.Units) + if dataBloodGlucose.IsMmolL(t.Units) { + n := dataBloodGlucose.NormalizeValueForUnits(datum.Blood.Value, datum.Blood.Units) return *n, t.Value, nil - } else if nontypesglucose.IsMmolL(*datum.Blood.Units) { - n := nontypesglucose.NormalizeValueForUnits(&t.Value, &t.Units) + } else if dataBloodGlucose.IsMmolL(*datum.Blood.Units) { + n := dataBloodGlucose.NormalizeValueForUnits(&t.Value, &t.Units) return *datum.Blood.Value, *n, nil } @@ -641,20 +641,20 @@ type Threshold ValueWithUnits // Validate implements structure.Validatable func (t Threshold) Validate(v structure.Validator) { - v.String("units", &t.Units).OneOf(nontypesglucose.MgdL, nontypesglucose.MmolL) + v.String("units", &t.Units).OneOf(dataBloodGlucose.MgdL, dataBloodGlucose.MmolL) // This is a sanity check. Client software will likely further constrain these // values. 
The broadness of these values allows clients to change their own min and max // values independently, and it sidesteps rounding and conversion conflicts between the // backend and clients. var max, min float64 switch t.Units { - case nontypesglucose.MgdL, nontypesglucose.Mgdl: - max = nontypesglucose.MgdLMaximum - min = nontypesglucose.MgdLMinimum + case dataBloodGlucose.MgdL, dataBloodGlucose.Mgdl: + max = dataBloodGlucose.MgdLMaximum + min = dataBloodGlucose.MgdLMinimum v.Float64("value", &t.Value).InRange(min, max) - case nontypesglucose.MmolL, nontypesglucose.Mmoll: - max = nontypesglucose.MmolLMaximum - min = nontypesglucose.MmolLMinimum + case dataBloodGlucose.MmolL, dataBloodGlucose.Mmoll: + max = dataBloodGlucose.MmolLMaximum + min = dataBloodGlucose.MmolLMinimum v.Float64("value", &t.Value).InRange(min, max) default: v.WithReference("value").ReportError(validator.ErrorValueNotValid()) diff --git a/alerts/config_test.go b/alerts/config_test.go index 0a8558f8aa..fd2dc80da9 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -12,7 +12,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood" "github.com/tidepool-org/platform/log" @@ -84,15 +84,15 @@ var _ = Describe("Config", func() { Expect(cfg.Alerts.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(cfg.Alerts.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) Expect(cfg.Alerts.High.Threshold.Value).To(Equal(10.0)) - Expect(cfg.Alerts.High.Threshold.Units).To(Equal(nontypesglucose.MmolL)) + Expect(cfg.Alerts.High.Threshold.Units).To(Equal(dataBloodGlucose.MmolL)) Expect(cfg.Alerts.Low.Enabled).To(Equal(true)) Expect(cfg.Alerts.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(cfg.Alerts.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) Expect(cfg.Alerts.Low.Threshold.Value).To(Equal(80.0)) - Expect(cfg.Alerts.Low.Threshold.Units).To(Equal(nontypesglucose.MgdL)) + Expect(cfg.Alerts.Low.Threshold.Units).To(Equal(dataBloodGlucose.MgdL)) Expect(cfg.Alerts.UrgentLow.Enabled).To(Equal(false)) Expect(cfg.Alerts.UrgentLow.Threshold.Value).To(Equal(47.5)) - Expect(cfg.Alerts.UrgentLow.Threshold.Units).To(Equal(nontypesglucose.MgdL)) + Expect(cfg.Alerts.UrgentLow.Threshold.Units).To(Equal(dataBloodGlucose.MgdL)) Expect(cfg.Alerts.NotLooping.Enabled).To(Equal(true)) Expect(cfg.Alerts.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) // Expect(conf.Alerts.NoCommunication.Enabled).To(Equal(true)) @@ -902,7 +902,7 @@ var _ = Describe("Config", func() { Context("repeat", func() { var defaultAlert = LowAlert{ - Threshold: Threshold{Value: 11, Units: nontypesglucose.MmolL}, + Threshold: Threshold{Value: 11, Units: dataBloodGlucose.MmolL}, } It("accepts values of 0 (indicating disabled)", func() { @@ -983,7 +983,7 @@ var _ = Describe("Config", func() { "value": 47.5 } } -}`, mockUserID1, mockUserID2, 
mockDataSetID, nontypesglucose.MgdL) +}`, mockUserID1, mockUserID2, mockDataSetID, dataBloodGlucose.MgdL) cfg := &Config{} err := request.DecodeObject(context.Background(), nil, buf, cfg) Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) @@ -1001,7 +1001,7 @@ var _ = Describe("Config", func() { "value": 1 } } -}`, mockUserID1, mockUserID2, mockDataSetID, nontypesglucose.MgdL) +}`, mockUserID1, mockUserID2, mockDataSetID, dataBloodGlucose.MgdL) cfg := &Config{} err := request.DecodeObject(context.Background(), nil, buf, cfg) Expect(err).To(MatchError("json is malformed")) @@ -1138,20 +1138,20 @@ var _ = Describe("DurationMinutes", func() { var _ = Describe("Threshold", func() { It("accepts mg/dL", func() { - buf := buff(`{"units":"%s","value":42}`, nontypesglucose.MgdL) + buf := buff(`{"units":"%s","value":42}`, dataBloodGlucose.MgdL) threshold := &Threshold{} err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(nontypesglucose.MgdL)) + Expect(threshold.Units).To(Equal(dataBloodGlucose.MgdL)) }) It("accepts mmol/L", func() { - buf := buff(`{"units":"%s","value":42}`, nontypesglucose.MmolL) + buf := buff(`{"units":"%s","value":42}`, dataBloodGlucose.MmolL) threshold := &Threshold{} err := request.DecodeObject(context.Background(), nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(nontypesglucose.MmolL)) + Expect(threshold.Units).To(Equal(dataBloodGlucose.MmolL)) }) It("rejects lb/gal", func() { buf := buff(`{"units":"%s","value":42}`, "lb/gal") @@ -1164,7 +1164,7 @@ var _ = Describe("Threshold", func() { Expect(err).Should(HaveOccurred()) }) It("is case-sensitive with respect to Units", func() { - badUnits := strings.ToUpper(nontypesglucose.MmolL) + badUnits := strings.ToUpper(dataBloodGlucose.MmolL) buf := buff(`{"units":"%s","value":42}`, badUnits) 
err := request.DecodeObject(context.Background(), nil, buf, &Threshold{}) Expect(err).Should(HaveOccurred()) @@ -1223,7 +1223,7 @@ var _ = Describe("AlertActivity", func() { d := testUrgentLowDatum() t := Threshold{ Value: 5.0, - Units: nontypesglucose.MmolL, + Units: dataBloodGlucose.MmolL, } dv, tv, err := normalizeUnits(d, t) Expect(err).To(Succeed()) @@ -1231,10 +1231,10 @@ var _ = Describe("AlertActivity", func() { Expect(dv).To(Equal(2.9)) d = testUrgentLowDatum() - d.Blood.Units = pointer.FromAny(nontypesglucose.MgdL) + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MgdL) t = Threshold{ Value: 5.0, - Units: nontypesglucose.MgdL, + Units: dataBloodGlucose.MgdL, } dv, tv, err = normalizeUnits(d, t) Expect(err).To(Succeed()) @@ -1246,10 +1246,10 @@ var _ = Describe("AlertActivity", func() { Context("value in Mmol/L & threshold in mg/dL", func() { It("normalizes to Mmol/L", func() { d := testUrgentLowDatum() - d.Blood.Units = pointer.FromAny(nontypesglucose.MmolL) + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MmolL) t := Threshold{ Value: 90.0, - Units: nontypesglucose.MgdL, + Units: dataBloodGlucose.MgdL, } dv, tv, err := normalizeUnits(d, t) Expect(err).To(Succeed()) @@ -1262,10 +1262,10 @@ var _ = Describe("AlertActivity", func() { It("normalizes to Mmol/L", func() { d := testUrgentLowDatum() d.Blood.Value = pointer.FromAny(90.0) - d.Blood.Units = pointer.FromAny(nontypesglucose.MgdL) + d.Blood.Units = pointer.FromAny(dataBloodGlucose.MgdL) t := Threshold{ Value: 5.0, - Units: nontypesglucose.MmolL, + Units: dataBloodGlucose.MmolL, } dv, tv, err := normalizeUnits(d, t) Expect(err).To(Succeed()) @@ -1304,7 +1304,7 @@ func testUrgentLowDatum() *Glucose { Base: types.Base{ Time: pointer.FromAny(time.Now()), }, - Units: pointer.FromAny(nontypesglucose.MmolL), + Units: pointer.FromAny(dataBloodGlucose.MmolL), Value: pointer.FromAny(2.9), }, } @@ -1316,7 +1316,7 @@ func testHighDatum() *Glucose { Base: types.Base{ Time: pointer.FromAny(time.Now()), }, - 
Units: pointer.FromAny(nontypesglucose.MmolL), + Units: pointer.FromAny(dataBloodGlucose.MmolL), Value: pointer.FromAny(11.0), }, } @@ -1328,7 +1328,7 @@ func testLowDatum() *Glucose { Base: types.Base{ Time: pointer.FromAny(time.Now()), }, - Units: pointer.FromAny(nontypesglucose.MmolL), + Units: pointer.FromAny(dataBloodGlucose.MmolL), Value: pointer.FromAny(3.9), }, } @@ -1340,7 +1340,7 @@ func testInRangeDatum() *Glucose { Base: types.Base{ Time: pointer.FromAny(time.Now()), }, - Units: pointer.FromAny(nontypesglucose.MmolL), + Units: pointer.FromAny(dataBloodGlucose.MmolL), Value: pointer.FromAny(6.0), }, } @@ -1380,7 +1380,7 @@ func testLowAlert() *LowAlert { Base: Base{Enabled: true}, Threshold: Threshold{ Value: 4, - Units: nontypesglucose.MmolL, + Units: dataBloodGlucose.MmolL, }, } } @@ -1389,7 +1389,7 @@ func testHighAlert() *HighAlert { Base: Base{Enabled: true}, Threshold: Threshold{ Value: 10, - Units: nontypesglucose.MmolL, + Units: dataBloodGlucose.MmolL, }, } } @@ -1398,7 +1398,7 @@ func testUrgentLowAlert() *UrgentLowAlert { Base: Base{Enabled: true}, Threshold: Threshold{ Value: 3, - Units: nontypesglucose.MmolL, + Units: dataBloodGlucose.MmolL, }, } } diff --git a/alerts/evaluator_test.go b/alerts/evaluator_test.go index a7882626ca..5be6a3acd4 100644 --- a/alerts/evaluator_test.go +++ b/alerts/evaluator_test.go @@ -9,7 +9,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" "github.com/tidepool-org/platform/data/types/blood/glucose" "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/permission" @@ -64,7 +64,7 @@ var _ = Describe("Evaluator", func() { Base: Base{Enabled: true}, Threshold: Threshold{ Value: 10.0, - Units: nontypesglucose.MmolL, + Units: dataBloodGlucose.MmolL, }, }, }, @@ -78,7 +78,7 @@ var _ = Describe("Evaluator", func() { Base: Base{Enabled: true}, Threshold: Threshold{ Value: 10.0, - Units: nontypesglucose.MmolL, + Units: dataBloodGlucose.MmolL, }, }, }, diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index 5cfac5cc7f..1225bd21e8 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -12,7 +12,7 @@ import ( "go.mongodb.org/mongo-driver/mongo" "github.com/tidepool-org/platform/alerts" - nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + dataBloodGlucose "github.com/tidepool-org/platform/data/blood/glucose" storetest "github.com/tidepool-org/platform/data/store/test" "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood" @@ -529,7 +529,7 @@ func newTestStaticDatumMmolL(value float64) *glucose.Glucose { Time: pointer.FromTime(time.Now()), UploadID: pointer.FromAny(testDataSetID), }, - Units: pointer.FromString(nontypesglucose.MmolL), + Units: pointer.FromString(dataBloodGlucose.MmolL), Value: pointer.FromFloat64(value), }, } From a802f0222c585433f969487c4099bddee4357057 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 10 Feb 2025 16:45:25 -0700 Subject: [PATCH 32/54] renames Recorder & UsersWithoutCommunications Renames: Recorder -> LastCommunicationsRecorder UsersWithoutCommunication -> OverdueCommunications BACK-2499 BACK-2559 --- alerts/client.go | 10 +- alerts/client_test.go | 6 +- 
alerts/config.go | 10 +- alerts/evaluator_test.go | 30 +++--- alerts/tasks.go | 8 +- alerts/tasks_test.go | 12 +-- auth/service/api/v1/auth_service_mock.go | 1 - auth/test/mock.go | 1 - data/events/alerts.go | 18 ++-- data/events/alerts_test.go | 94 +++++++++---------- data/events/events.go | 2 +- ...der.go => last_communications_recorder.go} | 12 +-- data/service/api/v1/alerts.go | 13 +-- data/service/api/v1/alerts_test.go | 52 +++++----- data/service/api/v1/mocks/context.go | 18 ++-- .../api/v1/mocks/mocklogger_test_gen.go | 1 - .../api/v1/users_datasets_create_test.go | 2 +- data/service/context.go | 2 +- data/service/context/standard.go | 44 ++++----- data/service/service/standard.go | 20 ++-- data/store/mongo/mongo.go | 8 +- data/store/mongo/mongo_recorder.go | 17 ++-- data/store/mongo/mongo_test.go | 29 +++--- data/store/store.go | 2 +- dexcom/fetch/test/mock.go | 1 - task/test/mock.go | 1 - 26 files changed, 206 insertions(+), 208 deletions(-) rename data/events/{recorder.go => last_communications_recorder.go} (65%) diff --git a/alerts/client.go b/alerts/client.go index 96edff0d54..fae857a660 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -87,16 +87,16 @@ func (c *Client) List(ctx context.Context, followedUserID string) ([]*Config, er return configs, nil } -// UsersWithoutCommunication are those that haven't communicated in some time. +// OverdueCommunications are those that haven't communicated in some time. // // This method should only be called via an authenticated service session. 
-func (c *Client) UsersWithoutCommunication(ctx context.Context) ([]LastCommunication, error) { - url := c.client.ConstructURL("v1", "users", "without_communication") +func (c *Client) OverdueCommunications(ctx context.Context) ([]LastCommunication, error) { + url := c.client.ConstructURL("v1", "users", "overdue_communications") lastComms := []LastCommunication{} err := c.request(ctx, http.MethodGet, url, nil, &lastComms) if err != nil { - c.logger.Debugf("getting users without communication: \"%+v\" %T", err, err) - return nil, errors.Wrap(err, "Unable to list users without communication") + c.logger.Debugf("getting users overdue to communicate: \"%+v\" %T", err, err) + return nil, errors.Wrap(err, "Unable to list overdue communications") } return lastComms, nil } diff --git a/alerts/client_test.go b/alerts/client_test.go index 1fe6b739bd..cb647cbfa4 100644 --- a/alerts/client_test.go +++ b/alerts/client_test.go @@ -106,14 +106,14 @@ var _ = Describe("Client", func() { }) }) - Context("UsersWithoutCommunication", func() { + Context("OverdueCommunications", func() { ItReturnsAnErrorOnNon200Responses(func(ctx context.Context, client *Client) error { - _, err := client.UsersWithoutCommunication(ctx) + _, err := client.OverdueCommunications(ctx) return err }) ItReturnsANilErrorOnSuccess("[]", func(ctx context.Context, client *Client) error { - _, err := client.UsersWithoutCommunication(ctx) + _, err := client.OverdueCommunications(ctx) return err }) }) diff --git a/alerts/config.go b/alerts/config.go index b128e69e16..89ad86a984 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -680,12 +680,14 @@ type Notification struct { Sent func(time.Time) } -// RecordsRepository encapsulates queries of the records collection for use with alerts. -type RecordsRepository interface { +// LastCommunicationsRepository encapsulates queries of the [LastCommunication] records +// collection for use with alerts. 
+type LastCommunicationsRepository interface { // RecordReceivedDeviceData upserts the time of last communication from a user. RecordReceivedDeviceData(context.Context, LastCommunication) error - // UsersWithoutCommunication lists those users that haven't communicated for a time. - UsersWithoutCommunication(context.Context) ([]LastCommunication, error) + // OverdueCommunications lists records for those users that haven't communicated for a + // time. + OverdueCommunications(context.Context) ([]LastCommunication, error) EnsureIndexes() error } diff --git a/alerts/evaluator_test.go b/alerts/evaluator_test.go index 5be6a3acd4..f3655ca1ea 100644 --- a/alerts/evaluator_test.go +++ b/alerts/evaluator_test.go @@ -302,19 +302,19 @@ func newTestAlertsConfig(userID, dataSetID string) *Config { } type mockAlertsClient struct { - UsersWithoutCommsError error - UsersWithoutCommsResponses [][]LastCommunication - ListResponses [][]*Config - ListError error - UpsertError error - UpsertCalls []*Config + OverdueCommunicationsError error + OverdueCommunicationsResponses [][]LastCommunication + ListResponses [][]*Config + ListError error + UpsertError error + UpsertCalls []*Config } func newMockAlertsClient() *mockAlertsClient { return &mockAlertsClient{ - UsersWithoutCommsResponses: [][]LastCommunication{}, - ListResponses: [][]*Config{}, - UpsertCalls: []*Config{}, + OverdueCommunicationsResponses: [][]LastCommunication{}, + ListResponses: [][]*Config{}, + UpsertCalls: []*Config{}, } } @@ -351,15 +351,15 @@ func (c *mockAlertsClient) List(ctx context.Context, userID string) ([]*Config, return []*Config{}, nil } -func (c *mockAlertsClient) UsersWithoutCommunication(context.Context) ( +func (c *mockAlertsClient) OverdueCommunications(context.Context) ( []LastCommunication, error) { - if c.UsersWithoutCommsError != nil { - return nil, c.UsersWithoutCommsError + if c.OverdueCommunicationsError != nil { + return nil, c.OverdueCommunicationsError } - if len(c.UsersWithoutCommsResponses) > 
0 { - ret := c.UsersWithoutCommsResponses[0] - c.UsersWithoutCommsResponses = c.UsersWithoutCommsResponses[1:] + if len(c.OverdueCommunicationsResponses) > 0 { + ret := c.OverdueCommunicationsResponses[0] + c.OverdueCommunicationsResponses = c.OverdueCommunicationsResponses[1:] return ret, nil } return nil, nil diff --git a/alerts/tasks.go b/alerts/tasks.go index 498fcce7f5..5079ca76c6 100644 --- a/alerts/tasks.go +++ b/alerts/tasks.go @@ -30,9 +30,9 @@ type CarePartnerRunner struct { type AlertsClient interface { List(_ context.Context, followedUserID string) ([]*Config, error) Upsert(context.Context, *Config) error - // UsersWithoutCommunication returns a slice of user ids for those users that haven't + // OverdueCommunications returns a slice of [LastCommunication] for users that haven't // uploaded data recently. - UsersWithoutCommunication(context.Context) ([]LastCommunication, error) + OverdueCommunications(context.Context) ([]LastCommunication, error) } func NewCarePartnerRunner(logger log.Logger, alerts AlertsClient, @@ -76,12 +76,12 @@ func (r *CarePartnerRunner) Run(ctx context.Context, tsk *task.Task) { } func (r *CarePartnerRunner) evaluateLastComms(ctx context.Context) error { - lastComms, err := r.alerts.UsersWithoutCommunication(ctx) + overdue, err := r.alerts.OverdueCommunications(ctx) if err != nil { return errors.Wrap(err, "listing users without communication") } - for _, lastComm := range lastComms { + for _, lastComm := range overdue { if err := r.evaluateLastComm(ctx, lastComm); err != nil { r.logger.WithError(err). WithField("followedUserID", lastComm.UserID). 
diff --git a/alerts/tasks_test.go b/alerts/tasks_test.go index ba4d7c0121..04eb4405e4 100644 --- a/alerts/tasks_test.go +++ b/alerts/tasks_test.go @@ -31,7 +31,7 @@ var _ = Describe("CarePartnerRunner", func() { Context("continues after logging errors", func() { It("retrieving users without communication", func() { runner, test := newCarePartnerRunnerTest() - test.Alerts.UsersWithoutCommsError = fmt.Errorf("test error") + test.Alerts.OverdueCommunicationsError = fmt.Errorf("test error") runner.Run(test.Ctx, test.Task) @@ -104,8 +104,8 @@ var _ = Describe("CarePartnerRunner", func() { It("ignores Configs that don't match the data set id", func() { runner, test := newCarePartnerRunnerTest() - firstResp := test.Alerts.UsersWithoutCommsResponses[0] - test.Alerts.UsersWithoutCommsResponses[0] = append(firstResp, LastCommunication{ + firstResp := test.Alerts.OverdueCommunicationsResponses[0] + test.Alerts.OverdueCommunicationsResponses[0] = append(firstResp, LastCommunication{ UserID: firstResp[0].UserID, DataSetID: "non-matching", LastReceivedDeviceData: firstResp[0].LastReceivedDeviceData, @@ -175,7 +175,7 @@ var _ = Describe("CarePartnerRunner", func() { act.Triggered = time.Now().Add(-time.Hour) act.Sent = time.Now().Add(-time.Hour) test.Alerts.ListResponses[0][0].Activity.NoCommunication = act - test.Alerts.UsersWithoutCommsResponses[0][0].LastReceivedDeviceData = time.Now() + test.Alerts.OverdueCommunicationsResponses[0][0].LastReceivedDeviceData = time.Now() runner.Run(test.Ctx, test.Task) @@ -192,7 +192,7 @@ var _ = Describe("CarePartnerRunner", func() { act.Sent = time.Now().Add(-time.Hour) act.Resolved = time.Now().Add(-time.Minute) test.Alerts.ListResponses[0][0].Activity.NoCommunication = act - test.Alerts.UsersWithoutCommsResponses[0][0].LastReceivedDeviceData = time.Now() + test.Alerts.OverdueCommunicationsResponses[0][0].LastReceivedDeviceData = time.Now() runner.Run(test.Ctx, test.Task) Expect(len(test.Alerts.UpsertCalls)).To(Equal(0)) @@ -238,7 +238,7 @@ 
func newCarePartnerRunnerTest() (*CarePartnerRunner, *carePartnerRunnerTest) { Expect(err).To(Succeed()) last := time.Now().Add(-(DefaultNoCommunicationDelay + time.Second)) - alerts.UsersWithoutCommsResponses = [][]LastCommunication{{ + alerts.OverdueCommunicationsResponses = [][]LastCommunication{{ { UserID: mockUserID2, DataSetID: mockDataSetID, diff --git a/auth/service/api/v1/auth_service_mock.go b/auth/service/api/v1/auth_service_mock.go index 00dadd0dbf..99bc534bfa 100644 --- a/auth/service/api/v1/auth_service_mock.go +++ b/auth/service/api/v1/auth_service_mock.go @@ -10,7 +10,6 @@ import ( gomock "github.com/golang/mock/gomock" api "github.com/tidepool-org/hydrophone/client" - apple "github.com/tidepool-org/platform/apple" appvalidate "github.com/tidepool-org/platform/appvalidate" auth "github.com/tidepool-org/platform/auth" diff --git a/auth/test/mock.go b/auth/test/mock.go index de0089e03b..878d3a4544 100644 --- a/auth/test/mock.go +++ b/auth/test/mock.go @@ -9,7 +9,6 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - auth "github.com/tidepool-org/platform/auth" devicetokens "github.com/tidepool-org/platform/devicetokens" page "github.com/tidepool-org/platform/page" diff --git a/data/events/alerts.go b/data/events/alerts.go index 7c12cc0816..4898da12e7 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -23,14 +23,14 @@ import ( ) type Consumer struct { - Alerts AlertsClient - Data alerts.DataRepository - DeviceTokens auth.DeviceTokensClient - Evaluator AlertsEvaluator - Permissions permission.Client - Pusher Pusher - Recorder EventsRecorder - TokensProvider auth.ServerSessionTokenProvider + Alerts AlertsClient + Data alerts.DataRepository + DeviceTokens auth.DeviceTokensClient + Evaluator AlertsEvaluator + Permissions permission.Client + Pusher Pusher + LastCommunications LastCommunicationsRecorder + TokensProvider auth.ServerSessionTokenProvider Logger log.Logger } @@ -132,7 +132,7 @@ func (c *Consumer) 
consumeDeviceData(ctx context.Context, LastReceivedDeviceData: time.Now(), DataSetID: *datum.UploadID, } - err := c.Recorder.RecordReceivedDeviceData(ctx, lastComm) + err := c.LastCommunications.RecordReceivedDeviceData(ctx, lastComm) if err != nil { lgr.WithError(err).Info("Unable to record device data received") } diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go index 1225bd21e8..4ebcb80f25 100644 --- a/data/events/alerts_test.go +++ b/data/events/alerts_test.go @@ -71,7 +71,7 @@ var _ = Describe("Consumer", func() { c, deps := newConsumerTestDeps(docs) Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) - Expect(deps.Recorder.NumCallsFor(testFollowedUserID)).To(Equal(1)) + Expect(deps.LastCommunications.NumCallsFor(testFollowedUserID)).To(Equal(1)) }) It("consumes device data events", func() { @@ -132,13 +132,13 @@ var _ = Describe("Consumer", func() { }) }) - Describe("Reporter", func() { - Describe("Record", func() { + Describe("LastCommunicationsReporter", func() { + Describe("RecordReceivedDeviceData", func() { It("records the metadata for the user id", func() { testLogger := logtest.NewLogger() ctx := log.NewContextWithLogger(context.Background(), testLogger) - mockRepo := newMockRecorderRepository() - rec := NewRecorder(mockRepo) + mockRepo := newMockLastCommunicationsRepository() + rec := NewLastCommunicationRecorder(mockRepo) lastComm := alerts.LastCommunication{ UserID: testFollowedUserID, LastReceivedDeviceData: time.Now(), @@ -153,17 +153,17 @@ var _ = Describe("Consumer", func() { }) type consumerTestDeps struct { - Alerts *mockAlertsConfigClient - Context context.Context - Cursor *mongo.Cursor - DeviceTokens *mockDeviceTokens - Evaluator *mockStaticEvaluator - Logger *logtest.Logger - Permissions *mockPermissionsClient - Pusher Pusher - Recorder *mockRecorder - Repo *storetest.DataRepository - Session *mockConsumerGroupSession + Alerts *mockAlertsConfigClient + Context context.Context + Cursor *mongo.Cursor + 
DeviceTokens *mockDeviceTokens + Evaluator *mockStaticEvaluator + Logger *logtest.Logger + Permissions *mockPermissionsClient + Pusher Pusher + LastCommunications *mockLastCommunicationsRecorder + Repo *storetest.DataRepository + Session *mockConsumerGroupSession } func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { @@ -187,28 +187,28 @@ func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { evaluator := newMockStaticEvaluator() pusher := push.NewLogPusher(logger) deviceTokens := newMockDeviceTokens() - recorder := newMockRecorder() + lastCommunications := newMockLastCommunicationsRecorder() return &Consumer{ - Alerts: alertsClient, - Evaluator: evaluator, - Data: dataRepo, - DeviceTokens: deviceTokens, - Permissions: permissions, - Pusher: pusher, - Recorder: recorder, + Alerts: alertsClient, + Evaluator: evaluator, + Data: dataRepo, + DeviceTokens: deviceTokens, + Permissions: permissions, + Pusher: pusher, + LastCommunications: lastCommunications, }, &consumerTestDeps{ - Alerts: alertsClient, - Context: ctx, - Cursor: cur, - DeviceTokens: deviceTokens, - Evaluator: evaluator, - Pusher: pusher, - Repo: dataRepo, - Session: &mockConsumerGroupSession{}, - Logger: logger, - Recorder: recorder, - Permissions: permissions, + Alerts: alertsClient, + Context: ctx, + Cursor: cur, + DeviceTokens: deviceTokens, + Evaluator: evaluator, + Pusher: pusher, + Repo: dataRepo, + Session: &mockConsumerGroupSession{}, + Logger: logger, + LastCommunications: lastCommunications, + Permissions: permissions, } } @@ -444,18 +444,18 @@ func (c *mockPermissionsClient) GetUserPermissions(ctx context.Context, requestU } } -type mockRecorder struct { +type mockLastCommunicationsRecorder struct { recordCalls map[string]int recordCallsMu sync.Mutex } -func newMockRecorder() *mockRecorder { - return &mockRecorder{ +func newMockLastCommunicationsRecorder() *mockLastCommunicationsRecorder { + return &mockLastCommunicationsRecorder{ recordCalls: 
map[string]int{}, } } -func (r *mockRecorder) RecordReceivedDeviceData(ctx context.Context, +func (r *mockLastCommunicationsRecorder) RecordReceivedDeviceData(ctx context.Context, lastComm alerts.LastCommunication) error { r.recordCallsMu.Lock() @@ -464,24 +464,24 @@ func (r *mockRecorder) RecordReceivedDeviceData(ctx context.Context, return nil } -func (r *mockRecorder) NumCallsFor(userID string) int { +func (r *mockLastCommunicationsRecorder) NumCallsFor(userID string) int { r.recordCallsMu.Lock() defer r.recordCallsMu.Unlock() return r.recordCalls[userID] } -type mockRecorderRepository struct { +type mockLastCommunicationsRepository struct { recordCalls map[string]int recordCallsMu sync.Mutex } -func newMockRecorderRepository() *mockRecorderRepository { - return &mockRecorderRepository{ +func newMockLastCommunicationsRepository() *mockLastCommunicationsRepository { + return &mockLastCommunicationsRepository{ recordCalls: map[string]int{}, } } -func (r *mockRecorderRepository) RecordReceivedDeviceData(ctx context.Context, +func (r *mockLastCommunicationsRepository) RecordReceivedDeviceData(ctx context.Context, lastComm alerts.LastCommunication) error { r.recordCallsMu.Lock() @@ -490,19 +490,19 @@ func (r *mockRecorderRepository) RecordReceivedDeviceData(ctx context.Context, return nil } -func (r *mockRecorderRepository) UsersWithoutCommunication(ctx context.Context) ( +func (r *mockLastCommunicationsRepository) OverdueCommunications(ctx context.Context) ( []alerts.LastCommunication, error) { return nil, nil } -func (r *mockRecorderRepository) NumCallsFor(userID string) int { +func (r *mockLastCommunicationsRepository) NumCallsFor(userID string) int { r.recordCallsMu.Lock() defer r.recordCallsMu.Unlock() return r.recordCalls[userID] } -func (r *mockRecorderRepository) EnsureIndexes() error { return nil } +func (r *mockLastCommunicationsRepository) EnsureIndexes() error { return nil } type mockDeviceTokens struct { Tokens map[string][]*devicetokens.DeviceToken diff 
--git a/data/events/events.go b/data/events/events.go index af74f9c655..e4795b93f1 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -547,7 +547,7 @@ func (c *CascadingConsumer) updateCascadeHeaders(headers []sarama.RecordHeader) return keep } -type EventsRecorder interface { +type LastCommunicationsRecorder interface { // RecordReceivedDeviceData to support sending care partner alerts. // // Metadata about when we last received data for any given user is diff --git a/data/events/recorder.go b/data/events/last_communications_recorder.go similarity index 65% rename from data/events/recorder.go rename to data/events/last_communications_recorder.go index 4bcee29d48..96e6ec218c 100644 --- a/data/events/recorder.go +++ b/data/events/last_communications_recorder.go @@ -9,17 +9,17 @@ import ( lognull "github.com/tidepool-org/platform/log/null" ) -type Recorder struct { - Repo alerts.RecordsRepository +type LastCommunicationRecorder struct { + Repo alerts.LastCommunicationsRepository } -func NewRecorder(repo alerts.RecordsRepository) *Recorder { - return &Recorder{ +func NewLastCommunicationRecorder(repo alerts.LastCommunicationsRepository) *LastCommunicationRecorder { + return &LastCommunicationRecorder{ Repo: repo, } } -func (r *Recorder) RecordReceivedDeviceData(ctx context.Context, +func (r *LastCommunicationRecorder) RecordReceivedDeviceData(ctx context.Context, lastComm alerts.LastCommunication) error { logger := r.log(ctx).WithFields(log.Fields{ @@ -33,7 +33,7 @@ func (r *Recorder) RecordReceivedDeviceData(ctx context.Context, return nil } -func (r *Recorder) log(ctx context.Context) log.Logger { +func (r *LastCommunicationRecorder) log(ctx context.Context) log.Logger { if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil { return ctxLogger } diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index 1326e983d2..39371ad4de 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -25,7 +25,7 @@ 
func AlertsRoutes() []service.Route { service.Post("/v1/users/:userId/followers/:followerUserId/alerts", UpsertAlert, api.RequireAuth), service.Delete("/v1/users/:userId/followers/:followerUserId/alerts", DeleteAlert, api.RequireAuth), service.Get("/v1/users/:userId/followers/alerts", ListAlerts, api.RequireServer), - service.Get("/v1/users/without_communication", GetUsersWithoutCommunication, api.RequireServer), + service.Get("/v1/users/overdue_communications", ListOverdueCommunications, api.RequireServer), } } @@ -171,7 +171,7 @@ func ListAlerts(dCtx service.Context) { responder.Data(http.StatusOK, alerts) } -func GetUsersWithoutCommunication(dCtx service.Context) { +func ListOverdueCommunications(dCtx service.Context) { r := dCtx.Request() ctx := r.Context() @@ -182,17 +182,18 @@ func GetUsersWithoutCommunication(dCtx service.Context) { dCtx.RespondWithError(platform.ErrorUnauthorized()) return } - lastComms, err := dCtx.RecordsRepository().UsersWithoutCommunication(ctx) + overdue, err := dCtx.LastCommunicationsRepository().OverdueCommunications(ctx) if err != nil { - lgr.WithError(err).Debug("unable to list users without communication") + lgr.WithError(err).Debug("Unable to list overdue records") dCtx.RespondWithError(platform.ErrorInternalServerFailure()) return } - lgr.WithField("found", len(lastComms)).WithField("lastComms", lastComms).Debug("/v1/users/without_communication") + lgr.WithField("found", len(overdue)).WithField("overdue", overdue). + Debug("/v1/users/overdue_communications") responder := request.MustNewResponder(dCtx.Response(), r) - responder.Data(http.StatusOK, lastComms) + responder.Data(http.StatusOK, overdue) } // checkUserIDConsistency verifies the userIDs in a request. 
diff --git a/data/service/api/v1/alerts_test.go b/data/service/api/v1/alerts_test.go index cdf829208b..f517c224a7 100644 --- a/data/service/api/v1/alerts_test.go +++ b/data/service/api/v1/alerts_test.go @@ -263,9 +263,9 @@ var _ = Describe("Alerts endpoints", func() { }) }) - Describe("GetUsersWithoutCommunication", func() { + Describe("ListOverdueCommunications", func() { It("rejects unauthenticated users", func() { - testAuthenticationRequired(GetUsersWithoutCommunication) + testAuthenticationRequired(ListOverdueCommunications) }) It("succeeds, even when there are no users found", func() { @@ -273,8 +273,8 @@ var _ = Describe("Alerts endpoints", func() { dCtx := mocks.NewContext(t, "", "", nil) alertsRepo := newMockAlertsRepo() dCtx.MockAlertsRepository = alertsRepo - dCtx.MockRecordsRepository = newMockRecordsRepo() - GetUsersWithoutCommunication(dCtx) + dCtx.MockLastCommunicationsRepository = newMockLastCommunicationsRepo() + ListOverdueCommunications(dCtx) rec := dCtx.Recorder() Expect(rec.Code).To(Equal(http.StatusOK)) @@ -285,11 +285,11 @@ var _ = Describe("Alerts endpoints", func() { dCtx := mocks.NewContext(t, "", "", nil) alertsRepo := newMockAlertsRepo() dCtx.MockAlertsRepository = alertsRepo - recordsRepo := newMockRecordsRepo() - recordsRepo.UsersWithoutCommunicationError = fmt.Errorf("test error") - dCtx.MockRecordsRepository = recordsRepo + lastCommunicationsRepo := newMockLastCommunicationsRepo() + lastCommunicationsRepo.ListOverdueCommunicationsError = fmt.Errorf("test error") + dCtx.MockLastCommunicationsRepository = lastCommunicationsRepo - GetUsersWithoutCommunication(dCtx) + ListOverdueCommunications(dCtx) rec := dCtx.Recorder() Expect(rec.Code).To(Equal(http.StatusInternalServerError)) @@ -300,18 +300,18 @@ var _ = Describe("Alerts endpoints", func() { dCtx := mocks.NewContext(t, "", "", nil) alertsRepo := newMockAlertsRepo() dCtx.MockAlertsRepository = alertsRepo - recordsRepo := newMockRecordsRepo() + lastCommunicationsRepo := 
newMockLastCommunicationsRepo() testTime := time.Unix(123, 456) - recordsRepo.UsersWithoutCommunicationResponses = [][]alerts.LastCommunication{ + lastCommunicationsRepo.ListOverdueCommunicationsResponses = [][]alerts.LastCommunication{ { { LastReceivedDeviceData: testTime, }, }, } - dCtx.MockRecordsRepository = recordsRepo + dCtx.MockLastCommunicationsRepository = lastCommunicationsRepo - GetUsersWithoutCommunication(dCtx) + ListOverdueCommunications(dCtx) rec := dCtx.Recorder() Expect(rec.Code).To(Equal(http.StatusOK)) @@ -391,39 +391,39 @@ func (r *mockRepo) EnsureIndexes() error { return nil } -type mockRecordsRepo struct { - UsersWithoutCommunicationResponses [][]alerts.LastCommunication - UsersWithoutCommunicationError error +type mockLastCommunicationsRepo struct { + ListOverdueCommunicationsResponses [][]alerts.LastCommunication + ListOverdueCommunicationsError error } -func newMockRecordsRepo() *mockRecordsRepo { - return &mockRecordsRepo{ - UsersWithoutCommunicationResponses: [][]alerts.LastCommunication{}, +func newMockLastCommunicationsRepo() *mockLastCommunicationsRepo { + return &mockLastCommunicationsRepo{ + ListOverdueCommunicationsResponses: [][]alerts.LastCommunication{}, } } -func (r *mockRecordsRepo) RecordReceivedDeviceData(_ context.Context, +func (r *mockLastCommunicationsRepo) RecordReceivedDeviceData(_ context.Context, _ alerts.LastCommunication) error { return nil } -func (r *mockRecordsRepo) UsersWithoutCommunication(_ context.Context) ( +func (r *mockLastCommunicationsRepo) OverdueCommunications(_ context.Context) ( []alerts.LastCommunication, error) { - if r.UsersWithoutCommunicationError != nil { - return nil, r.UsersWithoutCommunicationError + if r.ListOverdueCommunicationsError != nil { + return nil, r.ListOverdueCommunicationsError } - if len(r.UsersWithoutCommunicationResponses) > 0 { - ret := r.UsersWithoutCommunicationResponses[0] - r.UsersWithoutCommunicationResponses = r.UsersWithoutCommunicationResponses[1:] + if 
len(r.ListOverdueCommunicationsResponses) > 0 { + ret := r.ListOverdueCommunicationsResponses[0] + r.ListOverdueCommunicationsResponses = r.ListOverdueCommunicationsResponses[1:] return ret, nil } return nil, nil } -func (r *mockRecordsRepo) EnsureIndexes() error { +func (r *mockLastCommunicationsRepo) EnsureIndexes() error { return nil } diff --git a/data/service/api/v1/mocks/context.go b/data/service/api/v1/mocks/context.go index 8b7741adc2..1d1afb20cb 100644 --- a/data/service/api/v1/mocks/context.go +++ b/data/service/api/v1/mocks/context.go @@ -24,13 +24,13 @@ type Context struct { T likeT // authDetails should be updated via the WithAuthDetails method. - authDetails *test.MockAuthDetails - RESTRequest *rest.Request - ResponseWriter rest.ResponseWriter - recorder *httptest.ResponseRecorder - MockAlertsRepository alerts.Repository - MockPermissionClient permission.Client - MockRecordsRepository alerts.RecordsRepository + authDetails *test.MockAuthDetails + RESTRequest *rest.Request + ResponseWriter rest.ResponseWriter + recorder *httptest.ResponseRecorder + MockAlertsRepository alerts.Repository + MockPermissionClient permission.Client + MockLastCommunicationsRepository alerts.LastCommunicationsRepository } func NewContext(t likeT, method, url string, body io.Reader) *Context { @@ -105,6 +105,6 @@ func (c *Context) PermissionClient() permission.Client { return c.MockPermissionClient } -func (c *Context) RecordsRepository() alerts.RecordsRepository { - return c.MockRecordsRepository +func (c *Context) LastCommunicationsRepository() alerts.LastCommunicationsRepository { + return c.MockLastCommunicationsRepository } diff --git a/data/service/api/v1/mocks/mocklogger_test_gen.go b/data/service/api/v1/mocks/mocklogger_test_gen.go index 81757d6525..65b949fcee 100644 --- a/data/service/api/v1/mocks/mocklogger_test_gen.go +++ b/data/service/api/v1/mocks/mocklogger_test_gen.go @@ -8,7 +8,6 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - log 
"github.com/tidepool-org/platform/log" ) diff --git a/data/service/api/v1/users_datasets_create_test.go b/data/service/api/v1/users_datasets_create_test.go index da16ef557e..302a4f7474 100644 --- a/data/service/api/v1/users_datasets_create_test.go +++ b/data/service/api/v1/users_datasets_create_test.go @@ -228,6 +228,6 @@ func (c *mockDataServiceContext) SummaryReporter() *reporters.PatientRealtimeDay panic("not implemented") } -func (c *mockDataServiceContext) RecordsRepository() alerts.RecordsRepository { +func (c *mockDataServiceContext) LastCommunicationsRepository() alerts.LastCommunicationsRepository { panic("not implemented") } diff --git a/data/service/context.go b/data/service/context.go index 5c5e334c1a..5cb714a7f0 100644 --- a/data/service/context.go +++ b/data/service/context.go @@ -29,7 +29,7 @@ type Context interface { SummaryRepository() dataStore.SummaryRepository SyncTaskRepository() syncTaskStore.SyncTaskRepository AlertsRepository() alerts.Repository - RecordsRepository() alerts.RecordsRepository + LastCommunicationsRepository() alerts.LastCommunicationsRepository SummarizerRegistry() *summary.SummarizerRegistry SummaryReporter() *reporters.PatientRealtimeDaysReporter diff --git a/data/service/context/standard.go b/data/service/context/standard.go index 995a48df66..f9c72b07d9 100644 --- a/data/service/context/standard.go +++ b/data/service/context/standard.go @@ -26,22 +26,22 @@ import ( type Standard struct { *serviceContext.Responder - authClient auth.Client - metricClient metric.Client - permissionClient permission.Client - dataDeduplicatorFactory deduplicator.Factory - dataStore dataStore.Store - dataRepository dataStore.DataRepository - summaryRepository dataStore.SummaryRepository - summarizerRegistry *summary.SummarizerRegistry - summaryReporter *reporters.PatientRealtimeDaysReporter - syncTaskStore syncTaskStore.Store - syncTasksRepository syncTaskStore.SyncTaskRepository - dataClient dataClient.Client - clinicsClient clinics.Client - 
dataSourceClient dataSource.Client - alertsRepository alerts.Repository - recordsRepository alerts.RecordsRepository + authClient auth.Client + metricClient metric.Client + permissionClient permission.Client + dataDeduplicatorFactory deduplicator.Factory + dataStore dataStore.Store + dataRepository dataStore.DataRepository + summaryRepository dataStore.SummaryRepository + summarizerRegistry *summary.SummarizerRegistry + summaryReporter *reporters.PatientRealtimeDaysReporter + syncTaskStore syncTaskStore.Store + syncTasksRepository syncTaskStore.SyncTaskRepository + dataClient dataClient.Client + clinicsClient clinics.Client + dataSourceClient dataSource.Client + alertsRepository alerts.Repository + lastCommunicationsRepository alerts.LastCommunicationsRepository } func WithContext(authClient auth.Client, metricClient metric.Client, permissionClient permission.Client, @@ -130,8 +130,8 @@ func (s *Standard) Close() { if s.alertsRepository != nil { s.alertsRepository = nil } - if s.recordsRepository != nil { - s.recordsRepository = nil + if s.lastCommunicationsRepository != nil { + s.lastCommunicationsRepository = nil } } @@ -213,9 +213,9 @@ func (s *Standard) AlertsRepository() alerts.Repository { return s.alertsRepository } -func (s *Standard) RecordsRepository() alerts.RecordsRepository { - if s.recordsRepository == nil { - s.recordsRepository = s.dataStore.NewRecorderRepository() +func (s *Standard) LastCommunicationsRepository() alerts.LastCommunicationsRepository { + if s.lastCommunicationsRepository == nil { + s.lastCommunicationsRepository = s.dataStore.NewLastCommunicationsRepository() } - return s.recordsRepository + return s.lastCommunicationsRepository } diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 7e42bf6b1e..ae3f7eb037 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -496,21 +496,21 @@ func (s *Standard) initializeAlertsEventsHandler() error { alertsRepo := 
s.dataStore.NewAlertsRepository() dataRepo := s.dataStore.NewAlertsDataRepository() - recorderRepo := s.dataStore.NewRecorderRepository() + lastCommunicationsRepo := s.dataStore.NewLastCommunicationsRepository() alertsEvaluator := alerts.NewEvaluator(alertsRepo, dataRepo, s.PermissionClient(), s.Logger(), s.AuthClient()) ec := &dataEvents.Consumer{ - Alerts: alertsRepo, - Evaluator: alertsEvaluator, - Data: dataRepo, - DeviceTokens: s.AuthClient(), - Logger: s.Logger(), - Permissions: s.PermissionClient(), - Pusher: s.pusher, - Recorder: dataEvents.NewRecorder(recorderRepo), - TokensProvider: s.AuthClient(), + Alerts: alertsRepo, + Evaluator: alertsEvaluator, + Data: dataRepo, + DeviceTokens: s.AuthClient(), + Logger: s.Logger(), + Permissions: s.PermissionClient(), + Pusher: s.pusher, + LastCommunications: dataEvents.NewLastCommunicationRecorder(lastCommunicationsRepo), + TokensProvider: s.AuthClient(), } runnerCfg := dataEvents.SaramaRunnerConfig{ diff --git a/data/store/mongo/mongo.go b/data/store/mongo/mongo.go index 41c2316616..c4f2d553cf 100644 --- a/data/store/mongo/mongo.go +++ b/data/store/mongo/mongo.go @@ -29,7 +29,7 @@ func (s *Store) EnsureIndexes() error { dataRepository := s.NewDataRepository() summaryRepository := s.NewSummaryRepository() alertsRepository := s.NewAlertsRepository() - recorderRepository := s.NewRecorderRepository() + lastCommunicationsRepository := s.NewLastCommunicationsRepository() if err := dataRepository.EnsureIndexes(); err != nil { return err @@ -43,7 +43,7 @@ func (s *Store) EnsureIndexes() error { return err } - if err := recorderRepository.EnsureIndexes(); err != nil { + if err := lastCommunicationsRepository.EnsureIndexes(); err != nil { return err } @@ -72,8 +72,8 @@ func (s *Store) NewAlertsRepository() alerts.Repository { return &r } -func (s *Store) NewRecorderRepository() alerts.RecordsRepository { - r := recorderRepo(*s.Store.GetRepository("records")) +func (s *Store) NewLastCommunicationsRepository() 
alerts.LastCommunicationsRepository { + r := lastCommunicationsRepo(*s.Store.GetRepository("lastCommunications")) return &r } diff --git a/data/store/mongo/mongo_recorder.go b/data/store/mongo/mongo_recorder.go index fa3000f8bd..df813a047d 100644 --- a/data/store/mongo/mongo_recorder.go +++ b/data/store/mongo/mongo_recorder.go @@ -14,10 +14,11 @@ import ( structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" ) -// recorderRepo implements RecorderRepository, writing data to a MongoDB collection. -type recorderRepo structuredmongo.Repository +// lastCommunicationsRepo implements LastCommunicationsRepository, writing data to a +// MongoDB collection. +type lastCommunicationsRepo structuredmongo.Repository -func (r *recorderRepo) RecordReceivedDeviceData(ctx context.Context, +func (r *lastCommunicationsRepo) RecordReceivedDeviceData(ctx context.Context, lastComm alerts.LastCommunication) error { opts := options.Update().SetUpsert(true) @@ -28,7 +29,7 @@ func (r *recorderRepo) RecordReceivedDeviceData(ctx context.Context, return nil } -func (r *recorderRepo) EnsureIndexes() error { +func (r *lastCommunicationsRepo) EnsureIndexes() error { repo := structuredmongo.Repository(*r) return (&repo).CreateAllIndexes(context.Background(), []mongo.IndexModel{ { @@ -49,14 +50,14 @@ func (r *recorderRepo) EnsureIndexes() error { }) } -func (r *recorderRepo) filter(lastComm alerts.LastCommunication) map[string]any { +func (r *lastCommunicationsRepo) filter(lastComm alerts.LastCommunication) map[string]any { return map[string]any{ "userId": lastComm.UserID, "dataSetId": lastComm.DataSetID, } } -func (d *recorderRepo) UsersWithoutCommunication(ctx context.Context) ([]alerts.LastCommunication, error) { +func (d *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ([]alerts.LastCommunication, error) { start := time.Now().Add(-5 * time.Minute) selector := bson.M{ "lastReceivedDeviceData": bson.M{"$lte": start}, @@ -64,11 +65,11 @@ func (d *recorderRepo) 
UsersWithoutCommunication(ctx context.Context) ([]alerts. findOptions := options.Find().SetSort(bson.D{{Key: "lastReceivedDeviceData", Value: 1}}) cursor, err := d.Find(ctx, selector, findOptions) if err != nil { - return nil, errors.Wrapf(err, "Unable to list users without communication") + return nil, errors.Wrapf(err, "Unable to list overdue records") } records := []alerts.LastCommunication{} if err := cursor.All(ctx, &records); err != nil { - return nil, errors.Wrapf(err, "Unable to iterate users without communication cursor") + return nil, errors.Wrapf(err, "Unable to iterate overdue records") } return records, nil } diff --git a/data/store/mongo/mongo_test.go b/data/store/mongo/mongo_test.go index 9031564938..b4f7b452fe 100644 --- a/data/store/mongo/mongo_test.go +++ b/data/store/mongo/mongo_test.go @@ -243,7 +243,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { var alertsDataRepository alerts.DataRepository var summaryRepository dataStore.SummaryRepository var alertsRepository alerts.Repository - var recordsRepository alerts.RecordsRepository + var lastCommunicationsRepository alerts.LastCommunicationsRepository var logger = logTest.NewLogger() var store *dataStoreMongo.Store @@ -279,7 +279,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { dataSetCollection = store.GetCollection("deviceDataSets") summaryCollection = store.GetCollection("summary") alertsCollection = store.GetCollection("alerts") - recordsCollection = store.GetCollection("records") + recordsCollection = store.GetCollection("lastCommunications") }) }) @@ -453,10 +453,10 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { }) }) - Context("NewRecordsRepository", func() { + Context("NewLastCommunicationsRepository", func() { It("returns a new repository", func() { - recordsRepository = store.NewRecorderRepository() - Expect(recordsRepository).ToNot(BeNil()) + lastCommunicationsRepository = 
store.NewLastCommunicationsRepository() + Expect(lastCommunicationsRepository).ToNot(BeNil()) }) }) @@ -466,7 +466,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { summaryRepository = store.NewSummaryRepository() alertsRepository = store.NewAlertsRepository() alertsDataRepository = store.NewAlertsDataRepository() - recordsRepository = store.NewRecorderRepository() + lastCommunicationsRepository = store.NewLastCommunicationsRepository() Expect(repository).ToNot(BeNil()) Expect(summaryRepository).ToNot(BeNil()) Expect(alertsRepository).ToNot(BeNil()) @@ -2634,17 +2634,16 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { }) }) - Context("recorder", func() { - + Context("LastCommunicationsRecorder", func() { BeforeEach(func() { - recordsRepository = store.NewRecorderRepository() - Expect(recordsRepository).ToNot(BeNil()) + lastCommunicationsRepository = store.NewLastCommunicationsRepository() + Expect(lastCommunicationsRepository).ToNot(BeNil()) }) - Describe("UsersWithoutCommunication", func() { + Describe("OverdueCommunications", func() { It("retrieves matching records", func() { ctx := context.Background() - got, err := recordsRepository.UsersWithoutCommunication(ctx) + got, err := lastCommunicationsRepository.OverdueCommunications(ctx) Expect(err).To(Succeed()) Expect(len(got)).To(Equal(0)) }) @@ -2656,15 +2655,15 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { DataSetID: testDataSetID, LastReceivedDeviceData: time.Unix(123, 456), } - Expect(recordsRepository.RecordReceivedDeviceData(ctx, testLastComm)).To(Succeed()) + Expect(lastCommunicationsRepository.RecordReceivedDeviceData(ctx, testLastComm)).To(Succeed()) testLastComm2 := alerts.LastCommunication{ UserID: testUserID + "2", DataSetID: testDataSetID + "2", LastReceivedDeviceData: time.Now(), } - Expect(recordsRepository.RecordReceivedDeviceData(ctx, testLastComm2)).To(Succeed()) + 
Expect(lastCommunicationsRepository.RecordReceivedDeviceData(ctx, testLastComm2)).To(Succeed()) - got, err := recordsRepository.UsersWithoutCommunication(ctx) + got, err := lastCommunicationsRepository.OverdueCommunications(ctx) Expect(err).To(Succeed()) Expect(len(got)).To(Equal(1)) }) diff --git a/data/store/store.go b/data/store/store.go index 0c33141f1c..2ea9c9452d 100644 --- a/data/store/store.go +++ b/data/store/store.go @@ -20,7 +20,7 @@ type Store interface { NewDataRepository() DataRepository NewSummaryRepository() SummaryRepository NewAlertsRepository() alerts.Repository - NewRecorderRepository() alerts.RecordsRepository + NewLastCommunicationsRepository() alerts.LastCommunicationsRepository } // DataSetRepository is the interface for interacting and modifying diff --git a/dexcom/fetch/test/mock.go b/dexcom/fetch/test/mock.go index 1ea79b2471..65e6c878a1 100644 --- a/dexcom/fetch/test/mock.go +++ b/dexcom/fetch/test/mock.go @@ -10,7 +10,6 @@ import ( time "time" gomock "github.com/golang/mock/gomock" - auth "github.com/tidepool-org/platform/auth" data "github.com/tidepool-org/platform/data" source "github.com/tidepool-org/platform/data/source" diff --git a/task/test/mock.go b/task/test/mock.go index 7c06ef90b3..c702095423 100644 --- a/task/test/mock.go +++ b/task/test/mock.go @@ -9,7 +9,6 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - page "github.com/tidepool-org/platform/page" task "github.com/tidepool-org/platform/task" ) From 1ebe7784d09585bd644516d88b868cc0e5d82c60 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 10 Feb 2025 16:49:15 -0700 Subject: [PATCH 33/54] pass a log.Logger to EvaluateNoCommunication Explicit > Implicit BACK-2499 BACK-2559 --- alerts/config.go | 6 ++---- alerts/config_test.go | 4 ++-- alerts/tasks.go | 2 +- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index 89ad86a984..5d1cd73059 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -190,15 
+190,13 @@ func isReEval(t1, t2 time.Time) bool { return t1.After(t2) } -// TODO pass in a logger -func (c *Config) EvaluateNoCommunication(ctx context.Context, last time.Time) ( - *Notification, bool) { +func (c *Config) EvaluateNoCommunication(ctx context.Context, + lgr log.Logger, last time.Time) (*Notification, bool) { if c.Alerts.NoCommunication == nil || !c.Alerts.NoCommunication.Enabled { return nil, false } - lgr := c.LoggerWithFields(log.LoggerFromContext(ctx)) ctx = log.NewContextWithLogger(ctx, lgr) nc := c.Alerts.NoCommunication.Evaluate(ctx, last) needsUpsert := c.Activity.NoCommunication.Update(nc.OutOfRange) diff --git a/alerts/config_test.go b/alerts/config_test.go index fd2dc80da9..226bbbb847 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -128,11 +128,11 @@ var _ = Describe("Config", func() { Context("when a notification is returned", func() { Describe("EvaluateNoCommunication", func() { It("injects user ids", func() { - ctx, _, cfg := newConfigTest() + ctx, lgr, cfg := newConfigTest() cfg.Alerts.NoCommunication.Enabled = true when := time.Now().Add(-(DefaultNoCommunicationDelay + time.Second)) - n, _ := cfg.EvaluateNoCommunication(ctx, when) + n, _ := cfg.EvaluateNoCommunication(ctx, lgr, when) Expect(n).ToNot(BeNil()) Expect(n.RecipientUserID).To(Equal(mockUserID1)) diff --git a/alerts/tasks.go b/alerts/tasks.go index 5079ca76c6..018e7f1426 100644 --- a/alerts/tasks.go +++ b/alerts/tasks.go @@ -111,7 +111,7 @@ func (r *CarePartnerRunner) evaluateLastComm(ctx context.Context, for _, config := range configs { lgr := config.LoggerWithFields(r.logger) lastData := lastComm.LastReceivedDeviceData - notification, needsUpsert := config.EvaluateNoCommunication(ctx, lastData) + notification, needsUpsert := config.EvaluateNoCommunication(ctx, lgr, lastData) if notification != nil { notification.Sent = r.wrapWithUpsert(ctx, lgr, config, notification.Sent) notifications = append(notifications, notification) From 
10883b48a56b976eb4019e1d66f5c2beab55a9a4 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 10 Feb 2025 16:50:47 -0700 Subject: [PATCH 34/54] remove un-needed comment BACK-2499 BACK-2559 --- alerts/config.go | 1 - 1 file changed, 1 deletion(-) diff --git a/alerts/config.go b/alerts/config.go index 5d1cd73059..9557822a3c 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -200,7 +200,6 @@ func (c *Config) EvaluateNoCommunication(ctx context.Context, ctx = log.NewContextWithLogger(ctx, lgr) nc := c.Alerts.NoCommunication.Evaluate(ctx, last) needsUpsert := c.Activity.NoCommunication.Update(nc.OutOfRange) - // TODO check re-eval? I don't think so delay := c.Alerts.NoCommunication.Delay.Duration() if delay == 0 { delay = DefaultNoCommunicationDelay From 5494b1dd9ce699cb0d087b9b032564ae5004a018 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 10 Feb 2025 16:55:48 -0700 Subject: [PATCH 35/54] move care partner task definition to alerts package BACK-2499 BACK-2559 --- alerts/tasks.go | 14 +++++++++++++- alerts/tasks_test.go | 8 ++++++++ task/carepartner.go | 18 ------------------ task/carepartner_test.go | 14 -------------- task/store/mongo/mongo.go | 3 ++- 5 files changed, 23 insertions(+), 34 deletions(-) delete mode 100644 task/carepartner.go delete mode 100644 task/carepartner_test.go diff --git a/alerts/tasks.go b/alerts/tasks.go index 018e7f1426..adb68c347b 100644 --- a/alerts/tasks.go +++ b/alerts/tasks.go @@ -10,10 +10,22 @@ import ( "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" "github.com/tidepool-org/platform/push" "github.com/tidepool-org/platform/task" ) +const CarePartnerType = "org.tidepool.carepartner" + +func NewCarePartnerTaskCreate() *task.TaskCreate { + return &task.TaskCreate{ + Name: pointer.FromAny(CarePartnerType), + Type: CarePartnerType, + AvailableTime: &time.Time{}, + Data: map[string]interface{}{}, + } +} 
+ type CarePartnerRunner struct { logger log.Logger @@ -50,7 +62,7 @@ func NewCarePartnerRunner(logger log.Logger, alerts AlertsClient, } func (r *CarePartnerRunner) GetRunnerType() string { - return task.CarePartnerType + return CarePartnerType } func (r *CarePartnerRunner) GetRunnerTimeout() time.Duration { diff --git a/alerts/tasks_test.go b/alerts/tasks_test.go index 04eb4405e4..54e9158ebc 100644 --- a/alerts/tasks_test.go +++ b/alerts/tasks_test.go @@ -212,6 +212,14 @@ var _ = Describe("CarePartnerRunner", func() { }) }) +var _ = Describe("NewCarePartnerTaskCreate", func() { + It("succeeds", func() { + Expect(func() { + Expect(NewCarePartnerTaskCreate()).ToNot(Equal(nil)) + }).ToNot(Panic()) + }) +}) + type carePartnerRunnerTest struct { Alerts *mockAlertsClient Config *Config diff --git a/task/carepartner.go b/task/carepartner.go deleted file mode 100644 index 31b5b6bc58..0000000000 --- a/task/carepartner.go +++ /dev/null @@ -1,18 +0,0 @@ -package task - -import ( - "time" - - "github.com/tidepool-org/platform/pointer" -) - -const CarePartnerType = "org.tidepool.carepartner" - -func NewCarePartnerTaskCreate() *TaskCreate { - return &TaskCreate{ - Name: pointer.FromAny(CarePartnerType), - Type: CarePartnerType, - AvailableTime: &time.Time{}, - Data: map[string]interface{}{}, - } -} diff --git a/task/carepartner_test.go b/task/carepartner_test.go deleted file mode 100644 index b6f3f4478b..0000000000 --- a/task/carepartner_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package task - -import ( - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("NewCarePartnerTaskCreate", func() { - It("succeeds", func() { - Expect(func() { - Expect(NewCarePartnerTaskCreate()).ToNot(Equal(nil)) - }).ToNot(Panic()) - }) -}) diff --git a/task/store/mongo/mongo.go b/task/store/mongo/mongo.go index 9d5041a4d0..8232d55300 100644 --- a/task/store/mongo/mongo.go +++ b/task/store/mongo/mongo.go @@ -10,6 +10,7 @@ import ( "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/ehr/reconcile" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" @@ -178,7 +179,7 @@ func (t *TaskRepository) EnsureEHRReconcileTask(ctx context.Context) error { } func (t *TaskRepository) EnsureCarePartnerTask(ctx context.Context) error { - create := task.NewCarePartnerTaskCreate() + create := alerts.NewCarePartnerTaskCreate() return t.ensureTask(ctx, create) } From 215ffebfa62dbfdcb7ab68d440f800a0fe6a1c67 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 10 Feb 2025 16:58:02 -0700 Subject: [PATCH 36/54] make GetRunnerDeadline() use a multiple of GetRunnerDurationMaximum() Requested in code review. 
https://github.com/tidepool-org/platform/pull/715#discussion_r1942661771 BACK-2499 BACK-2559 --- alerts/tasks.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/alerts/tasks.go b/alerts/tasks.go index adb68c347b..b7c5f63dbc 100644 --- a/alerts/tasks.go +++ b/alerts/tasks.go @@ -70,7 +70,7 @@ func (r *CarePartnerRunner) GetRunnerTimeout() time.Duration { } func (r *CarePartnerRunner) GetRunnerDeadline() time.Time { - return time.Now().Add(30 * time.Second) + return time.Now().Add(r.GetRunnerDurationMaximum()) } func (r *CarePartnerRunner) GetRunnerDurationMaximum() time.Duration { From 597869af63b3212b3e2bc1a870bfd37eb7a408a5 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 10 Feb 2025 17:20:43 -0700 Subject: [PATCH 37/54] replace magic number with named constant BACK-2499 BACK-2559 --- alerts/config.go | 5 ++++- data/store/mongo/mongo_recorder.go | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index 9557822a3c..823c2ee45f 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -522,7 +522,10 @@ func (a *NoCommunicationAlert) Evaluate(ctx context.Context, lastReceived time.T return er } -const DefaultNoCommunicationDelay = 5 * time.Minute +const ( + DefaultNoCommunicationDelay = 5 * time.Minute + MinimumNoCommunicationDelay = 5 * time.Minute +) const NoCommunicationMessage = "Tidepool is unable to communicate with a user's device" diff --git a/data/store/mongo/mongo_recorder.go b/data/store/mongo/mongo_recorder.go index df813a047d..a24b0b1fb0 100644 --- a/data/store/mongo/mongo_recorder.go +++ b/data/store/mongo/mongo_recorder.go @@ -58,7 +58,7 @@ func (r *lastCommunicationsRepo) filter(lastComm alerts.LastCommunication) map[s } func (d *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ([]alerts.LastCommunication, error) { - start := time.Now().Add(-5 * time.Minute) + start := time.Now().Add(-alerts.MinimumNoCommunicationDelay) selector := bson.M{ 
"lastReceivedDeviceData": bson.M{"$lte": start}, } From 58745b74e0c38e6ab55b3a7406f4e3d3dad955c4 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 12 Feb 2025 13:55:04 -0700 Subject: [PATCH 38/54] adds environment-based config for alerts retry delays BACK-2559 --- data/service/service/standard.go | 37 +++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/data/service/service/standard.go b/data/service/service/standard.go index ae3f7eb037..9cf4c98ec1 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -523,12 +523,19 @@ func (s *Standard) initializeAlertsEventsHandler() error { }, } - retryDelays := []time.Duration{0, 1 * time.Second} - if strings.Contains(commonConfig.KafkaTopicPrefix, "tidepool-prod") { - // Kakfa topics/partitions aren't cheap, so minimize costs outside of production. - retryDelays = append(retryDelays, 2*time.Second, 3*time.Second, 5*time.Second) + cfg := &alertsEventsHandlerConfig{Config: platform.NewConfig()} + cfg.UserAgent = s.UserAgent() + reporter := s.ConfigReporter().WithScopes("alerts", "retry") + loader := platform.NewConfigReporterLoader(reporter) + if err := cfg.Load(loader); err != nil { + return errors.Wrap(err, "unable to alerts retry delays config") } - eventsRunner := dataEvents.NewCascadingSaramaEventsRunner(runnerCfg, s.Logger(), retryDelays) + delays, err := parseCommaSeparatedDurations(reporter.GetWithDefault("delays", "1s")) + if err != nil { + return errors.Wrap(err, "Unable to read configured alerts retry delays") + } + + eventsRunner := dataEvents.NewCascadingSaramaEventsRunner(runnerCfg, s.Logger(), delays) runner := dataEvents.NewSaramaRunner(eventsRunner) if err := runner.Initialize(); err != nil { return errors.Wrap(err, "Unable to initialize alerts events handler runner") @@ -537,3 +544,23 @@ func (s *Standard) initializeAlertsEventsHandler() error { return nil } + +type alertsEventsHandlerConfig struct { + *platform.Config + 
RetryDelaysConfig string `envconfig:"TIDEPOOL_DATA_SERVICE_ALERTS_RETRY_DELAYS" default:"1s"` +} + +func parseCommaSeparatedDurations(s string) ([]time.Duration, error) { + out := []time.Duration{} + for _, d := range strings.Split(s, ",") { + if d == "" { + continue + } + dur, err := time.ParseDuration(d) + if err != nil { + return nil, err + } + out = append(out, dur) + } + return out, nil +} From 503eeea1becaa69e6861e4dfa12d05ca0717b0bb Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 12 Feb 2025 15:11:08 -0700 Subject: [PATCH 39/54] modify task service to allow tasks to repeat ASAP Previously, when completing a task, an available time of nil would cause the task to be marked as failed. Now, when a task completes and has available time of nil, time.Now() is substituted, which should cause the task to be run again ASAP. In addition, if the available time is in the past, it is substituted with time.Now(), so that it will run again ASAP. This supports the care partner no communication check, which wants to run 1x/second, but as that's not available with the task service (the smallest interval is 5 seconds), setting the value to 1 second intervals will run the task on each task service iteration. 
BACK-2559 --- alerts/tasks.go | 29 ++--------------------------- alerts/tasks_test.go | 7 +++++-- task/queue/queue.go | 5 ++++- 3 files changed, 11 insertions(+), 30 deletions(-) diff --git a/alerts/tasks.go b/alerts/tasks.go index b7c5f63dbc..34d77b7966 100644 --- a/alerts/tasks.go +++ b/alerts/tasks.go @@ -79,12 +79,12 @@ func (r *CarePartnerRunner) GetRunnerDurationMaximum() time.Duration { func (r *CarePartnerRunner) Run(ctx context.Context, tsk *task.Task) { r.logger.Info("care partner no communication check") - ctx = auth.NewContextWithServerSessionTokenProvider(ctx, r.authClient) start := time.Now() + ctx = auth.NewContextWithServerSessionTokenProvider(ctx, r.authClient) if err := r.evaluateLastComms(ctx); err != nil { r.logger.WithError(err).Warn("running care partner no communication check") } - r.scheduleNextRun(tsk, start) + tsk.RepeatAvailableAfter(time.Second - time.Since(start)) } func (r *CarePartnerRunner) evaluateLastComms(ctx context.Context) error { @@ -201,31 +201,6 @@ func (r *CarePartnerRunner) pushNotifications(ctx context.Context, } } -func (r *CarePartnerRunner) scheduleNextRun(tsk *task.Task, lastStart time.Time) { - // Ideally, we would start the next run 1 second after this run... - nextDesiredRun := lastStart.Add(time.Second) - now := time.Now() - if nextDesiredRun.Before(now) { - r.logger.Info("care partner is bumping nextDesiredRun") - // nextDesiredRun, when added to time.Now in tsk.RepeatAvailableAfter, must - // result in a time in the future or the task will be marked failed (and not run - // again). - // - // One workaround is to take a guess at how long it will take Run() to return - // and the task queue to evaluate the task's AvailableAfter time. Maybe the task - // queue could be re-worked to accept a value that indicates "as soon as - // possible"? Or if it accepted a time.Duration, then one could pass it - // time.Nanosecond to get closer to "ASAP", and then the Zero value might mean - // don't repeat. 
Or the Zero value could mean repeat ASAP. Or a negative value - // could mean repeat now. Whatever. It would prevent the task from being marked - // a failure for not being able to guess when the value would be read. Which - // wasn't its intent I'm sure, it just wasn't designed for tasks with the level - // of resolution and repetition expected for this purpose. - nextDesiredRun = now.Add(25 * time.Millisecond) - } - tsk.RepeatAvailableAfter(time.Until(nextDesiredRun)) -} - // Pusher is a service-agnostic interface for sending push notifications. type Pusher interface { // Push a notification to a device. diff --git a/alerts/tasks_test.go b/alerts/tasks_test.go index 54e9158ebc..d2b683fbf1 100644 --- a/alerts/tasks_test.go +++ b/alerts/tasks_test.go @@ -18,12 +18,15 @@ import ( var _ = Describe("CarePartnerRunner", func() { Describe("Run", func() { - It("schedules its next run", func() { + It("schedules its next run for 1 second", func() { runner, test := newCarePartnerRunnerTest() + start := time.Now() runner.Run(test.Ctx, test.Task) - Expect(test.Task.AvailableTime).ToNot(BeZero()) + if Expect(test.Task.AvailableTime).ToNot(BeNil()) { + Expect(*test.Task.AvailableTime).To(BeTemporally("~", start.Add(time.Second))) + } Expect(test.Task.DeadlineTime).To(BeNil()) Expect(test.Task.State).To(Equal(task.TaskStatePending)) }) diff --git a/task/queue/queue.go b/task/queue/queue.go index 7e3dfe0203..39f50524ad 100644 --- a/task/queue/queue.go +++ b/task/queue/queue.go @@ -374,7 +374,10 @@ func (q *queue) completeTask(ctx context.Context, tsk *task.Task) { func (q *queue) computeState(tsk *task.Task) { switch tsk.State { case task.TaskStatePending: - if tsk.AvailableTime == nil || time.Now().After(*tsk.AvailableTime) { + now := time.Now() + if tsk.AvailableTime == nil || tsk.AvailableTime.Before(now) { + tsk.AvailableTime = &now + } else if time.Now().After(*tsk.AvailableTime) { tsk.AppendError(errors.New("pending task requires future available time")) tsk.SetFailed() } 
From e8daea8c7141c7095965c171448467b6c02dc80e Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 19 Feb 2025 14:42:46 -0700 Subject: [PATCH 40/54] the alerts task's GetRunnerTimeout should be a multiple of its maximum duration BACK-2559 --- alerts/tasks.go | 8 +++++--- .../{mongo_recorder.go => mongo_last_communications.go} | 0 2 files changed, 5 insertions(+), 3 deletions(-) rename data/store/mongo/{mongo_recorder.go => mongo_last_communications.go} (100%) diff --git a/alerts/tasks.go b/alerts/tasks.go index 34d77b7966..d0d5ede942 100644 --- a/alerts/tasks.go +++ b/alerts/tasks.go @@ -66,15 +66,17 @@ func (r *CarePartnerRunner) GetRunnerType() string { } func (r *CarePartnerRunner) GetRunnerTimeout() time.Duration { - return 30 * time.Second + return r.GetRunnerDurationMaximum() } func (r *CarePartnerRunner) GetRunnerDeadline() time.Time { - return time.Now().Add(r.GetRunnerDurationMaximum()) + return time.Now().Add(3 * r.GetRunnerDurationMaximum()) } +const RunnerDurationMaximum = 30 * time.Second + func (r *CarePartnerRunner) GetRunnerDurationMaximum() time.Duration { - return 30 * time.Second + return RunnerDurationMaximum } func (r *CarePartnerRunner) Run(ctx context.Context, tsk *task.Task) { diff --git a/data/store/mongo/mongo_recorder.go b/data/store/mongo/mongo_last_communications.go similarity index 100% rename from data/store/mongo/mongo_recorder.go rename to data/store/mongo/mongo_last_communications.go From 83cc65656faac933b51be32381d5f4bfb9406b9f Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 19 Feb 2025 14:47:59 -0700 Subject: [PATCH 41/54] rename API endpoint /v1/users/overdue_communications => /v1/overdue_communications BACK-2559 --- alerts/client.go | 2 +- data/service/api/v1/alerts.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/alerts/client.go b/alerts/client.go index fae857a660..4ff7656ba8 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -91,7 +91,7 @@ func (c *Client) List(ctx context.Context, 
followedUserID string) ([]*Config, er // // This method should only be called via an authenticated service session. func (c *Client) OverdueCommunications(ctx context.Context) ([]LastCommunication, error) { - url := c.client.ConstructURL("v1", "users", "overdue_communications") + url := c.client.ConstructURL("v1", "overdue_communications") lastComms := []LastCommunication{} err := c.request(ctx, http.MethodGet, url, nil, &lastComms) if err != nil { diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index 39371ad4de..ec987757a5 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -25,7 +25,7 @@ func AlertsRoutes() []service.Route { service.Post("/v1/users/:userId/followers/:followerUserId/alerts", UpsertAlert, api.RequireAuth), service.Delete("/v1/users/:userId/followers/:followerUserId/alerts", DeleteAlert, api.RequireAuth), service.Get("/v1/users/:userId/followers/alerts", ListAlerts, api.RequireServer), - service.Get("/v1/users/overdue_communications", ListOverdueCommunications, api.RequireServer), + service.Get("/v1/overdue_communications", ListOverdueCommunications, api.RequireServer), } } @@ -190,7 +190,7 @@ func ListOverdueCommunications(dCtx service.Context) { } lgr.WithField("found", len(overdue)).WithField("overdue", overdue). - Debug("/v1/users/overdue_communications") + Debug("/v1/overdue_communications") responder := request.MustNewResponder(dCtx.Response(), r) responder.Data(http.StatusOK, overdue) From fab9cebd6f5dd6b8cbdfa02fb931f6afdaead62b Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 19 Feb 2025 14:52:38 -0700 Subject: [PATCH 42/54] remove unused config struct The need for this struct went away when retry delays were removed.
BACK-2559 --- data/service/service/standard.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 9cf4c98ec1..11de7a6638 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -523,7 +523,7 @@ func (s *Standard) initializeAlertsEventsHandler() error { }, } - cfg := &alertsEventsHandlerConfig{Config: platform.NewConfig()} + cfg := platform.NewConfig() cfg.UserAgent = s.UserAgent() reporter := s.ConfigReporter().WithScopes("alerts", "retry") loader := platform.NewConfigReporterLoader(reporter) @@ -545,11 +545,6 @@ func (s *Standard) initializeAlertsEventsHandler() error { return nil } -type alertsEventsHandlerConfig struct { - *platform.Config - RetryDelaysConfig string `envconfig:"TIDEPOOL_DATA_SERVICE_ALERTS_RETRY_DELAYS" default:"1s"` -} - func parseCommaSeparatedDurations(s string) ([]time.Duration, error) { out := []time.Duration{} for _, d := range strings.Split(s, ",") { From de0015b58b7854f60864c12c1b84e880d3168f89 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 19 Feb 2025 14:55:03 -0700 Subject: [PATCH 43/54] validate overdue communication minimum value The minimum (and default value) is 5m. Validate that configs specify either 0, or a value in the range from 5m-6h. BACK-2559 --- alerts/config.go | 6 +++++- alerts/config_test.go | 4 ++-- data/store/mongo/mongo_last_communications.go | 4 +++- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index 823c2ee45f..a1cae72a3c 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -498,7 +498,10 @@ type NoCommunicationAlert struct { func (a NoCommunicationAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 6*time.Hour) + if dur != 0 { + validator.Duration("delay", &dur). 
+ InRange(MinimumNoCommunicationDelay, MaximumNoCommunicationDelay) + } } // Evaluate if the time since data was last received warrants a notification. @@ -525,6 +528,7 @@ func (a *NoCommunicationAlert) Evaluate(ctx context.Context, lastReceived time.T const ( DefaultNoCommunicationDelay = 5 * time.Minute MinimumNoCommunicationDelay = 5 * time.Minute + MaximumNoCommunicationDelay = 6 * time.Hour ) const NoCommunicationMessage = "Tidepool is unable to communicate with a user's device" diff --git a/alerts/config_test.go b/alerts/config_test.go index 226bbbb847..8a229b1608 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -782,12 +782,12 @@ var _ = Describe("Config", func() { val = validator.New(logTest.NewLogger()) b = NoCommunicationAlert{Delay: -1} b.Validate(val) - Expect(val.Error()).To(MatchError("value -1ns is not between 0s and 6h0m0s")) + Expect(val.Error()).To(MatchError("value -1ns is not between 5m0s and 6h0m0s")) val = validator.New(logTest.NewLogger()) b = NoCommunicationAlert{Delay: DurationMinutes(time.Hour*6 + time.Second)} b.Validate(val) - Expect(val.Error()).To(MatchError("value 6h0m1s is not between 0s and 6h0m0s")) + Expect(val.Error()).To(MatchError("value 6h0m1s is not between 5m0s and 6h0m0s")) }) }) }) diff --git a/data/store/mongo/mongo_last_communications.go b/data/store/mongo/mongo_last_communications.go index a24b0b1fb0..dbd7f62c5b 100644 --- a/data/store/mongo/mongo_last_communications.go +++ b/data/store/mongo/mongo_last_communications.go @@ -57,7 +57,9 @@ func (r *lastCommunicationsRepo) filter(lastComm alerts.LastCommunication) map[s } } -func (d *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ([]alerts.LastCommunication, error) { +func (d *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ( + []alerts.LastCommunication, error) { + start := time.Now().Add(-alerts.MinimumNoCommunicationDelay) selector := bson.M{ "lastReceivedDeviceData": bson.M{"$lte": start}, From 
e74e8106b71efb3618ce6403818a85946ca0ec61 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 19 Feb 2025 14:56:35 -0700 Subject: [PATCH 44/54] assume no alerts retry topics by default This minimizes the Kafka load for non production environments. BACK-2559 --- data/service/service/standard.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 11de7a6638..72615af84b 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -530,7 +530,7 @@ func (s *Standard) initializeAlertsEventsHandler() error { if err := cfg.Load(loader); err != nil { return errors.Wrap(err, "unable to alerts retry delays config") } - delays, err := parseCommaSeparatedDurations(reporter.GetWithDefault("delays", "1s")) + delays, err := parseCommaSeparatedDurations(reporter.GetWithDefault("delays", "0s")) if err != nil { return errors.Wrap(err, "Unable to read configured alerts retry delays") } From 271772c36bbbe9b5734b693185f03db080b239e5 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 19 Feb 2025 16:52:11 -0700 Subject: [PATCH 45/54] fix a receiver name to match others defined on the struct BACK-2559 --- data/store/mongo/mongo_last_communications.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/store/mongo/mongo_last_communications.go b/data/store/mongo/mongo_last_communications.go index dbd7f62c5b..92b8b76ac5 100644 --- a/data/store/mongo/mongo_last_communications.go +++ b/data/store/mongo/mongo_last_communications.go @@ -57,7 +57,7 @@ func (r *lastCommunicationsRepo) filter(lastComm alerts.LastCommunication) map[s } } -func (d *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ( +func (r *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ( []alerts.LastCommunication, error) { start := time.Now().Add(-alerts.MinimumNoCommunicationDelay) @@ -65,7 +65,7 @@ func (d *lastCommunicationsRepo) 
OverdueCommunications(ctx context.Context) ( "lastReceivedDeviceData": bson.M{"$lte": start}, } findOptions := options.Find().SetSort(bson.D{{Key: "lastReceivedDeviceData", Value: 1}}) - cursor, err := d.Find(ctx, selector, findOptions) + cursor, err := r.Find(ctx, selector, findOptions) if err != nil { return nil, errors.Wrapf(err, "Unable to list overdue records") } From 18d97b860c9c53f038062bbe4b55be321194dbf2 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 19 Feb 2025 16:56:22 -0700 Subject: [PATCH 46/54] merge alertsDataRepo into DataRepository As Todd pointed out in code review, there's no need for this to be separate from the existing DataRepository, as it only queries the deviceData collection. BACK-2559 --- data/service/service/standard.go | 2 +- data/store/mongo/mongo.go | 5 -- data/store/mongo/mongo_alerts.go | 78 +++++--------------------------- data/store/mongo/mongo_data.go | 55 ++++++++++++++++++++++ data/store/mongo/mongo_test.go | 4 +- data/store/store.go | 5 +- 6 files changed, 72 insertions(+), 77 deletions(-) diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 72615af84b..1f96d27fcf 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -495,7 +495,7 @@ func (s *Standard) initializeAlertsEventsHandler() error { } alertsRepo := s.dataStore.NewAlertsRepository() - dataRepo := s.dataStore.NewAlertsDataRepository() + dataRepo := s.dataStore.NewDataRepository() lastCommunicationsRepo := s.dataStore.NewLastCommunicationsRepository() alertsEvaluator := alerts.NewEvaluator(alertsRepo, dataRepo, s.PermissionClient(), diff --git a/data/store/mongo/mongo.go b/data/store/mongo/mongo.go index c4f2d553cf..7bcccfd252 100644 --- a/data/store/mongo/mongo.go +++ b/data/store/mongo/mongo.go @@ -76,8 +76,3 @@ func (s *Store) NewLastCommunicationsRepository() alerts.LastCommunicationsRepos r := lastCommunicationsRepo(*s.Store.GetRepository("lastCommunications")) return &r } - -func (s 
*Store) NewAlertsDataRepository() alerts.DataRepository { - r := alertsDataRepo(*s.Store.GetRepository("deviceData")) - return &r -} diff --git a/data/store/mongo/mongo_alerts.go b/data/store/mongo/mongo_alerts.go index 5887f8f834..91f2d90196 100644 --- a/data/store/mongo/mongo_alerts.go +++ b/data/store/mongo/mongo_alerts.go @@ -3,16 +3,12 @@ package mongo import ( "context" "fmt" - "time" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/alerts" - "github.com/tidepool-org/platform/data/types/blood/glucose" - "github.com/tidepool-org/platform/data/types/blood/glucose/continuous" - "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/errors" structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" ) @@ -24,7 +20,7 @@ type alertsRepo structuredmongo.Repository // // Once set, UploadID, UserID, and FollowedUserID cannot be changed. This is to prevent a // user from granting themselves access to another data set. -func (r *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { +func (a *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { opts := options.Update().SetUpsert(true) filter := bson.D{ {Key: "userId", Value: conf.UserID}, @@ -35,7 +31,7 @@ func (r *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { "$set": bson.M{"alerts": conf.Alerts, "activity": conf.Activity}, "$setOnInsert": filter, } - _, err := r.UpdateOne(ctx, filter, doc, opts) + _, err := a.UpdateOne(ctx, filter, doc, opts) if err != nil { return fmt.Errorf("upserting alerts.Config: %w", err) } @@ -43,8 +39,8 @@ func (r *alertsRepo) Upsert(ctx context.Context, conf *alerts.Config) error { } // Delete will delete the given Config. 
-func (r *alertsRepo) Delete(ctx context.Context, cfg *alerts.Config) error { - _, err := r.DeleteMany(ctx, r.filter(cfg), nil) +func (a *alertsRepo) Delete(ctx context.Context, cfg *alerts.Config) error { + _, err := a.DeleteMany(ctx, a.filter(cfg), nil) if err != nil { return fmt.Errorf("upserting alerts.Config: %w", err) } @@ -52,11 +48,11 @@ func (r *alertsRepo) Delete(ctx context.Context, cfg *alerts.Config) error { } // List will retrieve any Configs that are defined by followers of the given user. -func (r *alertsRepo) List(ctx context.Context, followedUserID string) ([]*alerts.Config, error) { +func (a *alertsRepo) List(ctx context.Context, followedUserID string) ([]*alerts.Config, error) { filter := bson.D{ {Key: "followedUserId", Value: followedUserID}, } - cursor, err := r.Find(ctx, filter, nil) + cursor, err := a.Find(ctx, filter, nil) if err != nil { return nil, errors.Wrapf(err, "Unable to list alerts.Config(s) for followed user %s", followedUserID) } @@ -72,8 +68,8 @@ func (r *alertsRepo) List(ctx context.Context, followedUserID string) ([]*alerts } // Get will retrieve the given Config. -func (r *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Config, error) { - res := r.FindOne(ctx, r.filter(cfg), nil) +func (a *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Config, error) { + res := a.FindOne(ctx, a.filter(cfg), nil) if res.Err() != nil { return nil, fmt.Errorf("getting alerts.Config: %w", res.Err()) } @@ -85,8 +81,8 @@ func (r *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Confi } // EnsureIndexes to maintain index constraints. 
-func (r *alertsRepo) EnsureIndexes() error { - repo := structuredmongo.Repository(*r) +func (a *alertsRepo) EnsureIndexes() error { + repo := structuredmongo.Repository(*a) return (&repo).CreateAllIndexes(context.Background(), []mongo.IndexModel{ { Keys: bson.D{ @@ -100,61 +96,9 @@ func (r *alertsRepo) EnsureIndexes() error { }) } -func (r *alertsRepo) filter(cfg *alerts.Config) interface{} { +func (a *alertsRepo) filter(cfg *alerts.Config) interface{} { return bson.D{ {Key: "userId", Value: cfg.UserID}, {Key: "followedUserId", Value: cfg.FollowedUserID}, } } - -type alertsDataRepo structuredmongo.Repository - -func (d *alertsDataRepo) GetAlertableData(ctx context.Context, - params alerts.GetAlertableDataParams) (*alerts.GetAlertableDataResponse, error) { - - if params.End.IsZero() { - params.End = time.Now() - } - - cursor, err := d.getAlertableData(ctx, params, dosingdecision.Type) - if err != nil { - return nil, err - } - dosingDecisions := []*dosingdecision.DosingDecision{} - if err := cursor.All(ctx, &dosingDecisions); err != nil { - return nil, errors.Wrap(err, "Unable to load alertable dosing documents") - } - cursor, err = d.getAlertableData(ctx, params, continuous.Type) - if err != nil { - return nil, err - } - glucoseData := []*glucose.Glucose{} - if err := cursor.All(ctx, &glucoseData); err != nil { - return nil, errors.Wrap(err, "Unable to load alertable glucose documents") - } - response := &alerts.GetAlertableDataResponse{ - DosingDecisions: dosingDecisions, - Glucose: glucoseData, - } - - return response, nil -} - -func (d *alertsDataRepo) getAlertableData(ctx context.Context, - params alerts.GetAlertableDataParams, typ string) (*mongo.Cursor, error) { - - selector := bson.M{ - "_active": true, - "uploadId": params.UploadID, - "type": typ, - "_userId": params.UserID, - "time": bson.M{"$gte": params.Start, "$lte": params.End}, - } - findOptions := options.Find().SetSort(bson.D{{Key: "time", Value: -1}}) - cursor, err := d.Find(ctx, selector, 
findOptions) - if err != nil { - format := "Unable to find alertable %s data in dataset %s" - return nil, errors.Wrapf(err, format, typ, params.UploadID) - } - return cursor, nil -} diff --git a/data/store/mongo/mongo_data.go b/data/store/mongo/mongo_data.go index c827999bc9..62789b9c67 100644 --- a/data/store/mongo/mongo_data.go +++ b/data/store/mongo/mongo_data.go @@ -8,9 +8,14 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data" "github.com/tidepool-org/platform/data/store" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose/continuous" + "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/data/types/upload" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" @@ -198,3 +203,53 @@ func (d *DataRepository) DestroyDataForUserByID(ctx context.Context, userID stri func isTypeUpload(typ []string) bool { return slices.Contains(typ, strings.ToLower(upload.Type)) } + +func (d *DataRepository) GetAlertableData(ctx context.Context, + params alerts.GetAlertableDataParams) (*alerts.GetAlertableDataResponse, error) { + + if params.End.IsZero() { + params.End = time.Now() + } + + cursor, err := d.getAlertableData(ctx, params, dosingdecision.Type) + if err != nil { + return nil, err + } + dosingDecisions := []*dosingdecision.DosingDecision{} + if err := cursor.All(ctx, &dosingDecisions); err != nil { + return nil, errors.Wrap(err, "Unable to load alertable dosing documents") + } + cursor, err = d.getAlertableData(ctx, params, continuous.Type) + if err != nil { + return nil, err + } + glucoseData := []*glucose.Glucose{} + if err := cursor.All(ctx, &glucoseData); err != nil { + return nil, errors.Wrap(err, "Unable to load alertable glucose documents") + } + response := 
&alerts.GetAlertableDataResponse{ + DosingDecisions: dosingDecisions, + Glucose: glucoseData, + } + + return response, nil +} + +func (d *DataRepository) getAlertableData(ctx context.Context, + params alerts.GetAlertableDataParams, typ string) (*mongo.Cursor, error) { + + selector := bson.M{ + "_active": true, + "uploadId": params.UploadID, + "type": typ, + "_userId": params.UserID, + "time": bson.M{"$gte": params.Start, "$lte": params.End}, + } + findOptions := options.Find().SetSort(bson.D{{Key: "time", Value: -1}}) + cursor, err := d.DatumRepository.Find(ctx, selector, findOptions) + if err != nil { + format := "Unable to find alertable %s data in dataset %s" + return nil, errors.Wrapf(err, format, typ, params.UploadID) + } + return cursor, nil +} diff --git a/data/store/mongo/mongo_test.go b/data/store/mongo/mongo_test.go index b4f7b452fe..465548a960 100644 --- a/data/store/mongo/mongo_test.go +++ b/data/store/mongo/mongo_test.go @@ -465,7 +465,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { repository = store.NewDataRepository() summaryRepository = store.NewSummaryRepository() alertsRepository = store.NewAlertsRepository() - alertsDataRepository = store.NewAlertsDataRepository() + alertsDataRepository = store.NewDataRepository() lastCommunicationsRepository = store.NewLastCommunicationsRepository() Expect(repository).ToNot(BeNil()) Expect(summaryRepository).ToNot(BeNil()) @@ -2427,7 +2427,7 @@ var _ = Describe("Mongo", Label("mongodb", "slow", "integration"), func() { Expect(repository.CreateDataSet(ctx, testSet)).To(Succeed()) testSetData := testDataSetData(testSet) Expect(repository.CreateDataSetData(ctx, testSet, testSetData)).To(Succeed()) - alertsDataRepository = store.NewAlertsDataRepository() + alertsDataRepository = store.NewDataRepository() Expect(alertsDataRepository).ToNot(BeNil()) params := alerts.GetAlertableDataParams{ diff --git a/data/store/store.go b/data/store/store.go index 2ea9c9452d..f68649d0d0 100644 --- 
a/data/store/store.go +++ b/data/store/store.go @@ -65,11 +65,12 @@ type DatumRepository interface { DistinctUserIDs(ctx context.Context, typ []string) ([]string, error) } -// DataRepository is the combined interface of DataSetRepository and -// DatumRepository. +// DataRepository is the combined interface of DataSetRepository, +// DatumRepository, and [alerts.DataRepository]. type DataRepository interface { DataSetRepository DatumRepository + alerts.DataRepository } type Filter struct { From 687c45333f85d9d1fb9c60be5367aa5681acea46 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 21 Feb 2025 09:30:59 -0700 Subject: [PATCH 47/54] rename method receiver BACK-2559 --- data/store/mongo/mongo_last_communications.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/data/store/mongo/mongo_last_communications.go b/data/store/mongo/mongo_last_communications.go index 92b8b76ac5..b620e07094 100644 --- a/data/store/mongo/mongo_last_communications.go +++ b/data/store/mongo/mongo_last_communications.go @@ -18,19 +18,19 @@ import ( // MongoDB collection. 
type lastCommunicationsRepo structuredmongo.Repository -func (r *lastCommunicationsRepo) RecordReceivedDeviceData(ctx context.Context, +func (l *lastCommunicationsRepo) RecordReceivedDeviceData(ctx context.Context, lastComm alerts.LastCommunication) error { opts := options.Update().SetUpsert(true) - _, err := r.UpdateOne(ctx, r.filter(lastComm), bson.M{"$set": lastComm}, opts) + _, err := l.UpdateOne(ctx, l.filter(lastComm), bson.M{"$set": lastComm}, opts) if err != nil { return fmt.Errorf("upserting alerts.LastCommunication: %w", err) } return nil } -func (r *lastCommunicationsRepo) EnsureIndexes() error { - repo := structuredmongo.Repository(*r) +func (l *lastCommunicationsRepo) EnsureIndexes() error { + repo := structuredmongo.Repository(*l) return (&repo).CreateAllIndexes(context.Background(), []mongo.IndexModel{ { Keys: bson.D{ @@ -50,14 +50,14 @@ func (r *lastCommunicationsRepo) EnsureIndexes() error { }) } -func (r *lastCommunicationsRepo) filter(lastComm alerts.LastCommunication) map[string]any { +func (l *lastCommunicationsRepo) filter(lastComm alerts.LastCommunication) map[string]any { return map[string]any{ "userId": lastComm.UserID, "dataSetId": lastComm.DataSetID, } } -func (r *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ( +func (l *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ( []alerts.LastCommunication, error) { start := time.Now().Add(-alerts.MinimumNoCommunicationDelay) @@ -65,7 +65,7 @@ func (r *lastCommunicationsRepo) OverdueCommunications(ctx context.Context) ( "lastReceivedDeviceData": bson.M{"$lte": start}, } findOptions := options.Find().SetSort(bson.D{{Key: "lastReceivedDeviceData", Value: 1}}) - cursor, err := r.Find(ctx, selector, findOptions) + cursor, err := l.Find(ctx, selector, findOptions) if err != nil { return nil, errors.Wrapf(err, "Unable to list overdue records") } From 93a978a81bab120afb3ca534d55b916c9a820254 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 21 Feb 2025 
11:23:58 -0700 Subject: [PATCH 48/54] consolidate care partner alerts pusher configs The tasks service and data service can both push alerts for care partners. This change consolidates that configuration into one set of environment variables loaded via the alerts package. I wish that alerts didn't need to know about envconfig, but at least for the moment it's the only way to consolidate the information about the configuration into a single re-usable struct. Naming of the pusher in both services is prefixed with "alerts" to communicate that this pusher is configured for care partner alerts. BACK-2559 --- alerts/pusher.go | 78 ++++++++++++++++++++++++++++++++ alerts/pusher_test.go | 75 ++++++++++++++++++++++++++++++ alerts/tasks.go | 15 ------ data/service/service/standard.go | 27 +++-------- push/push.go | 54 ++++++---------------- push/push_test.go | 44 ------------------ task/service/service/service.go | 20 ++------ 7 files changed, 177 insertions(+), 136 deletions(-) create mode 100644 alerts/pusher.go create mode 100644 alerts/pusher_test.go diff --git a/alerts/pusher.go b/alerts/pusher.go new file mode 100644 index 0000000000..1563e4fc43 --- /dev/null +++ b/alerts/pusher.go @@ -0,0 +1,78 @@ +package alerts + +import ( + "context" + + "github.com/kelseyhightower/envconfig" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/push" +) + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} + +// ToPushNotification converts Notification to push.Notification. 
+func ToPushNotification(notification *Notification) *push.Notification { + return &push.Notification{ + Message: notification.Message, + } +} + +type cpaPusherEnvconfig struct { + // SigningKey is the raw token signing key received from Apple (.p8 file containing + // PEM-encoded private key) + // + // https://developer.apple.com/documentation/usernotifications/sending-notification-requests-to-apns + SigningKey []byte `envconfig:"TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY" required:"true"` + KeyID string `envconfig:"TIDEPOOL_CARE_PARTNER_ALERTS_APNS_KEY_ID" required:"true"` + BundleID string `envconfig:"TIDEPOOL_CARE_PARTNER_ALERTS_APNS_BUNDLE_ID" required:"true"` + TeamID string `envconfig:"TIDEPOOL_CARE_PARTNER_ALERTS_APNS_TEAM_ID" required:"true"` +} + +// NewPusher handles the loading of care partner configuration for push notifications. +func NewPusher() (*push.APNSPusher, error) { + config, err := loadPusherViaEnvconfig() + if err != nil { + return nil, errors.Wrap(err, "unable to care partner pusher config") + } + + client, err := push.NewAPNS2Client(config.SigningKey, config.KeyID, config.TeamID) + if err != nil { + return nil, errors.Wrap(err, "unable to create care partner pusher client") + } + + return push.NewAPNSPusher(client, config.BundleID), nil +} + +func loadPusherViaEnvconfig() (*cpaPusherEnvconfig, error) { + c := &cpaPusherEnvconfig{} + if err := envconfig.Process("", c); err != nil { + return nil, errors.Wrap(err, "Unable to process APNs pusher config") + } + + // envconfig's "required" tag won't error on values that are defined but empty, so + // manually check + + if len(c.SigningKey) == 0 { + return nil, errors.New("Unable to build APNSPusherConfig: APNs signing key is blank") + } + + if c.BundleID == "" { + return nil, errors.New("Unable to build APNSPusherConfig: bundleID is blank") + } + + if c.KeyID == "" { + return nil, errors.New("Unable to build APNSPusherConfig: keyID is blank") + } + + if c.TeamID == "" { + return nil, 
errors.New("Unable to build APNSPusherConfig: teamID is blank") + } + + return c, nil +} diff --git a/alerts/pusher_test.go b/alerts/pusher_test.go new file mode 100644 index 0000000000..d9f7f71db0 --- /dev/null +++ b/alerts/pusher_test.go @@ -0,0 +1,75 @@ +package alerts + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("APNSPusher", func() { + Describe("NewAPNSPusherFromEnv", func() { + It("succeeds", func() { + configureEnvconfig() + pusher, err := NewPusher() + Expect(err).To(Succeed()) + Expect(pusher).ToNot(Equal(nil)) + }) + }) +}) + +var _ = Describe("LoadAPNSPusherConfigFromEnv", func() { + BeforeEach(func() { + configureEnvconfig() + }) + + It("errors if key data is empty or blank", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY", "") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("APNs signing key is blank"))) + + os.Unsetenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY") + _, err = NewPusher() + Expect(err).To(MatchError(ContainSubstring("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY missing value"))) + }) + + It("errors if key data is invalid", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY", "invalid") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("AuthKey must be a valid .p8 PEM file"))) + }) + + It("errors if bundleID is blank", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_BUNDLE_ID", "") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("bundleID is blank"))) + }) + + It("errors if teamID is blank", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_TEAM_ID", "") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("teamID is blank"))) + }) + + It("errors if keyID is blank", func() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_KEY_ID", "") + _, err := NewPusher() + Expect(err).To(MatchError(ContainSubstring("keyID is 
blank"))) + }) +}) + +func configureEnvconfig() { + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_SIGNING_KEY", string(validTestKey)) + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_KEY_ID", "key") + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_TEAM_ID", "team") + GinkgoT().Setenv("TIDEPOOL_CARE_PARTNER_ALERTS_APNS_BUNDLE_ID", "bundle") +} + +// validTestKey is a random private key for testing +var validTestKey = []byte(`-----BEGIN PRIVATE KEY----- +MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDDNrXT9ZRWPUAAg38Qi +Z553y7sGqOgMxUCG36eCIcRCy1QiTJBgGDxIhWvkE8Sx4N6hZANiAATrsRyRXLa0 +Tgczq8tmFomMP212HdkPF3gFEl/CkqGHUodR2EdZBW1zVcmuLjIN4zvqVVXMJm/U +eHZz9xAZ95y3irAfkMuOD/Bw88UYvhKnipOHBeS8BwqyfFQ+NRB6xYU= +-----END PRIVATE KEY----- +`) diff --git a/alerts/tasks.go b/alerts/tasks.go index d0d5ede942..48eb6df601 100644 --- a/alerts/tasks.go +++ b/alerts/tasks.go @@ -6,12 +6,10 @@ import ( "time" "github.com/tidepool-org/platform/auth" - "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/pointer" - "github.com/tidepool-org/platform/push" "github.com/tidepool-org/platform/task" ) @@ -202,16 +200,3 @@ func (r *CarePartnerRunner) pushNotifications(ctx context.Context, } } } - -// Pusher is a service-agnostic interface for sending push notifications. -type Pusher interface { - // Push a notification to a device. - Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error -} - -// ToPushNotification converts Notification to push.Notification. 
-func ToPushNotification(notification *Notification) *push.Notification { - return &push.Notification{ - Message: notification.Message, - } -} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 1f96d27fcf..96f2fe46d7 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -6,7 +6,6 @@ import ( "time" "github.com/IBM/sarama" - "github.com/kelseyhightower/envconfig" eventsCommon "github.com/tidepool-org/go-common/events" @@ -47,7 +46,7 @@ type Standard struct { dataClient *Client clinicsClient *clinics.Client dataSourceClient *dataSourceServiceClient.Client - pusher dataEvents.Pusher + alertsPusher dataEvents.Pusher userEventsHandler events.Runner alertsEventsHandler events.Runner api *api.Standard @@ -95,7 +94,7 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeSaramaLogger(); err != nil { return err } - if err := s.initializePusher(); err != nil { + if err := s.initializeAlertsPusher(); err != nil { return err } if err := s.initializeUserEventsHandler(); err != nil { @@ -451,27 +450,15 @@ func (s *Standard) initializeSaramaLogger() error { return nil } -func (s *Standard) initializePusher() error { +func (s *Standard) initializeAlertsPusher() error { var err error - - apns2Config := &struct { - SigningKey []byte `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_SIGNING_KEY"` - KeyID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_KEY_ID"` - BundleID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_BUNDLE_ID"` - TeamID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_TEAM_ID"` - }{} - if err := envconfig.Process("", apns2Config); err != nil { - return errors.Wrap(err, "Unable to process APNs pusher config") - } - var pusher dataEvents.Pusher - pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, - apns2Config.TeamID, apns2Config.BundleID) + pusher, err = alerts.NewPusher() if err != nil { - 
s.Logger().WithError(err).Warn("falling back to logging of push notifications") + s.Logger().WithError(err).Warn("falling back to logging of alerts push notifications") pusher = push.NewLogPusher(s.Logger()) } - s.pusher = pusher + s.alertsPusher = pusher return nil } @@ -508,7 +495,7 @@ func (s *Standard) initializeAlertsEventsHandler() error { DeviceTokens: s.AuthClient(), Logger: s.Logger(), Permissions: s.PermissionClient(), - Pusher: s.pusher, + Pusher: s.alertsPusher, LastCommunications: dataEvents.NewLastCommunicationRecorder(lastCommunicationsRepo), TokensProvider: s.AuthClient(), } diff --git a/push/push.go b/push/push.go index d865e4e3ea..47323989ad 100644 --- a/push/push.go +++ b/push/push.go @@ -34,8 +34,7 @@ type APNSPusher struct { clientMu sync.Mutex } -// NewAPNSPusher creates a Pusher for sending device notifications via Apple's -// APNs. +// NewAPNSPusher creates an APNSPusher for sending device notifications via Apple's APNs. func NewAPNSPusher(client APNS2Client, bundleID string) *APNSPusher { return &APNSPusher{ BundleID: bundleID, @@ -43,44 +42,6 @@ func NewAPNSPusher(client APNS2Client, bundleID string) *APNSPusher { } } -// NewAPNSPusherFromKeyData creates an APNSPusher for sending device -// notifications via Apple's APNs. -// -// The signingKey is the raw token signing key received from Apple (.p8 file -// containing PEM-encoded private key), along with its respective team id, key -// id, and application bundle id. 
-// -// https://developer.apple.com/documentation/usernotifications/sending-notification-requests-to-apns -func NewAPNSPusherFromKeyData(signingKey []byte, keyID, teamID, bundleID string) (*APNSPusher, error) { - if len(signingKey) == 0 { - return nil, errors.New("Unable to build APNSPusher: APNs signing key is blank") - } - - if bundleID == "" { - return nil, errors.New("Unable to build APNSPusher: bundleID is blank") - } - - if keyID == "" { - return nil, errors.New("Unable to build APNSPusher: keyID is blank") - } - - if teamID == "" { - return nil, errors.New("Unable to build APNSPusher: teamID is blank") - } - - authKey, err := token.AuthKeyFromBytes(signingKey) - if err != nil { - return nil, err - } - token := &token.Token{ - AuthKey: authKey, - KeyID: keyID, - TeamID: teamID, - } - client := &apns2Client{Client: apns2.NewTokenClient(token)} - return NewAPNSPusher(client, bundleID), nil -} - func (p *APNSPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, notification *Notification) error { @@ -146,6 +107,19 @@ type apns2Client struct { *apns2.Client } +func NewAPNS2Client(signingKey []byte, keyID, teamID string) (*apns2Client, error) { + authKey, err := token.AuthKeyFromBytes(signingKey) + if err != nil { + return nil, err + } + token := &token.Token{ + AuthKey: authKey, + KeyID: keyID, + TeamID: teamID, + } + return &apns2Client{apns2.NewTokenClient(token)}, nil +} + func (c apns2Client) Development() APNS2Client { d := c.Client.Development() return &apns2Client{Client: d} diff --git a/push/push_test.go b/push/push_test.go index 11496ffabc..5922f85e25 100644 --- a/push/push_test.go +++ b/push/push_test.go @@ -49,50 +49,6 @@ func testDeps() (context.Context, *APNSPusher, *pushTestDeps) { } var _ = Describe("APNSPusher", func() { - Describe("NewAPNSPusherFromKeyData", func() { - It("errors if key data is empty or blank", func() { - _, err := NewAPNSPusherFromKeyData([]byte(""), "key", "team", "bundle") - 
Expect(err).To(MatchError(ContainSubstring("APNs signing key is blank"))) - - _, err = NewAPNSPusherFromKeyData(nil, "key", "team", "bundle") - Expect(err).To(MatchError(ContainSubstring("APNs signing key is blank"))) - }) - - It("errors if key data is invalid", func() { - _, err := NewAPNSPusherFromKeyData([]byte("foo"), "key", "team", "bundle") - Expect(err).To(MatchError(ContainSubstring("AuthKey must be a valid .p8 PEM file"))) - }) - - It("errors if bundleID is blank", func() { - _, err := NewAPNSPusherFromKeyData([]byte("hi"), "key", "team", "") - Expect(err).To(MatchError(ContainSubstring("bundleID is blank"))) - }) - - It("errors if teamID is blank", func() { - _, err := NewAPNSPusherFromKeyData([]byte("hi"), "key", "", "bundle") - Expect(err).To(MatchError(ContainSubstring("teamID is blank"))) - }) - - It("errors if keyID is blank", func() { - _, err := NewAPNSPusherFromKeyData([]byte("hi"), "", "team", "bundle") - Expect(err).To(MatchError(ContainSubstring("keyID is blank"))) - }) - - It("succeeds", func() { - // random private key for testing - data := []byte(`-----BEGIN PRIVATE KEY----- -MIG2AgEAMBAGByqGSM49AgEGBSuBBAAiBIGeMIGbAgEBBDDNrXT9ZRWPUAAg38Qi -Z553y7sGqOgMxUCG36eCIcRCy1QiTJBgGDxIhWvkE8Sx4N6hZANiAATrsRyRXLa0 -Tgczq8tmFomMP212HdkPF3gFEl/CkqGHUodR2EdZBW1zVcmuLjIN4zvqVVXMJm/U -eHZz9xAZ95y3irAfkMuOD/Bw88UYvhKnipOHBeS8BwqyfFQ+NRB6xYU= ------END PRIVATE KEY----- -`) - pusher, err := NewAPNSPusherFromKeyData(data, "key", "team", "bundle") - Expect(err).To(Succeed()) - Expect(pusher).ToNot(Equal(nil)) - }) - }) - Describe("Push", func() { It("requires an Apple token", func() { ctx, pusher, deps := testDeps() diff --git a/task/service/service/service.go b/task/service/service/service.go index dbd5da9695..4eda8d0c9c 100644 --- a/task/service/service/service.go +++ b/task/service/service/service.go @@ -3,8 +3,6 @@ package service import ( "context" - "github.com/kelseyhightower/envconfig" - "github.com/tidepool-org/platform/alerts" 
"github.com/tidepool-org/platform/application" "github.com/tidepool-org/platform/client" @@ -82,7 +80,7 @@ func (s *Service) Initialize(provider application.Provider) error { if err := s.initializeAlertsClient(); err != nil { return err } - if err := s.initializePusher(); err != nil { + if err := s.initializeAlertsPusher(); err != nil { return err } if err := s.initializePermissionClient(); err != nil { @@ -410,22 +408,10 @@ func (s *Service) initializeAlertsClient() error { return nil } -func (s *Service) initializePusher() error { +func (s *Service) initializeAlertsPusher() error { var err error - - apns2Config := &struct { - SigningKey []byte `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_SIGNING_KEY"` - KeyID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_KEY_ID"` - BundleID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_BUNDLE_ID"` - TeamID string `envconfig:"TIDEPOOL_TASK_SERVICE_PUSHER_APNS_TEAM_ID"` - }{} - if err := envconfig.Process("", apns2Config); err != nil { - return errors.Wrap(err, "Unable to process APNs pusher config") - } - var pusher events.Pusher - pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, - apns2Config.TeamID, apns2Config.BundleID) + pusher, err = alerts.NewPusher() if err != nil { s.Logger().WithError(err).Warn("falling back to logging of push notifications") pusher = push.NewLogPusher(s.Logger()) From 44fecf7c96f4de604100bac47038ada79d31045c Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 12 Mar 2025 13:24:17 -0600 Subject: [PATCH 49/54] mark skipped activity updates BACK-2559 --- data/events/alerts.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/data/events/alerts.go b/data/events/alerts.go index 4898da12e7..be2616f3a8 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -76,6 +76,8 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, if isActivityAndActivityOnly(updatedFields) { lgr.WithField("updatedFields", updatedFields). 
Debug("alerts config is an activity update, will skip") + lgr.WithField("message", msg).Debug("marked") + session.MarkMessage(msg, "") return nil } From ea8740ad05a2525eef63d40821c0de4d05d4a4f2 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 14 Mar 2025 14:36:44 -0600 Subject: [PATCH 50/54] prefix kafka consumer group ids This matches what's done with existing consumer groups. BACK-2559 --- data/service/service/standard.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 96f2fe46d7..a4d3f58a82 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -480,6 +480,7 @@ func (s *Standard) initializeAlertsEventsHandler() error { for _, topic := range topics { prefixedTopics = append(prefixedTopics, topicPrefix+topic) } + commonConfig.SaramaConfig.ClientID = topicPrefix + "alerts" alertsRepo := s.dataStore.NewAlertsRepository() dataRepo := s.dataStore.NewDataRepository() @@ -502,7 +503,7 @@ func (s *Standard) initializeAlertsEventsHandler() error { runnerCfg := dataEvents.SaramaRunnerConfig{ Brokers: commonConfig.KafkaBrokers, - GroupID: "alerts", + GroupID: topicPrefix + "alerts", Topics: prefixedTopics, Sarama: commonConfig.SaramaConfig, MessageConsumer: &dataEvents.AlertsEventsConsumer{ From 452934ec6d44779319c8e9b530069dbf58e22e94 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 18 Mar 2025 14:41:55 -0600 Subject: [PATCH 51/54] add logging when an alerts event can't be consumed Kafka/Sarama won't log it (at least not by default), and this isn't something we expect to see, so log it. 
BACK-2559 --- data/events/events.go | 2 ++ data/service/service/standard.go | 1 + 2 files changed, 3 insertions(+) diff --git a/data/events/events.go b/data/events/events.go index e4795b93f1..dbe425790c 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -171,6 +171,7 @@ func CappedExponentialBinaryDelay(cap time.Duration) func(int) time.Duration { type AlertsEventsConsumer struct { Consumer asyncevents.SaramaMessageConsumer + Logger log.Logger } func (c *AlertsEventsConsumer) Consume(ctx context.Context, @@ -178,6 +179,7 @@ func (c *AlertsEventsConsumer) Consume(ctx context.Context, err := c.Consumer.Consume(ctx, session, message) if err != nil { session.MarkMessage(message, fmt.Sprintf("I have given up after error: %s", err)) + c.Logger.WithError(err).Info("Unable to consume alerts event") return err } return nil diff --git a/data/service/service/standard.go b/data/service/service/standard.go index a4d3f58a82..c873d535f7 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -508,6 +508,7 @@ func (s *Standard) initializeAlertsEventsHandler() error { Sarama: commonConfig.SaramaConfig, MessageConsumer: &dataEvents.AlertsEventsConsumer{ Consumer: ec, + Logger: s.Logger(), }, } From b33531ad93c8c89209563bc8344ded4f1dd947aa Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 18 Mar 2025 16:08:34 -0600 Subject: [PATCH 52/54] deserialize CPA kafka deviceData messages into a types.Blood Previously a glucose.Glucose was used, which was fine in that we only look at the UserID and UploadID fields, but there are other incompatible fields that can "spoil" the deserialization. 
BACK-2559 --- data/events/alerts.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/data/events/alerts.go b/data/events/alerts.go index be2616f3a8..3421c90c26 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -11,6 +11,7 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood/glucose" "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/devicetokens" @@ -115,13 +116,16 @@ func isActivityAndActivityOnly(updatedFields []string) bool { func (c *Consumer) consumeDeviceData(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { - datum := &Glucose{} + lgr := c.logger(ctx) + lgr.Debug("consuming device data message") + + // The actual type should be either a glucose.Glucose or a + // dosingdecision.DosingDecision, but they both use types.Base, and that's where the + // only fields we need are defined. + datum := &types.Base{} if _, err := unmarshalMessageValue(msg.Value, datum); err != nil { return err } - lgr := c.logger(ctx) - lgr.WithField("data", datum).Info("consuming a device data message") - if datum.UserID == nil { return errors.New("Unable to retrieve alerts configs: userID is nil") } @@ -150,7 +154,7 @@ func (c *Consumer) consumeDeviceData(ctx context.Context, c.pushNotifications(ctx, notes) session.MarkMessage(msg, "") - lgr.WithField("message", msg).Debug("marked") + lgr.WithField("msg", msg).Debug("marked") return nil } From 4aabbaa6bb63ad20a4eb459774d3099ab88c4a44 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 21 Mar 2025 07:46:52 -0600 Subject: [PATCH 53/54] add a context logger This logger has some fields in it that can be useful for debugging. 
BACK-2559 --- alerts/evaluator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/alerts/evaluator.go b/alerts/evaluator.go index 9608c7f243..9a757a7833 100644 --- a/alerts/evaluator.go +++ b/alerts/evaluator.go @@ -76,7 +76,8 @@ func (e *Evaluator) EvaluateData(ctx context.Context, followedUserID, dataSetID } for _, config := range configs { lgr := config.LoggerWithFields(e.Logger) - notification, needsUpsert := e.genNotificationForConfig(ctx, lgr, config, resp) + lgrCtx := log.NewContextWithLogger(ctx, lgr) + notification, needsUpsert := e.genNotificationForConfig(lgrCtx, lgr, config, resp) if notification != nil { notifications = append(notifications, notification) } From e1394ad0cdc35528f66ff20faddcee1636350252 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Fri, 21 Mar 2025 09:16:36 -0600 Subject: [PATCH 54/54] improved logging of marked messages Fields named message are quietly dropped by the platform log package. Also, logging []byte isn't super useful. BACK-2559 --- data/events/alerts.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/data/events/alerts.go b/data/events/alerts.go index 3421c90c26..cf0da13d3f 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -77,7 +77,7 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, if isActivityAndActivityOnly(updatedFields) { lgr.WithField("updatedFields", updatedFields). 
Debug("alerts config is an activity update, will skip") - lgr.WithField("message", msg).Debug("marked") + lgr.WithField("msg.Key", string(msg.Key)).Debug("marked") session.MarkMessage(msg, "") return nil } @@ -97,7 +97,7 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, c.pushNotifications(ctx, notes) session.MarkMessage(msg, "") - lgr.WithField("message", msg).Debug("marked") + lgr.WithField("msg.Key", string(msg.Key)).Debug("marked") return nil } @@ -154,7 +154,7 @@ func (c *Consumer) consumeDeviceData(ctx context.Context, c.pushNotifications(ctx, notes) session.MarkMessage(msg, "") - lgr.WithField("msg", msg).Debug("marked") + lgr.WithField("msg.Key", string(msg.Key)).Debug("marked") return nil }