From 4515bd5444f205df107d6cc8c80c23391c0e6ee4 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 6 May 2024 09:53:25 -0600 Subject: [PATCH 01/15] adds List and Get methods to alerts client The Get endpoint already exists on the service, so only the List endpoint needed to be added there. BACK-2554 --- alerts/client.go | 39 +++++++++++++++++++++++++----- alerts/config.go | 1 + data/service/api/v1/alerts.go | 37 ++++++++++++++++++++++++++++ data/service/api/v1/alerts_test.go | 21 +++++++++++++--- data/store/mongo/mongo_alerts.go | 21 ++++++++++++++++ 5 files changed, 110 insertions(+), 9 deletions(-) diff --git a/alerts/client.go b/alerts/client.go index bc6db1f888..9abaaae9f3 100644 --- a/alerts/client.go +++ b/alerts/client.go @@ -9,6 +9,7 @@ import ( "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/errors" platformlog "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/log/null" "github.com/tidepool-org/platform/platform" @@ -43,6 +44,8 @@ type PlatformClient interface { requestBody interface{}, responseBody interface{}, inspectors ...request.ResponseInspector) error } +// TokenProvider retrieves session tokens for calling the alerts API. +// // client.External is one implementation type TokenProvider interface { // ServerSessionToken provides a server-to-server API authentication token. @@ -51,12 +54,12 @@ type TokenProvider interface { // request performs common operations before passing a request off to the // underlying platform.Client. -func (c *Client) request(ctx context.Context, method, url string, body any) error { +func (c *Client) request(ctx context.Context, method, url string, reqBody, resBody any) error { // Platform's client.Client expects a logger to exist in the request's // context. If it doesn't exist, request processing will panic. loggingCtx := platformlog.NewContextWithLogger(ctx, c.logger) // Make sure the auth token is injected into the request's headers. - return c.requestWithAuth(loggingCtx, method, url, body) + return c.requestWithAuth(loggingCtx, method, url, reqBody, resBody) } // requestWithAuth injects an auth token before calling platform.Client.RequestData. @@ -65,24 +68,48 @@ func (c *Client) request(ctx context.Context, method, url string, body any) erro // platform.Client. It might be nice to be able to use a mutator, but the auth // is specifically handled by the platform.Client via the context field, and // if left blank, platform.Client errors. -func (c *Client) requestWithAuth(ctx context.Context, method, url string, body any) error { +func (c *Client) requestWithAuth(ctx context.Context, method, url string, reqBody, resBody any) error { authCtx, err := c.ctxWithAuth(ctx) if err != nil { return err } - return c.client.RequestData(authCtx, method, url, nil, body, nil) + return c.client.RequestData(authCtx, method, url, nil, reqBody, resBody) } // Upsert updates cfg if it exists or creates it if it doesn't. func (c *Client) Upsert(ctx context.Context, cfg *Config) error { url := c.client.ConstructURL("v1", "users", cfg.FollowedUserID, "followers", cfg.UserID, "alerts") - return c.request(ctx, http.MethodPost, url, cfg) + return c.request(ctx, http.MethodPost, url, cfg, nil) } // Delete the alerts config. 
func (c *Client) Delete(ctx context.Context, cfg *Config) error { url := c.client.ConstructURL("v1", "users", cfg.FollowedUserID, "followers", cfg.UserID, "alerts") - return c.request(ctx, http.MethodDelete, url, nil) + return c.request(ctx, http.MethodDelete, url, nil, nil) +} + +// Get a user's alerts configuration for the followed user. +func (c *Client) Get(ctx context.Context, followedUserID, userID string) (*Config, error) { + url := c.client.ConstructURL("v1", "users", followedUserID, "followers", userID, "alerts") + cfg := &Config{} + err := c.request(ctx, http.MethodGet, url, nil, cfg) + if err != nil { + return nil, errors.Wrap(err, "Unable to request alerts config") + } + return cfg, nil +} + +// List the alerts configurations that follow the given user. +// +// This method should only be called via an authenticated service session. +func (c *Client) List(ctx context.Context, followedUserID string) ([]*Config, error) { + url := c.client.ConstructURL("v1", "users", followedUserID, "followers", "alerts") + configs := []*Config{} + err := c.request(ctx, http.MethodGet, url, nil, &configs) + if err != nil { + return nil, errors.Wrap(err, "Unable to request alerts configs list") + } + return configs, nil } // ctxWithAuth injects a server session token into the context. diff --git a/alerts/config.go b/alerts/config.go index 67f2b1d72c..d9931b0f9a 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -239,6 +239,7 @@ type Repository interface { Get(ctx context.Context, conf *Config) (*Config, error) Upsert(ctx context.Context, conf *Config) error Delete(ctx context.Context, conf *Config) error + List(ctx context.Context, userID string) ([]*Config, error) EnsureIndexes() error } diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index d07891247e..70941b9e20 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -24,6 +24,7 @@ func AlertsRoutes() []service.Route { service.Get("/v1/users/:userId/followers/:followerUserId/alerts", GetAlert, api.RequireAuth), service.Post("/v1/users/:userId/followers/:followerUserId/alerts", UpsertAlert, api.RequireAuth), service.Delete("/v1/users/:userId/followers/:followerUserId/alerts", DeleteAlert, api.RequireAuth), + service.Get("/v1/users/:userId/followers/alerts", ListAlerts, api.RequireServer), } } @@ -134,6 +135,42 @@ func UpsertAlert(dCtx service.Context) { } } +func ListAlerts(dCtx service.Context) { + r := dCtx.Request() + ctx := r.Context() + authDetails := request.GetAuthDetails(ctx) + repo := dCtx.AlertsRepository() + lgr := log.LoggerFromContext(ctx) + + if err := checkAuthentication(authDetails); err != nil { + lgr.Debug("authentication failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + + pathsUserID := r.PathParam("userId") + if err := checkUserIDConsistency(authDetails, pathsUserID); err != nil { + lgr.WithFields(log.Fields{"path": pathsUserID, "auth": authDetails.UserID()}). 
+ Debug("user id consistency failed") + dCtx.RespondWithError(platform.ErrorUnauthorized()) + return + } + + alerts, err := repo.List(ctx, pathsUserID) + if err != nil { + dCtx.RespondWithInternalServerFailure("listing alerts configs", err) + lgr.WithError(err).Error("listing alerts config") + return + } + if len(alerts) == 0 { + dCtx.RespondWithError(ErrorUserIDNotFound(pathsUserID)) + lgr.Debug("no alerts configs found") + } + + responder := request.MustNewResponder(dCtx.Response(), r) + responder.Data(http.StatusOK, alerts) +} + // checkUserIDConsistency verifies the userIDs in a request. // // For safety reasons, if these values don't agree, return an error. diff --git a/data/service/api/v1/alerts_test.go b/data/service/api/v1/alerts_test.go index c3b4b2f2a5..d48be38a6f 100644 --- a/data/service/api/v1/alerts_test.go +++ b/data/service/api/v1/alerts_test.go @@ -160,12 +160,15 @@ var _ = Describe("Alerts endpoints", func() { }) type mockRepo struct { - UserID string - Error error + UserID string + Error error + AlertsForUserID map[string][]*alerts.Config } func newMockRepo() *mockRepo { - return &mockRepo{} + return &mockRepo{ + AlertsForUserID: make(map[string][]*alerts.Config), + } } func (r *mockRepo) ReturnsError(err error) { @@ -202,6 +205,18 @@ func (r *mockRepo) Delete(ctx context.Context, conf *alerts.Config) error { return nil } +func (r *mockRepo) List(ctx context.Context, userID string) ([]*alerts.Config, error) { + if r.Error != nil { + return nil, r.Error + } + r.UserID = userID + alerts, ok := r.AlertsForUserID[userID] + if !ok { + return nil, nil + } + return alerts, nil +} + func (r *mockRepo) EnsureIndexes() error { return nil } diff --git a/data/store/mongo/mongo_alerts.go b/data/store/mongo/mongo_alerts.go index ee313f3ffb..489db755fe 100644 --- a/data/store/mongo/mongo_alerts.go +++ b/data/store/mongo/mongo_alerts.go @@ -9,6 +9,7 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/errors" structuredmongo "github.com/tidepool-org/platform/store/structured/mongo" ) @@ -34,6 +35,26 @@ func (r *alertsRepo) Delete(ctx context.Context, cfg *alerts.Config) error { return nil } +// List will retrieve any Configs that are defined by followers of the given user. +func (r *alertsRepo) List(ctx context.Context, followedUserID string) ([]*alerts.Config, error) { + filter := bson.D{ + {Key: "followedUserId", Value: followedUserID}, + } + cursor, err := r.Find(ctx, filter, nil) + if err != nil { + return nil, errors.Wrapf(err, "Unable to list alerts.Config(s) for followed user %s", followedUserID) + } + defer cursor.Close(ctx) + out := []*alerts.Config{} + if err := cursor.All(ctx, &out); err != nil { + return nil, errors.Wrapf(err, "Unable to decode alerts.Config(s) for followed user %s", followedUserID) + } + if err := cursor.Err(); err != nil { + return nil, errors.Wrapf(err, "Unexpected error for followed user %s", followedUserID) + } + return out, nil +} + // Get will retrieve the given Config. func (r *alertsRepo) Get(ctx context.Context, cfg *alerts.Config) (*alerts.Config, error) { res := r.FindOne(ctx, r.filter(cfg), nil) From 5775f560fcbacd3a4087c29848d972ea642acb51 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 6 May 2024 10:22:02 -0600 Subject: [PATCH 02/15] lift Repeat out of the base alert config Through discussions it was confirmed that Repeat is not universal to all alerts. 
So it's lifted out of the Base alert and re-inserted into those alerts where it should be present (namely Low and High alerts only). BACK-2554 --- alerts/config.go | 28 ++++++----- alerts/config_test.go | 106 ++++++++++++++++++++++-------------------- 2 files changed, 72 insertions(+), 62 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index d9931b0f9a..b83cf2b25f 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -72,16 +72,10 @@ func (a Alerts) Validate(validator structure.Validator) { type Base struct { // Enabled controls whether notifications should be sent for this alert. Enabled bool `json:"enabled" bson:"enabled"` - // Repeat is measured in minutes. - // - // A value of 0 (the default) disables repeat notifications. - Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) - dur := b.Repeat.Duration() - validator.Duration("repeat", &dur).Using(validateRepeat) } const ( @@ -110,7 +104,7 @@ type UrgentLowAlert struct { Base `bson:",inline"` // Threshold is compared the current value to determine if an alert should // be triggered. - Threshold `json:"threshold"` + Threshold `json:"threshold" bson:"threshold"` } func (a UrgentLowAlert) Validate(validator structure.Validator) { @@ -149,13 +143,19 @@ type LowAlert struct { // be triggered. Threshold `json:"threshold"` Delay DurationMinutes `json:"delay,omitempty"` + // Repeat is measured in minutes. + // + // A value of 0 (the default) disables repeat notifications. + Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (a LowAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) - dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 2*time.Hour) + delayDur := a.Delay.Duration() + validator.Duration("delay", &delayDur).InRange(0, 2*time.Hour) a.Threshold.Validate(validator) + repeatDur := a.Repeat.Duration() + validator.Duration("repeat", &repeatDur).Using(validateRepeat) } // HighAlert extends Base with a threshold and a delay. @@ -165,13 +165,19 @@ type HighAlert struct { // be triggered. Threshold `json:"threshold"` Delay DurationMinutes `json:"delay,omitempty"` + // Repeat is measured in minutes. + // + // A value of 0 (the default) disables repeat notifications. + Repeat DurationMinutes `json:"repeat,omitempty" bson:"repeat"` } func (a HighAlert) Validate(validator structure.Validator) { a.Base.Validate(validator) a.Threshold.Validate(validator) - dur := a.Delay.Duration() - validator.Duration("delay", &dur).InRange(0, 6*time.Hour) + delayDur := a.Delay.Duration() + validator.Duration("delay", &delayDur).InRange(0, 6*time.Hour) + repeatDur := a.Repeat.Duration() + validator.Duration("repeat", &repeatDur).Using(validateRepeat) } // DurationMinutes reads a JSON integer and converts it to a time.Duration. 
diff --git a/alerts/config_test.go b/alerts/config_test.go index 5cfa983643..8fc6e0240a 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -43,7 +43,6 @@ var _ = Describe("Config", func() { }, "urgentLow": { "enabled": false, - "repeat": 30, "threshold": { "units": "mg/dL", "value": 47.5 @@ -60,12 +59,10 @@ var _ = Describe("Config", func() { }, "notLooping": { "enabled": true, - "repeat": 32, "delay": 4 }, "noCommunication": { "enabled": true, - "repeat": 33, "delay": 6 } }`, mockUserID1, mockUserID2, mockUploadID) @@ -86,14 +83,11 @@ var _ = Describe("Config", func() { Expect(conf.Low.Threshold.Value).To(Equal(80.0)) Expect(conf.Low.Threshold.Units).To(Equal(glucose.MgdL)) Expect(conf.UrgentLow.Enabled).To(Equal(false)) - Expect(conf.UrgentLow.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(conf.UrgentLow.Threshold.Value).To(Equal(47.5)) Expect(conf.UrgentLow.Threshold.Units).To(Equal(glucose.MgdL)) Expect(conf.NotLooping.Enabled).To(Equal(true)) - Expect(conf.NotLooping.Repeat).To(Equal(DurationMinutes(32 * time.Minute))) Expect(conf.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) Expect(conf.NoCommunication.Enabled).To(Equal(true)) - Expect(conf.NoCommunication.Repeat).To(Equal(DurationMinutes(33 * time.Minute))) Expect(conf.NoCommunication.Delay).To(Equal(DurationMinutes(6 * time.Minute))) }) @@ -322,32 +316,41 @@ var _ = Describe("Config", func() { }) Context("repeat", func() { + var defaultAlert = LowAlert{ + Threshold: Threshold{Value: 11, Units: glucose.MmolL}, + } + It("accepts values of 0 (indicating disabled)", func() { val := validator.New() - b := Base{Repeat: 0} - b.Validate(val) + l := defaultAlert + l.Repeat = 0 + l.Validate(val) Expect(val.Error()).To(Succeed()) }) It("accepts values of 15 minutes to 4 hours (inclusive)", func() { val := validator.New() - b := Base{Repeat: DurationMinutes(15 * time.Minute)} - b.Validate(val) + l := defaultAlert + l.Repeat = DurationMinutes(15 * time.Minute) + l.Validate(val) Expect(val.Error()).To(Succeed()) val = validator.New() - b = Base{Repeat: DurationMinutes(4 * time.Hour)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(4 * time.Hour) + l.Validate(val) Expect(val.Error()).To(Succeed()) val = validator.New() - b = Base{Repeat: DurationMinutes(4*time.Hour + 1)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(4*time.Hour + 1) + l.Validate(val) Expect(val.Error()).NotTo(Succeed()) val = validator.New() - b = Base{Repeat: DurationMinutes(15*time.Minute - 1)} - b.Validate(val) + l = defaultAlert + l.Repeat = DurationMinutes(15*time.Minute - 1) + l.Validate(val) Expect(val.Error()).NotTo(Succeed()) }) }) @@ -359,67 +362,68 @@ var _ = Describe("Config", func() { err := request.DecodeObject(nil, buf, threshold) Expect(err).To(MatchError("json is malformed")) }) - It("validates repeat minutes (negative)", func() { + }) + + Context("low", func() { + It("accepts a blank repeat", func() { buf := buff(`{ "userId": "%s", "followedUserId": "%s", "uploadId": "%s", - "urgentLow": { - "enabled": false, - "repeat": -11, + "low": { + "enabled": true, + "delay": 10, "threshold": { - "units": "%s", - "value": 47.5 + "units": "mg/dL", + "value": 80 } } -}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) - cfg := &Config{} - err := request.DecodeObject(nil, buf, cfg) - Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) +}`, mockUserID1, mockUserID2, mockUploadID) + conf := &Config{} + err := request.DecodeObject(nil, buf, conf) + 
Expect(err).To(Succeed()) + Expect(conf.Low.Repeat).To(Equal(DurationMinutes(0))) }) - It("validates repeat minutes (string)", func() { - buf := buff(`{ + }) + It("validates repeat minutes (negative)", func() { + buf := buff(`{ "userId": "%s", "followedUserId": "%s", - "urgentLow": { + "uploadId": "%s", + "low": { "enabled": false, - "repeat": "a", + "repeat": -11, "threshold": { "units": "%s", - "value": 1 + "value": 47.5 } } -}`, mockUserID1, mockUserID2, glucose.MgdL) - cfg := &Config{} - err := request.DecodeObject(nil, buf, cfg) - Expect(err).To(MatchError("json is malformed")) - }) +}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) + cfg := &Config{} + err := request.DecodeObject(nil, buf, cfg) + Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) }) - - Context("low", func() { - It("accepts a blank repeat", func() { - buf := buff(`{ + It("validates repeat minutes (string)", func() { + buf := buff(`{ "userId": "%s", "followedUserId": "%s", "uploadId": "%s", "low": { - "enabled": true, - "delay": 10, + "enabled": false, + "repeat": "a", "threshold": { - "units": "mg/dL", - "value": 80 + "units": "%s", + "value": 1 } } -}`, mockUserID1, mockUserID2, mockUploadID) - conf := &Config{} - err := request.DecodeObject(nil, buf, conf) - Expect(err).To(Succeed()) - Expect(conf.Low.Repeat).To(Equal(DurationMinutes(0))) - }) +}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) + cfg := &Config{} + err := request.DecodeObject(nil, buf, cfg) + Expect(err).To(MatchError("json is malformed")) }) }) -var _ = Describe("Duration", func() { +var _ = Describe("DurationMinutes", func() { It("parses 42", func() { d := DurationMinutes(0) err := d.UnmarshalJSON([]byte(`42`)) From cdda45800967453a34b519ecd431b59aaa3c638b Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 6 May 2024 12:46:40 -0600 Subject: [PATCH 03/15] adds activity tracking to alert configurations These activity properties will track the times at which alerts were sent, resolved, or acknowledged. BACK-2554 --- alerts/config.go | 20 ++++++++++++++++++ alerts/config_test.go | 48 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/alerts/config.go b/alerts/config.go index b83cf2b25f..b6f8334656 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -72,12 +72,32 @@ func (a Alerts) Validate(validator structure.Validator) { type Base struct { // Enabled controls whether notifications should be sent for this alert. Enabled bool `json:"enabled" bson:"enabled"` + + // Activity tracks when events related to the alert occurred. + Activity `json:"-" bson:"activity,omitempty"` } func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) } +type Activity struct { + // Triggered records the last time this alert was triggered. + Triggered time.Time `json:"triggered" bson:"triggered"` + // Sent records the last time this alert was sent. + Sent time.Time `json:"sent" bson:"sent"` + // Resolved records the last time this alert was resolved. + Resolved time.Time `json:"resolved" bson:"resolved"` +} + +func (a Activity) IsActive() bool { + return a.Triggered.After(a.Resolved) +} + +func (a Activity) IsSent() bool { + return a.Sent.After(a.Triggered) +} + const ( // RepeatMin is the minimum duration for a repeat setting (if not 0). 
RepeatMin = 15 * time.Minute diff --git a/alerts/config_test.go b/alerts/config_test.go index 8fc6e0240a..1d17b5a852 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -125,6 +125,54 @@ var _ = Describe("Config", func() { }) }) + Context("Base", func() { + Context("Activity", func() { + Context("IsActive()", func() { + It("is true", func() { + triggered := time.Now() + resolved := triggered.Add(-time.Nanosecond) + a := Activity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + resolved := triggered.Add(time.Nanosecond) + a := Activity{ + Triggered: triggered, + Resolved: resolved, + } + Expect(a.IsActive()).To(BeFalse()) + }) + }) + + Context("IsSent()", func() { + It("is true", func() { + triggered := time.Now() + sent := triggered.Add(time.Nanosecond) + a := Activity{ + Triggered: triggered, + Sent: sent, + } + Expect(a.IsSent()).To(BeTrue()) + }) + + It("is false", func() { + triggered := time.Now() + notified := triggered.Add(-time.Nanosecond) + a := Activity{ + Triggered: triggered, + Sent: notified, + } + Expect(a.IsSent()).To(BeFalse()) + }) + }) + }) + }) + Context("UrgentLowAlert", func() { Context("Threshold", func() { It("accepts values between 0 and 1000 mg/dL", func() { From f10bf8448ddb59014059c75982d87649d42b4ce7 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 7 May 2024 08:10:09 -0600 Subject: [PATCH 04/15] adds auth endpoint to retrieve a user's device tokens This endpoint will be used by upcoming changes to the auth client to allow care partner backend processes to retrieve device tokens in order to send mobile device push notifications. BACK-2554 --- auth/service/api/v1/devicetokens.go | 25 ++++ auth/service/api/v1/devicetokens_test.go | 67 ++++++++- auth/service/service/client.go | 14 ++ auth/service/service/client_test.go | 143 +++++++++++++++++++ auth/store/mongo/device_tokens_repository.go | 14 ++ auth/store/test/device_token_repository.go | 12 ++ devicetokens/devicetokens.go | 1 + 7 files changed, 274 insertions(+), 2 deletions(-) diff --git a/auth/service/api/v1/devicetokens.go b/auth/service/api/v1/devicetokens.go index c19c654343..99d6b2ede1 100644 --- a/auth/service/api/v1/devicetokens.go +++ b/auth/service/api/v1/devicetokens.go @@ -13,6 +13,7 @@ import ( func (r *Router) DeviceTokensRoutes() []*rest.Route { return []*rest.Route{ rest.Post("/v1/users/:userId/device_tokens", api.RequireUser(r.UpsertDeviceToken)), + rest.Get("/v1/users/:userId/device_tokens", api.RequireAuth(r.GetDeviceTokens)), } } @@ -39,3 +40,27 @@ func (r *Router) UpsertDeviceToken(res rest.ResponseWriter, req *rest.Request) { return } } + +func (r *Router) GetDeviceTokens(res rest.ResponseWriter, req *rest.Request) { + responder := request.MustNewResponder(res, req) + ctx := req.Request.Context() + authDetails := request.GetAuthDetails(ctx) + repo := r.AuthStore().NewDeviceTokenRepository() + userID := req.PathParam("userId") + + if userID != authDetails.UserID() && !authDetails.IsService() { + responder.Error(http.StatusForbidden, request.ErrorUnauthorized()) + return + } + + docs, err := repo.GetAllByUserID(ctx, userID) + if err != nil { + responder.Error(http.StatusInternalServerError, err) + return + } + tokens := make([]devicetokens.DeviceToken, 0, len(docs)) + for _, doc := range docs { + tokens = append(tokens, doc.DeviceToken) + } + responder.Data(http.StatusOK, tokens) +} diff --git a/auth/service/api/v1/devicetokens_test.go 
b/auth/service/api/v1/devicetokens_test.go index 1033b7cc9c..ef61df746e 100644 --- a/auth/service/api/v1/devicetokens_test.go +++ b/auth/service/api/v1/devicetokens_test.go @@ -3,6 +3,7 @@ package v1 import ( "bytes" "context" + "encoding/json" "fmt" "io" "net/http" @@ -12,14 +13,18 @@ import ( . "github.com/onsi/gomega" serviceTest "github.com/tidepool-org/platform/auth/service/test" + storetest "github.com/tidepool-org/platform/auth/store/test" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/request" "github.com/tidepool-org/platform/service/test" ) var _ = Describe("Device tokens endpoints", func() { var rtr *Router + var svc *serviceTest.Service + BeforeEach(func() { - svc := serviceTest.NewService() + svc = serviceTest.NewService() var err error rtr, err = NewRouter(svc) Expect(err).ToNot(HaveOccurred()) @@ -66,6 +71,65 @@ var _ = Describe("Device tokens endpoints", func() { }) + Describe("List", func() { + It("succeeds with valid input", func() { + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusOK)) + }) + + It("rejects non-service users", func() { + svcDetails := test.NewMockAuthDetails(request.MethodAccessToken, "test-user", test.TestToken2) + req := newDeviceTokensTestRequest(svcDetails, nil, "") + res := test.NewMockRestResponseWriter() + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusForbidden)) + }) + + It("may return multiple documents", func() { + repo := &storetest.DeviceTokenRepository{ + Documents: []*devicetokens.Document{ + { + DeviceToken: devicetokens.DeviceToken{}, + }, + { + DeviceToken: devicetokens.DeviceToken{}, + }, + }, + } + raw := rtr.Service.AuthStore().(*storetest.Store) + raw.NewDeviceTokenRepositoryImpl = repo + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusOK)) + got := []*devicetokens.DeviceToken{} + err := json.Unmarshal(res.Body.Bytes(), &got) + Expect(err).To(Succeed()) + Expect(got).To(HaveLen(2)) + }) + + It("handles repository errors", func() { + repo := &storetest.DeviceTokenRepository{ + Error: fmt.Errorf("test error"), + } + raw := rtr.Service.AuthStore().(*storetest.Store) + raw.NewDeviceTokenRepositoryImpl = repo + res := test.NewMockRestResponseWriter() + req := newDeviceTokensTestRequest(nil, nil, "") + + rtr.GetDeviceTokens(res, req) + + Expect(res.Code).To(Equal(http.StatusInternalServerError)) + }) + }) }) func buff(template string, args ...any) *bytes.Buffer { @@ -91,5 +155,4 @@ func newDeviceTokensTestRequest(auth request.AuthDetails, body io.Reader, userID Request: httpReq, PathParams: map[string]string{"userId": userIDFromPath}, } - } diff --git a/auth/service/service/client.go b/auth/service/service/client.go index e8a973a866..ed6ab61873 100644 --- a/auth/service/service/client.go +++ b/auth/service/service/client.go @@ -6,6 +6,7 @@ import ( "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/auth/client" authStore "github.com/tidepool-org/platform/auth/store" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/page" @@ -116,6 +117,19 @@ func (c *Client) deleteProviderSession(ctx context.Context, repository authStore } } +func (c *Client) GetDeviceTokens(ctx context.Context, userID string) 
([]*devicetokens.DeviceToken, error) { + repo := c.authStore.NewDeviceTokenRepository() + docs, err := repo.GetAllByUserID(ctx, userID) + if err != nil { + return nil, err + } + tokens := make([]*devicetokens.DeviceToken, 0, len(docs)) + for _, doc := range docs { + tokens = append(tokens, &doc.DeviceToken) + } + return tokens, nil +} + func (c *Client) GetProviderSession(ctx context.Context, id string) (*auth.ProviderSession, error) { repository := c.authStore.NewProviderSessionRepository() return repository.GetProviderSession(ctx, id) diff --git a/auth/service/service/client_test.go b/auth/service/service/client_test.go index 9a8a94e85d..a08ccb978f 100644 --- a/auth/service/service/client_test.go +++ b/auth/service/service/client_test.go @@ -1,8 +1,151 @@ package service_test import ( + "context" + "fmt" + "time" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . "github.com/onsi/gomega/ghttp" + + "github.com/tidepool-org/platform/auth/client" + "github.com/tidepool-org/platform/auth/service/service" + "github.com/tidepool-org/platform/auth/store" + platformclient "github.com/tidepool-org/platform/client" + "github.com/tidepool-org/platform/devicetokens" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/provider" ) var _ = Describe("Client", func() { + var testUserID = "test-user-id" + var testDeviceToken1 = &devicetokens.DeviceToken{ + Apple: &devicetokens.AppleDeviceToken{ + Token: []byte("test"), + Environment: "sandbox", + }, + } + + newTestServiceClient := func(url string, authStore store.Store) *service.Client { + var err error + extCfg := &client.ExternalConfig{ + Config: &platform.Config{ + Config: &platformclient.Config{ + Address: url, + UserAgent: "test", + }, + ServiceSecret: "", + }, + ServerSessionTokenSecret: "test token", + ServerSessionTokenTimeout: time.Minute, + } + authAs := platform.AuthorizeAsService + name := "test auth client" + logger := logtest.NewLogger() + if authStore == nil { + authStore = &mockAuthStore{ + DeviceTokenRepository: &mockDeviceTokenRepository{ + Tokens: map[string][]*devicetokens.DeviceToken{ + testUserID: { + testDeviceToken1, + }, + }, + }, + } + } + providerFactory := &mockProviderFactory{} + serviceClient, err := service.NewClient(extCfg, authAs, name, logger, authStore, providerFactory) + Expect(err).To(Succeed()) + return serviceClient + } + + Describe("GetDeviceTokens", func() { + It("returns a slice of tokens", func() { + ctx := context.Background() + server := NewServer() + defer server.Close() + serviceClient := newTestServiceClient(server.URL(), nil) + + tokens, err := serviceClient.GetDeviceTokens(ctx, testUserID) + + Expect(err).To(Succeed()) + Expect(tokens).To(HaveLen(1)) + Expect(tokens[0]).To(Equal(testDeviceToken1)) + }) + + It("handles errors from the underlying repo", func() { + ctx := context.Background() + server := NewServer() + defer server.Close() + authStore := &mockAuthStore{ + DeviceTokenRepository: &mockDeviceTokenRepository{ + Error: fmt.Errorf("test error"), + }, + } + serviceClient := newTestServiceClient(server.URL(), authStore) + + _, err := serviceClient.GetDeviceTokens(ctx, testUserID) + + Expect(err).To(HaveOccurred()) + }) + }) }) + +type mockAuthStore struct { + store.DeviceTokenRepository +} + +func (s *mockAuthStore) NewProviderSessionRepository() store.ProviderSessionRepository { + return nil +} + +func (s *mockAuthStore) NewRestrictedTokenRepository() store.RestrictedTokenRepository { + return nil +} + 
+func (s *mockAuthStore) NewDeviceTokenRepository() store.DeviceTokenRepository { + return s.DeviceTokenRepository +} + +type mockProviderFactory struct{} + +func (f *mockProviderFactory) Get(typ string, name string) (provider.Provider, error) { + return nil, nil +} + +type mockDeviceTokenRepository struct { + Error error + Tokens map[string][]*devicetokens.DeviceToken +} + +func (r *mockDeviceTokenRepository) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + if r.Error != nil { + return nil, r.Error + } + + if tokens, ok := r.Tokens[userID]; ok { + docs := make([]*devicetokens.Document, 0, len(tokens)) + for _, token := range tokens { + docs = append(docs, &devicetokens.Document{DeviceToken: *token}) + } + return docs, nil + } + return nil, nil +} + +func (r *mockDeviceTokenRepository) Upsert(ctx context.Context, doc *devicetokens.Document) error { + if r.Error != nil { + return r.Error + } + return nil +} + +func (r *mockDeviceTokenRepository) EnsureIndexes() error { + if r.Error != nil { + return r.Error + } + return nil +} diff --git a/auth/store/mongo/device_tokens_repository.go b/auth/store/mongo/device_tokens_repository.go index 4a257ca9f0..d338c27ea3 100644 --- a/auth/store/mongo/device_tokens_repository.go +++ b/auth/store/mongo/device_tokens_repository.go @@ -16,6 +16,20 @@ import ( // MongoDB collection. type deviceTokenRepo structuredmongo.Repository +func (r *deviceTokenRepo) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + f := bson.M{"userId": userID} + cursor, err := r.Find(ctx, f, nil) + if err != nil { + return nil, err + } + defer cursor.Close(ctx) + docs := make([]*devicetokens.Document, 0, cursor.RemainingBatchLength()) + if err := cursor.All(ctx, &docs); err != nil { + return nil, err + } + return docs, nil +} + // Upsert will create or update the given Config. func (r *deviceTokenRepo) Upsert(ctx context.Context, doc *devicetokens.Document) error { // The presence of UserID and TokenID should be enforced with a mongodb diff --git a/auth/store/test/device_token_repository.go b/auth/store/test/device_token_repository.go index 4847596895..dbb40d9200 100644 --- a/auth/store/test/device_token_repository.go +++ b/auth/store/test/device_token_repository.go @@ -9,6 +9,8 @@ import ( type DeviceTokenRepository struct { *authTest.DeviceTokenAccessor + Documents []*devicetokens.Document + Error error } func NewDeviceTokenRepository() *DeviceTokenRepository { @@ -21,6 +23,16 @@ func (r *DeviceTokenRepository) Expectations() { r.DeviceTokenAccessor.Expectations() } +func (r *DeviceTokenRepository) GetAllByUserID(ctx context.Context, userID string) ([]*devicetokens.Document, error) { + if r.Error != nil { + return nil, r.Error + } + if len(r.Documents) > 0 { + return r.Documents, nil + } + return nil, nil +} + func (r *DeviceTokenRepository) Upsert(ctx context.Context, doc *devicetokens.Document) error { return nil } diff --git a/devicetokens/devicetokens.go b/devicetokens/devicetokens.go index a8fb790a3d..721f110653 100644 --- a/devicetokens/devicetokens.go +++ b/devicetokens/devicetokens.go @@ -100,6 +100,7 @@ type AppleBlob []byte // Repository abstracts persistent storage for Token data. 
type Repository interface { + GetAllByUserID(ctx context.Context, userID string) ([]*Document, error) Upsert(ctx context.Context, doc *Document) error EnsureIndexes() error From fdce5dcb383d6cb387f699c1df070849e363164a Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 7 May 2024 08:45:22 -0600 Subject: [PATCH 05/15] adds the ability to retrieve device tokens to the auth client This functionality will be used by care partner processes to retrieve device tokens in order to send mobile device push notifications in response to care partner alerts being triggered. BACK-2554 --- auth/auth.go | 8 +++++ auth/client/client.go | 20 ++++++++++++ auth/client/client_test.go | 60 ++++++++++++++++++++++++++++++++++ auth/test/client.go | 2 ++ auth/test/external_accessor.go | 9 +++++ auth/test/mock.go | 54 ++++++++++++++++++++++++++++++ 6 files changed, 153 insertions(+) diff --git a/auth/auth.go b/auth/auth.go index 1bd548170b..2f085843a7 100644 --- a/auth/auth.go +++ b/auth/auth.go @@ -3,6 +3,7 @@ package auth import ( "context" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/request" ) @@ -18,6 +19,7 @@ type Client interface { ProviderSessionAccessor RestrictedTokenAccessor ExternalAccessor + DeviceTokensClient } type ExternalAccessor interface { @@ -47,3 +49,9 @@ func ServerSessionTokenFromContext(ctx context.Context) string { } return "" } + +// DeviceTokensClient provides access to the tokens used to authenticate +// mobile device push notifications. +type DeviceTokensClient interface { + GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) +} diff --git a/auth/client/client.go b/auth/client/client.go index fd92f6efbf..82ed198e20 100644 --- a/auth/client/client.go +++ b/auth/client/client.go @@ -5,6 +5,7 @@ import ( "net/http" "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/page" @@ -308,6 +309,25 @@ func (c *Client) DeleteRestrictedToken(ctx context.Context, id string) error { return c.client.RequestData(ctx, http.MethodDelete, url, nil, nil, nil) } +// GetDeviceTokens belonging to a given user. 
+func (c *Client) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + ctx = log.NewContextWithLogger(ctx, c.logger) + if c.client.IsAuthorizeAsService() { + serverSessionToken, err := c.ServerSessionToken() + if err != nil { + return nil, err + } + ctx = auth.NewContextWithServerSessionToken(ctx, serverSessionToken) + } + url := c.client.ConstructURL("v1", "users", userID, "device_tokens") + tokens := []*devicetokens.DeviceToken{} + err := c.client.RequestData(ctx, http.MethodGet, url, nil, nil, &tokens) + if err != nil { + return nil, errors.Wrap(err, "Unable to request device token data") + } + return tokens, nil +} + type ConfigLoader interface { Load(*Config) error } diff --git a/auth/client/client_test.go b/auth/client/client_test.go index 76fdad9723..b01f89a4f2 100644 --- a/auth/client/client_test.go +++ b/auth/client/client_test.go @@ -2,6 +2,7 @@ package client_test import ( "context" + "encoding/json" "net/http" "time" @@ -12,6 +13,7 @@ import ( "github.com/tidepool-org/platform/auth" authClient "github.com/tidepool-org/platform/auth/client" authTest "github.com/tidepool-org/platform/auth/test" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" errorsTest "github.com/tidepool-org/platform/errors/test" "github.com/tidepool-org/platform/log" @@ -461,6 +463,64 @@ var _ = Describe("Client", func() { }) }) }) + + Describe("GetDeviceTokens", func() { + var testUserID = "test-user-id" + var testUserIDBadResponse = "test-user-id-bad-response" + var testTokens = map[string]any{ + testUserID: []*devicetokens.DeviceToken{{ + Apple: &devicetokens.AppleDeviceToken{ + Token: []byte("blah"), + Environment: "sandbox", + }, + }}, + testUserIDBadResponse: []map[string]any{ + { + "Apple": "", + }, + }, + } + + It("returns a token", func() { + body, err := json.Marshal(testTokens[testUserID]) + Expect(err).To(Succeed()) + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserID+"/device_tokens"), + RespondWith(http.StatusOK, body)), + ) + + tokens, err := client.GetDeviceTokens(ctx, testUserID) + Expect(err).To(Succeed()) + Expect(tokens).To(HaveLen(1)) + Expect([]byte(tokens[0].Apple.Token)).To(Equal([]byte("blah"))) + Expect(tokens[0].Apple.Environment).To(Equal("sandbox")) + }) + + It("returns an error when receiving malformed responses", func() { + body, err := json.Marshal(testTokens[testUserIDBadResponse]) + Expect(err).To(Succeed()) + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserIDBadResponse+"/device_tokens"), + RespondWith(http.StatusOK, body)), + ) + + _, err = client.GetDeviceTokens(ctx, testUserIDBadResponse) + Expect(err).To(HaveOccurred()) + }) + + It("returns an error on non-200 responses", func() { + server.AppendHandlers( + CombineHandlers( + VerifyRequest("GET", "/v1/users/"+testUserID+"/device_tokens"), + RespondWith(http.StatusBadRequest, nil)), + ) + _, err := client.GetDeviceTokens(ctx, testUserID) + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("Unable to request device token data"))) + }) + }) }) }) }) diff --git a/auth/test/client.go b/auth/test/client.go index e500f69d34..9fba8f4e5c 100644 --- a/auth/test/client.go +++ b/auth/test/client.go @@ -4,6 +4,7 @@ type Client struct { *ProviderSessionAccessor *RestrictedTokenAccessor *ExternalAccessor + *DeviceTokensClient } func NewClient() *Client { @@ -11,6 +12,7 @@ func NewClient() *Client { ProviderSessionAccessor: NewProviderSessionAccessor(), 
RestrictedTokenAccessor: NewRestrictedTokenAccessor(), ExternalAccessor: NewExternalAccessor(), + DeviceTokensClient: NewDeviceTokensClient(), } } diff --git a/auth/test/external_accessor.go b/auth/test/external_accessor.go index 6cc1ffe6dc..121c207587 100644 --- a/auth/test/external_accessor.go +++ b/auth/test/external_accessor.go @@ -3,6 +3,7 @@ package test import ( "context" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/request" ) @@ -154,3 +155,11 @@ func (e *ExternalAccessor) AssertOutputsEmpty() { panic("EnsureAuthorizedUserOutputs is not empty") } } + +func NewDeviceTokensClient() *DeviceTokensClient { return &DeviceTokensClient{} } + +type DeviceTokensClient struct{} + +func (c *DeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + return nil, nil +} diff --git a/auth/test/mock.go b/auth/test/mock.go index 055a6a994f..301bd18e75 100644 --- a/auth/test/mock.go +++ b/auth/test/mock.go @@ -11,6 +11,7 @@ import ( gomock "github.com/golang/mock/gomock" auth "github.com/tidepool-org/platform/auth" + devicetokens "github.com/tidepool-org/platform/devicetokens" page "github.com/tidepool-org/platform/page" request "github.com/tidepool-org/platform/request" ) @@ -167,6 +168,21 @@ func (mr *MockClientMockRecorder) EnsureAuthorizedUser(ctx, targetUserID, permis return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EnsureAuthorizedUser", reflect.TypeOf((*MockClient)(nil).EnsureAuthorizedUser), ctx, targetUserID, permission) } +// GetDeviceTokens mocks base method. +func (m *MockClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceTokens", ctx, userID) + ret0, _ := ret[0].([]*devicetokens.DeviceToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeviceTokens indicates an expected call of GetDeviceTokens. +func (mr *MockClientMockRecorder) GetDeviceTokens(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceTokens", reflect.TypeOf((*MockClient)(nil).GetDeviceTokens), ctx, userID) +} + // GetProviderSession mocks base method. func (m *MockClient) GetProviderSession(ctx context.Context, id string) (*auth.ProviderSession, error) { m.ctrl.T.Helper() @@ -382,3 +398,41 @@ func (mr *MockExternalAccessorMockRecorder) ValidateSessionToken(ctx, token inte mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateSessionToken", reflect.TypeOf((*MockExternalAccessor)(nil).ValidateSessionToken), ctx, token) } + +// MockDeviceTokensClient is a mock of DeviceTokensClient interface. +type MockDeviceTokensClient struct { + ctrl *gomock.Controller + recorder *MockDeviceTokensClientMockRecorder +} + +// MockDeviceTokensClientMockRecorder is the mock recorder for MockDeviceTokensClient. +type MockDeviceTokensClientMockRecorder struct { + mock *MockDeviceTokensClient +} + +// NewMockDeviceTokensClient creates a new mock instance. +func NewMockDeviceTokensClient(ctrl *gomock.Controller) *MockDeviceTokensClient { + mock := &MockDeviceTokensClient{ctrl: ctrl} + mock.recorder = &MockDeviceTokensClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDeviceTokensClient) EXPECT() *MockDeviceTokensClientMockRecorder { + return m.recorder +} + +// GetDeviceTokens mocks base method. 
+func (m *MockDeviceTokensClient) GetDeviceTokens(ctx context.Context, userID string) ([]*devicetokens.DeviceToken, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDeviceTokens", ctx, userID) + ret0, _ := ret[0].([]*devicetokens.DeviceToken) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDeviceTokens indicates an expected call of GetDeviceTokens. +func (mr *MockDeviceTokensClientMockRecorder) GetDeviceTokens(ctx, userID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeviceTokens", reflect.TypeOf((*MockDeviceTokensClient)(nil).GetDeviceTokens), ctx, userID) +} From 2b77a38560696631cfc6157479454462dc0c0d68 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 7 May 2024 09:18:47 -0600 Subject: [PATCH 06/15] remove unused device tokens repo from data This was missed when moving device tokens from the data service to the auth service in commit a0f5a84. BACK-2554 --- data/service/api/v1/mocks/context.go | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/data/service/api/v1/mocks/context.go b/data/service/api/v1/mocks/context.go index d0ac5c33d2..86c804b906 100644 --- a/data/service/api/v1/mocks/context.go +++ b/data/service/api/v1/mocks/context.go @@ -10,7 +10,6 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/data/service/context" - "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/permission" "github.com/tidepool-org/platform/request" servicecontext "github.com/tidepool-org/platform/service/context" @@ -23,13 +22,12 @@ type Context struct { T likeT // authDetails should be updated via the WithAuthDetails method. - authDetails *test.MockAuthDetails - RESTRequest *rest.Request - ResponseWriter rest.ResponseWriter - recorder *httptest.ResponseRecorder - MockAlertsRepository alerts.Repository - MockDeviceTokensRepository devicetokens.Repository - MockPermissionClient permission.Client + authDetails *test.MockAuthDetails + RESTRequest *rest.Request + ResponseWriter rest.ResponseWriter + recorder *httptest.ResponseRecorder + MockAlertsRepository alerts.Repository + MockPermissionClient permission.Client } func NewContext(t likeT, method, url string, body io.Reader) *Context { @@ -98,10 +96,6 @@ func (c *Context) AlertsRepository() alerts.Repository { return c.MockAlertsRepository } -func (c *Context) DeviceTokensRepository() devicetokens.Repository { - return c.MockDeviceTokensRepository -} - func (c *Context) PermissionClient() permission.Client { return c.MockPermissionClient } From c7e19c1ec8bc9d8b0d28f7631d7629534182a146 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 7 May 2024 10:51:10 -0600 Subject: [PATCH 07/15] adds a pusher client for sending APNs push notifications Basic steps are taken to allow for other push notification services to be easily added in the future. 
BACK-2554 --- go.mod | 1 + go.sum | 9 + push/push.go | 132 ++++++ push/push_suite_test.go | 11 + push/push_test.go | 147 +++++++ vendor/github.com/sideshow/apns2/.gitignore | 31 ++ vendor/github.com/sideshow/apns2/LICENSE | 22 + vendor/github.com/sideshow/apns2/README.md | 216 ++++++++++ vendor/github.com/sideshow/apns2/client.go | 238 +++++++++++ .../sideshow/apns2/client_manager.go | 162 +++++++ .../github.com/sideshow/apns2/notification.go | 148 +++++++ .../sideshow/apns2/payload/builder.go | 402 ++++++++++++++++++ vendor/github.com/sideshow/apns2/response.go | 156 +++++++ .../github.com/sideshow/apns2/token/token.go | 107 +++++ vendor/modules.txt | 5 + 15 files changed, 1787 insertions(+) create mode 100644 push/push.go create mode 100644 push/push_suite_test.go create mode 100644 push/push_test.go create mode 100644 vendor/github.com/sideshow/apns2/.gitignore create mode 100644 vendor/github.com/sideshow/apns2/LICENSE create mode 100644 vendor/github.com/sideshow/apns2/README.md create mode 100644 vendor/github.com/sideshow/apns2/client.go create mode 100644 vendor/github.com/sideshow/apns2/client_manager.go create mode 100644 vendor/github.com/sideshow/apns2/notification.go create mode 100644 vendor/github.com/sideshow/apns2/payload/builder.go create mode 100644 vendor/github.com/sideshow/apns2/response.go create mode 100644 vendor/github.com/sideshow/apns2/token/token.go diff --git a/go.mod b/go.mod index 38ba47b4dc..732d4c5ce7 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/onsi/gomega v1.33.1 github.com/prometheus/client_golang v1.19.1 github.com/rinchsan/device-check-go v1.3.0 + github.com/sideshow/apns2 v0.23.0 github.com/tidepool-org/clinic/client v0.0.0-20240629034458-1365c8963143 github.com/tidepool-org/devices/api v0.0.0-20240412011010-75b16d8daec0 github.com/tidepool-org/go-common v0.12.2-0.20240612192926-de6d5c5a742c diff --git a/go.sum b/go.sum index e8c2efa09c..a58a9f5d2c 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/ant0ine/go-json-rest v3.3.2+incompatible h1:nBixrkLFiDNAW0hauKDLc8yJI6XfrQumWvytE1Hk14E= github.com/ant0ine/go-json-rest v3.3.2+incompatible/go.mod h1:q6aCt0GfU6LhpBsnZ/2U+mwe+0XB5WStbmwyoPfc+sk= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= @@ -51,6 +53,7 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/mock v1.6.0 
h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= @@ -160,6 +163,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sideshow/apns2 v0.23.0 h1:lpkikaZ995GIcKk6AFsYzHyezCrsrfEDvUWcWkEGErY= +github.com/sideshow/apns2 v0.23.0/go.mod h1:7Fceu+sL0XscxrfLSkAoH6UtvKefq3Kq1n4W3ayQZqE= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -212,6 +217,7 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20170512130425-ab89591268e0/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -232,6 +238,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220403103023-749bd193bc2b/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= @@ -253,6 +260,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -294,6 +302,7 @@ google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/push/push.go b/push/push.go new file mode 100644 index 0000000000..419cd395b3 --- /dev/null +++ b/push/push.go @@ -0,0 +1,132 @@ +// Package push provides clients for sending mobile device push notifications. +package push + +import ( + "context" + "encoding/hex" + "net/http" + "sync" + + "github.com/sideshow/apns2" + "github.com/sideshow/apns2/payload" + "github.com/sideshow/apns2/token" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" +) + +// Notification models a provider-independent push notification. +type Notification struct { + Message string +} + +// APNSPusher implements push notifications via Apple APNs. +type APNSPusher struct { + BundleID string + + client APNS2Client + clientMu sync.Mutex +} + +// NewAPNSPusher creates a Pusher for sending device notifications via Apple's +// APNs. +func NewAPNSPusher(client APNS2Client, bundleID string) *APNSPusher { + return &APNSPusher{ + BundleID: bundleID, + client: client, + } +} + +// NewAPNSPusherFromKeyData creates an APNSPusher for sending device +// notifications via Apple's APNs. +// +// The signingKey is the raw token signing key received from Apple (.p8 file +// containing PEM-encoded private key), along with its respective team id, key +// id, and application bundle id. +// +// https://developer.apple.com/documentation/usernotifications/sending-notification-requests-to-apns +func NewAPNSPusherFromKeyData(signingKey []byte, keyID, teamID, bundleID string) (*APNSPusher, error) { + authKey, err := token.AuthKeyFromBytes(signingKey) + if err != nil { + return nil, err + } + token := &token.Token{ + AuthKey: authKey, + KeyID: keyID, + TeamID: teamID, + } + client := &apns2Client{Client: apns2.NewTokenClient(token)} + return NewAPNSPusher(client, bundleID), nil +} + +func (p *APNSPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, note *Notification) error { + if deviceToken.Apple == nil { + return errors.New("Unable to push notification: APNSPusher can only use Apple device tokens but the Apple token is nil") + } + + hexToken := hex.EncodeToString(deviceToken.Apple.Token) + appleNote := p.buildAppleNotification(hexToken, note) + resp, err := p.safePush(ctx, deviceToken.Apple.Environment, appleNote) + if err != nil { + return errors.Wrap(err, "Unable to push notification") + } + if resp.StatusCode != http.StatusOK { + return errors.Newf("Unable to push notification: APNs returned non-200 status: %d, %s", resp.StatusCode, resp.Reason) + } + if logger := log.LoggerFromContext(ctx); logger != nil { + logger.WithFields(log.Fields{ + "apnsID": resp.ApnsID, + }).Info("notification pushed") + } + + return nil +} + +// safePush guards the environment setup and push method with a mutex. +// +// This prevents the environment from being changed out from under +// you. Unlikely, but better safe than sorry. 
+func (p *APNSPusher) safePush(ctx context.Context, env string, note *apns2.Notification) (*apns2.Response, error) { + p.clientMu.Lock() + defer p.clientMu.Unlock() + if env == devicetokens.AppleEnvProduction { + p.client.Production() + } else { + p.client.Development() + } + return p.client.PushWithContext(ctx, note) +} + +func (p *APNSPusher) buildAppleNotification(hexToken string, note *Notification) *apns2.Notification { + payload := payload.NewPayload(). + Alert(note.Message). + AlertBody(note.Message) + return &apns2.Notification{ + DeviceToken: hexToken, + Payload: payload, + Topic: p.BundleID, + } +} + +// APNS2Client abstracts the apns2 library for easier testing. +type APNS2Client interface { + Development() APNS2Client + Production() APNS2Client + PushWithContext(apns2.Context, *apns2.Notification) (*apns2.Response, error) +} + +// apns2Client adapts the apns2.Client to APNS2Client so it can be replaced for testing. +type apns2Client struct { + *apns2.Client +} + +func (c apns2Client) Development() APNS2Client { + d := c.Client.Development() + return &apns2Client{Client: d} +} + +func (c apns2Client) Production() APNS2Client { + p := c.Client.Production() + return &apns2Client{Client: p} +} diff --git a/push/push_suite_test.go b/push/push_suite_test.go new file mode 100644 index 0000000000..a5b73e9d49 --- /dev/null +++ b/push/push_suite_test.go @@ -0,0 +1,11 @@ +package push + +import ( + "testing" + + "github.com/tidepool-org/platform/test" +) + +func TestSuite(t *testing.T) { + test.Test(t) +} diff --git a/push/push_test.go b/push/push_test.go new file mode 100644 index 0000000000..5922f85e25 --- /dev/null +++ b/push/push_test.go @@ -0,0 +1,147 @@ +package push + +import ( + "context" + "fmt" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/sideshow/apns2" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + testlog "github.com/tidepool-org/platform/log/test" +) + +const ( + testBundleID = "test-bundle-id" +) + +var ( + testDeviceToken []byte = []byte("dGVzdGluZyAxIDIgMw==") +) + +type pushTestDeps struct { + Client *mockAPNS2Client + Token *devicetokens.DeviceToken + Notification *Notification +} + +func testDeps() (context.Context, *APNSPusher, *pushTestDeps) { + ctx := context.Background() + mockClient := &mockAPNS2Client{ + Response: &apns2.Response{ + StatusCode: http.StatusOK, + }, + } + pusher := NewAPNSPusher(mockClient, testBundleID) + deps := &pushTestDeps{ + Client: mockClient, + Token: &devicetokens.DeviceToken{ + Apple: &devicetokens.AppleDeviceToken{ + Token: testDeviceToken, + }, + }, + Notification: &Notification{}, + } + return ctx, pusher, deps +} + +var _ = Describe("APNSPusher", func() { + Describe("Push", func() { + It("requires an Apple token", func() { + ctx, pusher, deps := testDeps() + deps.Token.Apple = nil + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("can only use Apple device tokens"))) + }) + + Context("its environment", func() { + + for _, env := range []string{devicetokens.AppleEnvProduction, devicetokens.AppleEnvSandbox} { + It("is set via its token", func() { + ctx, pusher, deps := testDeps() + deps.Token.Apple.Environment = env + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(Succeed()) + // This is reaching into the implementation of + // APNS2Client, but there's no other way to test this. 
+ Expect(deps.Client.Env).To(Equal(env)) + }) + } + }) + + It("reports upstream errors", func() { + ctx, pusher, deps := testDeps() + deps.Client.Error = fmt.Errorf("test error") + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("test error"))) + }) + + Context("when a logger is available", func() { + It("logs", func() { + ctx, pusher, deps := testDeps() + testLogger := testlog.NewLogger() + ctx = log.NewContextWithLogger(ctx, testLogger) + deps.Client.Response = &apns2.Response{ + StatusCode: http.StatusOK, + ApnsID: "test-id", + } + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(Succeed()) + testLogger.AssertInfo("notification pushed", log.Fields{ + "apnsID": "test-id", + }) + }) + }) + + It("reports non-200 responses as errors", func() { + ctx, pusher, deps := testDeps() + deps.Client.Response = &apns2.Response{ + StatusCode: http.StatusBadRequest, + } + + err := pusher.Push(ctx, deps.Token, deps.Notification) + + Expect(err).To(HaveOccurred()) + Expect(err).To(MatchError(ContainSubstring("APNs returned non-200 status"))) + }) + }) +}) + +type mockAPNS2Client struct { + Response *apns2.Response + Error error + Env string +} + +func (c *mockAPNS2Client) Development() APNS2Client { + c.Env = devicetokens.AppleEnvSandbox + return c +} + +func (c *mockAPNS2Client) Production() APNS2Client { + c.Env = devicetokens.AppleEnvProduction + return c +} + +func (c *mockAPNS2Client) PushWithContext(_ apns2.Context, _ *apns2.Notification) (*apns2.Response, error) { + if c.Error != nil { + return nil, c.Error + } + if c.Response != nil { + return c.Response, nil + } + return nil, nil +} diff --git a/vendor/github.com/sideshow/apns2/.gitignore b/vendor/github.com/sideshow/apns2/.gitignore new file mode 100644 index 0000000000..5b77d5d22e --- /dev/null +++ b/vendor/github.com/sideshow/apns2/.gitignore @@ -0,0 +1,31 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +/*.p12 +/*.pem +/*.cer +/*.p8 + +.DS_Store \ No newline at end of file diff --git a/vendor/github.com/sideshow/apns2/LICENSE b/vendor/github.com/sideshow/apns2/LICENSE new file mode 100644 index 0000000000..59abbcf40e --- /dev/null +++ b/vendor/github.com/sideshow/apns2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Adam Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/sideshow/apns2/README.md b/vendor/github.com/sideshow/apns2/README.md new file mode 100644 index 0000000000..32e04190ce --- /dev/null +++ b/vendor/github.com/sideshow/apns2/README.md @@ -0,0 +1,216 @@ +# APNS/2 + +APNS/2 is a go package designed for simple, flexible and fast Apple Push Notifications on iOS, OSX and Safari using the new HTTP/2 Push provider API. + +[![Build Status](https://github.com/sideshow/apns2/actions/workflows/tests.yml/badge.svg)](https://github.com/sideshow/apns2/actions/workflows/tests.yml) [![Coverage Status](https://coveralls.io/repos/sideshow/apns2/badge.svg?branch=master&service=github)](https://coveralls.io/github/sideshow/apns2?branch=master) [![GoDoc](https://godoc.org/github.com/sideshow/apns2?status.svg)](https://godoc.org/github.com/sideshow/apns2) + +## Features + +- Uses new Apple APNs HTTP/2 connection +- Fast - See [notes on speed](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed) +- Works with go 1.7 and later +- Supports new Apple Token Based Authentication (JWT) +- Supports new iOS 10 features such as Collapse IDs, Subtitles and Mutable Notifications +- Supports new iOS 15 features interruptionLevel and relevanceScore +- Supports persistent connections to APNs +- Supports VoIP/PushKit notifications (iOS 8 and later) +- Modular & easy to use +- Tested and working in APNs production environment + +## Install + +- Make sure you have [Go](https://golang.org/doc/install) installed and have set your [GOPATH](https://golang.org/doc/code.html#GOPATH). +- Install apns2: + +```sh +go get -u github.com/sideshow/apns2 +``` + +If you are running the test suite you will also need to install testify: + +```sh +go get -u github.com/stretchr/testify +``` + +## Example + +```go +package main + +import ( + "log" + "fmt" + + "github.com/sideshow/apns2" + "github.com/sideshow/apns2/certificate" +) + +func main() { + + cert, err := certificate.FromP12File("../cert.p12", "") + if err != nil { + log.Fatal("Cert Error:", err) + } + + notification := &apns2.Notification{} + notification.DeviceToken = "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7" + notification.Topic = "com.sideshow.Apns2" + notification.Payload = []byte(`{"aps":{"alert":"Hello!"}}`) // See Payload section below + + // If you want to test push notifications for builds running directly from XCode (Development), use + // client := apns2.NewClient(cert).Development() + // For apps published to the app store or installed as an ad-hoc distribution use Production() + + client := apns2.NewClient(cert).Production() + res, err := client.Push(notification) + + if err != nil { + log.Fatal("Error:", err) + } + + fmt.Printf("%v %v %v\n", res.StatusCode, res.ApnsID, res.Reason) +} +``` + +## JWT Token Example + +Instead of using a `.p12` or `.pem` certificate as above, you can optionally use +APNs JWT _Provider Authentication Tokens_. First you will need a signing key (`.p8` file), Key ID and Team ID [from Apple](http://help.apple.com/xcode/mac/current/#/dev54d690a66). 
Once you have these details, you can create a new client: + +```go +authKey, err := token.AuthKeyFromFile("../AuthKey_XXX.p8") +if err != nil { + log.Fatal("token error:", err) +} + +token := &token.Token{ + AuthKey: authKey, + // KeyID from developer account (Certificates, Identifiers & Profiles -> Keys) + KeyID: "ABC123DEFG", + // TeamID from developer account (View Account -> Membership) + TeamID: "DEF123GHIJ", +} +... + +client := apns2.NewTokenClient(token) +res, err := client.Push(notification) +``` + +- You can use one APNs signing key to authenticate tokens for multiple apps. +- A signing key works for both the development and production environments. +- A signing key doesn’t expire but can be revoked. + +## Notification + +At a minimum, a _Notification_ needs a _DeviceToken_, a _Topic_ and a _Payload_. + +```go +notification := &apns2.Notification{ + DeviceToken: "11aa01229f15f0f0c52029d8cf8cd0aeaf2365fe4cebc4af26cd6d76b7919ef7", + Topic: "com.sideshow.Apns2", + Payload: []byte(`{"aps":{"alert":"Hello!"}}`), +} +``` + +You can also set an optional _ApnsID_, _Expiration_ or _Priority_. + +```go +notification.ApnsID = "40636A2C-C093-493E-936A-2A4333C06DEA" +notification.Expiration = time.Now() +notification.Priority = apns2.PriorityLow +``` + +## Payload + +You can use raw bytes for the `notification.Payload` as above, or you can use the payload builder package which makes it easy to construct APNs payloads. + +```go +// {"aps":{"alert":"hello","badge":1},"key":"val"} + +payload := payload.NewPayload().Alert("hello").Badge(1).Custom("key", "val") + +notification.Payload = payload +client.Push(notification) +``` + +Refer to the [payload](https://godoc.org/github.com/sideshow/apns2/payload) docs for more info. + +## Response, Error handling + +APNS/2 draws the distinction between a valid response from Apple indicating whether or not the _Notification_ was sent or not, and an unrecoverable or unexpected _Error_; + +- An `Error` is returned if a non-recoverable error occurs, i.e. if there is a problem with the underlying _http.Client_ connection or _Certificate_, the payload was not sent, or a valid _Response_ was not received. +- A `Response` is returned if the payload was successfully sent to Apple and a documented response was received. This struct will contain more information about whether or not the push notification succeeded, its _apns-id_ and if applicable, more information around why it did not succeed. + +To check if a `Notification` was successfully sent; + +```go +res, err := client.Push(notification) +if err != nil { + log.Println("There was an error", err) + return +} + +if res.Sent() { + log.Println("Sent:", res.ApnsID) +} else { + fmt.Printf("Not Sent: %v %v %v\n", res.StatusCode, res.ApnsID, res.Reason) +} +``` + +## Context & Timeouts + +For better control over request cancellations and timeouts APNS/2 supports +contexts. Using a context can be helpful if you want to cancel all pushes when +the parent process is cancelled, or need finer grained control over individual +push timeouts. See the [Google post](https://blog.golang.org/context) for more +information on contexts. + +```go +ctx, cancel = context.WithTimeout(context.Background(), 10 * time.Second) +res, err := client.PushWithContext(ctx, notification) +defer cancel() +``` + +## Speed & Performance + +Also see the wiki page on [APNS HTTP 2 Push Speed](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed). 
+ +For best performance, you should hold on to an `apns2.Client` instance and not re-create it every push. The underlying TLS connection itself can take a few seconds to connect and negotiate, so if you are setting up an `apns2.Client` and tearing it down every push, then this will greatly affect performance. (Apple suggest keeping the connection open all the time). + +You should also limit the amount of `apns2.Client` instances. The underlying transport has a http connection pool itself, so a single client instance will be enough for most users (One instance can potentially do 4,000+ pushes per second). If you need more than this then one instance per CPU core is a good starting point. + +Speed is greatly affected by the location of your server and the quality of your network connection. If you're just testing locally, behind a proxy or if your server is outside USA then you're not going to get great performance. With a good server located in AWS, you should be able to get [decent throughput](https://github.com/sideshow/apns2/wiki/APNS-HTTP-2-Push-Speed). + +## Command line tool + +APNS/2 has a command line tool that can be installed with `go get github.com/sideshow/apns2/apns2`. Usage: + +``` +apns2 --help +usage: apns2 --certificate-path=CERTIFICATE-PATH --topic=TOPIC [] + +Listens to STDIN to send notifications and writes APNS response code and reason to STDOUT. + +The expected format is: +Example: aff0c63d9eaa63ad161bafee732d5bc2c31f66d552054718ff19ce314371e5d0 {"aps": {"alert": "hi"}} +Flags: + --help Show context-sensitive help (also try --help-long and --help-man). + -c, --certificate-path=CERTIFICATE-PATH + Path to certificate file. + -t, --topic=TOPIC The topic of the remote notification, which is typically the bundle ID for your app + -m, --mode="production" APNS server to send notifications to. `production` or `development`. Defaults to `production` + --version Show application version. +``` + +## License + +The MIT License (MIT) + +Copyright (c) 2016 Adam Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sideshow/apns2/client.go b/vendor/github.com/sideshow/apns2/client.go new file mode 100644 index 0000000000..cd98dd4228 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/client.go @@ -0,0 +1,238 @@ +// Package apns2 is a go Apple Push Notification Service (APNs) provider that +// allows you to send remote notifications to your iOS, tvOS, and OS X +// apps, using the new APNs HTTP/2 network protocol. 
+package apns2 + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "io" + "net" + "net/http" + "strconv" + "time" + + "github.com/sideshow/apns2/token" + "golang.org/x/net/http2" +) + +// Apple HTTP/2 Development & Production urls +const ( + HostDevelopment = "https://api.sandbox.push.apple.com" + HostProduction = "https://api.push.apple.com" +) + +// DefaultHost is a mutable var for testing purposes +var DefaultHost = HostDevelopment + +var ( + // HTTPClientTimeout specifies a time limit for requests made by the + // HTTPClient. The timeout includes connection time, any redirects, + // and reading the response body. + HTTPClientTimeout = 60 * time.Second + + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. If + // zero, no health check is performed. + ReadIdleTimeout = 15 * time.Second + + // TCPKeepAlive specifies the keep-alive period for an active network + // connection. If zero, keep-alive probes are sent with a default value + // (currently 15 seconds) + TCPKeepAlive = 15 * time.Second + + // TLSDialTimeout is the maximum amount of time a dial will wait for a connect + // to complete. + TLSDialTimeout = 20 * time.Second +) + +// DialTLS is the default dial function for creating TLS connections for +// non-proxied HTTPS requests. +var DialTLS = func(network, addr string, cfg *tls.Config) (net.Conn, error) { + dialer := &net.Dialer{ + Timeout: TLSDialTimeout, + KeepAlive: TCPKeepAlive, + } + return tls.DialWithDialer(dialer, network, addr, cfg) +} + +// Client represents a connection with the APNs +type Client struct { + Host string + Certificate tls.Certificate + Token *token.Token + HTTPClient *http.Client +} + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. Context's methods may be called by multiple goroutines +// simultaneously. +type Context interface { + context.Context +} + +type connectionCloser interface { + CloseIdleConnections() +} + +// NewClient returns a new Client with an underlying http.Client configured with +// the correct APNs HTTP/2 transport settings. It does not connect to the APNs +// until the first Notification is sent via the Push method. +// +// As per the Apple APNs Provider API, you should keep a handle on this client +// so that you can keep your connections with APNs open across multiple +// notifications; don’t repeatedly open and close connections. APNs treats rapid +// connection and disconnection as a denial-of-service attack. +// +// If your use case involves multiple long-lived connections, consider using +// the ClientManager, which manages clients for you. +func NewClient(certificate tls.Certificate) *Client { + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{certificate}, + } + if len(certificate.Certificate) > 0 { + tlsConfig.BuildNameToCertificate() + } + transport := &http2.Transport{ + TLSClientConfig: tlsConfig, + DialTLS: DialTLS, + ReadIdleTimeout: ReadIdleTimeout, + } + return &Client{ + HTTPClient: &http.Client{ + Transport: transport, + Timeout: HTTPClientTimeout, + }, + Certificate: certificate, + Host: DefaultHost, + } +} + +// NewTokenClient returns a new Client with an underlying http.Client configured +// with the correct APNs HTTP/2 transport settings. It does not connect to the APNs +// until the first Notification is sent via the Push method. 
+// +// As per the Apple APNs Provider API, you should keep a handle on this client +// so that you can keep your connections with APNs open across multiple +// notifications; don’t repeatedly open and close connections. APNs treats rapid +// connection and disconnection as a denial-of-service attack. +func NewTokenClient(token *token.Token) *Client { + transport := &http2.Transport{ + DialTLS: DialTLS, + ReadIdleTimeout: ReadIdleTimeout, + } + return &Client{ + Token: token, + HTTPClient: &http.Client{ + Transport: transport, + Timeout: HTTPClientTimeout, + }, + Host: DefaultHost, + } +} + +// Development sets the Client to use the APNs development push endpoint. +func (c *Client) Development() *Client { + c.Host = HostDevelopment + return c +} + +// Production sets the Client to use the APNs production push endpoint. +func (c *Client) Production() *Client { + c.Host = HostProduction + return c +} + +// Push sends a Notification to the APNs gateway. If the underlying http.Client +// is not currently connected, this method will attempt to reconnect +// transparently before sending the notification. It will return a Response +// indicating whether the notification was accepted or rejected by the APNs +// gateway, or an error if something goes wrong. +// +// Use PushWithContext if you need better cancellation and timeout control. +func (c *Client) Push(n *Notification) (*Response, error) { + return c.PushWithContext(context.Background(), n) +} + +// PushWithContext sends a Notification to the APNs gateway. Context carries a +// deadline and a cancellation signal and allows you to close long running +// requests when the context timeout is exceeded. Context can be nil, for +// backwards compatibility. +// +// If the underlying http.Client is not currently connected, this method will +// attempt to reconnect transparently before sending the notification. It will +// return a Response indicating whether the notification was accepted or +// rejected by the APNs gateway, or an error if something goes wrong. +func (c *Client) PushWithContext(ctx Context, n *Notification) (*Response, error) { + payload, err := json.Marshal(n) + if err != nil { + return nil, err + } + + url := c.Host + "/3/device/" + n.DeviceToken + request, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + + if c.Token != nil { + c.setTokenHeader(request) + } + + setHeaders(request, n) + + response, err := c.HTTPClient.Do(request) + if err != nil { + return nil, err + } + defer response.Body.Close() + + r := &Response{} + r.StatusCode = response.StatusCode + r.ApnsID = response.Header.Get("apns-id") + + decoder := json.NewDecoder(response.Body) + if err := decoder.Decode(r); err != nil && err != io.EOF { + return &Response{}, err + } + return r, nil +} + +// CloseIdleConnections closes any underlying connections which were previously +// connected from previous requests but are now sitting idle. It will not +// interrupt any connections currently in use. 
+func (c *Client) CloseIdleConnections() { + c.HTTPClient.Transport.(connectionCloser).CloseIdleConnections() +} + +func (c *Client) setTokenHeader(r *http.Request) { + bearer := c.Token.GenerateIfExpired() + r.Header.Set("authorization", "bearer "+bearer) +} + +func setHeaders(r *http.Request, n *Notification) { + r.Header.Set("Content-Type", "application/json; charset=utf-8") + if n.Topic != "" { + r.Header.Set("apns-topic", n.Topic) + } + if n.ApnsID != "" { + r.Header.Set("apns-id", n.ApnsID) + } + if n.CollapseID != "" { + r.Header.Set("apns-collapse-id", n.CollapseID) + } + if n.Priority > 0 { + r.Header.Set("apns-priority", strconv.Itoa(n.Priority)) + } + if !n.Expiration.IsZero() { + r.Header.Set("apns-expiration", strconv.FormatInt(n.Expiration.Unix(), 10)) + } + if n.PushType != "" { + r.Header.Set("apns-push-type", string(n.PushType)) + } else { + r.Header.Set("apns-push-type", string(PushTypeAlert)) + } + +} diff --git a/vendor/github.com/sideshow/apns2/client_manager.go b/vendor/github.com/sideshow/apns2/client_manager.go new file mode 100644 index 0000000000..bb4bdf900d --- /dev/null +++ b/vendor/github.com/sideshow/apns2/client_manager.go @@ -0,0 +1,162 @@ +package apns2 + +import ( + "container/list" + "crypto/sha1" + "crypto/tls" + "sync" + "time" +) + +type managerItem struct { + key [sha1.Size]byte + client *Client + lastUsed time.Time +} + +// ClientManager is a way to manage multiple connections to the APNs. +type ClientManager struct { + // MaxSize is the maximum number of clients allowed in the manager. When + // this limit is reached, the least recently used client is evicted. Set + // zero for no limit. + MaxSize int + + // MaxAge is the maximum age of clients in the manager. Upon retrieval, if + // a client has remained unused in the manager for this duration or longer, + // it is evicted and nil is returned. Set zero to disable this + // functionality. + MaxAge time.Duration + + // Factory is the function which constructs clients if not found in the + // manager. + Factory func(certificate tls.Certificate) *Client + + cache map[[sha1.Size]byte]*list.Element + ll *list.List + mu sync.Mutex + once sync.Once +} + +// NewClientManager returns a new ClientManager for prolonged, concurrent usage +// of multiple APNs clients. ClientManager is flexible enough to work best for +// your use case. When a client is not found in the manager, Get will return +// the result of calling Factory, which can be a Client or nil. +// +// Having multiple clients per certificate in the manager is not allowed. +// +// By default, MaxSize is 64, MaxAge is 10 minutes, and Factory always returns +// a Client with default options. +func NewClientManager() *ClientManager { + manager := &ClientManager{ + MaxSize: 64, + MaxAge: 10 * time.Minute, + Factory: NewClient, + } + + manager.initInternals() + + return manager +} + +// Add adds a Client to the manager. You can use this to individually configure +// Clients in the manager. +func (m *ClientManager) Add(client *Client) { + m.initInternals() + m.mu.Lock() + defer m.mu.Unlock() + + key := cacheKey(client.Certificate) + now := time.Now() + if ele, hit := m.cache[key]; hit { + item := ele.Value.(*managerItem) + item.client = client + item.lastUsed = now + m.ll.MoveToFront(ele) + return + } + ele := m.ll.PushFront(&managerItem{key, client, now}) + m.cache[key] = ele + if m.MaxSize != 0 && m.ll.Len() > m.MaxSize { + m.mu.Unlock() + m.removeOldest() + m.mu.Lock() + } +} + +// Get gets a Client from the manager. 
If a Client is not found in the manager +// or if a Client has remained in the manager longer than MaxAge, Get will call +// the ClientManager's Factory function, store the result in the manager if +// non-nil, and return it. +func (m *ClientManager) Get(certificate tls.Certificate) *Client { + m.initInternals() + m.mu.Lock() + defer m.mu.Unlock() + + key := cacheKey(certificate) + now := time.Now() + if ele, hit := m.cache[key]; hit { + item := ele.Value.(*managerItem) + if m.MaxAge != 0 && item.lastUsed.Before(now.Add(-m.MaxAge)) { + c := m.Factory(certificate) + if c == nil { + return nil + } + item.client = c + } + item.lastUsed = now + m.ll.MoveToFront(ele) + return item.client + } + + c := m.Factory(certificate) + if c == nil { + return nil + } + m.mu.Unlock() + m.Add(c) + m.mu.Lock() + return c +} + +// Len returns the current size of the ClientManager. +func (m *ClientManager) Len() int { + if m.cache == nil { + return 0 + } + m.mu.Lock() + defer m.mu.Unlock() + return m.ll.Len() +} + +func (m *ClientManager) initInternals() { + m.once.Do(func() { + m.cache = map[[sha1.Size]byte]*list.Element{} + m.ll = list.New() + }) +} + +func (m *ClientManager) removeOldest() { + m.mu.Lock() + ele := m.ll.Back() + m.mu.Unlock() + if ele != nil { + m.removeElement(ele) + } +} + +func (m *ClientManager) removeElement(e *list.Element) { + m.mu.Lock() + defer m.mu.Unlock() + m.ll.Remove(e) + delete(m.cache, e.Value.(*managerItem).key) +} + +func cacheKey(certificate tls.Certificate) [sha1.Size]byte { + var data []byte + + for _, cert := range certificate.Certificate { + data = append(data, cert...) + } + + return sha1.Sum(data) +} diff --git a/vendor/github.com/sideshow/apns2/notification.go b/vendor/github.com/sideshow/apns2/notification.go new file mode 100644 index 0000000000..69bf312de5 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/notification.go @@ -0,0 +1,148 @@ +package apns2 + +import ( + "encoding/json" + "time" +) + +// EPushType defines the value for the apns-push-type header +type EPushType string + +const ( + // PushTypeAlert is used for notifications that trigger a user interaction — + // for example, an alert, badge, or sound. If you set this push type, the + // topic field must use your app’s bundle ID as the topic. If the + // notification requires immediate action from the user, set notification + // priority to 10; otherwise use 5. The alert push type is required on + // watchOS 6 and later. It is recommended on macOS, iOS, tvOS, and iPadOS. + PushTypeAlert EPushType = "alert" + + // PushTypeBackground is used for notifications that deliver content in the + // background, and don’t trigger any user interactions. If you set this push + // type, the topic field must use your app’s bundle ID as the topic. Always + // use priority 5. Using priority 10 is an error. The background push type + // is required on watchOS 6 and later. It is recommended on macOS, iOS, + // tvOS, and iPadOS. + PushTypeBackground EPushType = "background" + + // PushTypeLocation is used for notifications that request a user’s + // location. If you set this push type, the topic field must use your app’s + // bundle ID with .location-query appended to the end. The location push + // type is recommended for iOS and iPadOS. It isn’t available on macOS, + // tvOS, and watchOS. If the location query requires an immediate response + // from the Location Push Service Extension, set notification apns-priority + // to 10; otherwise, use 5. The location push type supports only token-based + // authentication. 
+ PushTypeLocation EPushType = "location" + + // PushTypeVOIP is used for notifications that provide information about an + // incoming Voice-over-IP (VoIP) call. If you set this push type, the topic + // field must use your app’s bundle ID with .voip appended to the end. If + // you’re using certificate-based authentication, you must also register the + // certificate for VoIP services. The voip push type is not available on + // watchOS. It is recommended on macOS, iOS, tvOS, and iPadOS. + PushTypeVOIP EPushType = "voip" + + // PushTypeComplication is used for notifications that contain update + // information for a watchOS app’s complications. If you set this push type, + // the topic field must use your app’s bundle ID with .complication appended + // to the end. If you’re using certificate-based authentication, you must + // also register the certificate for WatchKit services. The complication + // push type is recommended for watchOS and iOS. It is not available on + // macOS, tvOS, and iPadOS. + PushTypeComplication EPushType = "complication" + + // PushTypeFileProvider is used to signal changes to a File Provider + // extension. If you set this push type, the topic field must use your app’s + // bundle ID with .pushkit.fileprovider appended to the end. The + // fileprovider push type is not available on watchOS. It is recommended on + // macOS, iOS, tvOS, and iPadOS. + PushTypeFileProvider EPushType = "fileprovider" + + // PushTypeMDM is used for notifications that tell managed devices to + // contact the MDM server. If you set this push type, you must use the topic + // from the UID attribute in the subject of your MDM push certificate. + PushTypeMDM EPushType = "mdm" +) + +const ( + // PriorityLow will tell APNs to send the push message at a time that takes + // into account power considerations for the device. Notifications with this + // priority might be grouped and delivered in bursts. They are throttled, + // and in some cases are not delivered. + PriorityLow = 5 + + // PriorityHigh will tell APNs to send the push message immediately. + // Notifications with this priority must trigger an alert, sound, or badge + // on the target device. It is an error to use this priority for a push + // notification that contains only the content-available key. + PriorityHigh = 10 +) + +// Notification represents the the data and metadata for a APNs Remote Notification. +type Notification struct { + + // An optional canonical UUID that identifies the notification. The + // canonical form is 32 lowercase hexadecimal digits, displayed in five + // groups separated by hyphens in the form 8-4-4-4-12. An example UUID is as + // follows: + // + // 123e4567-e89b-12d3-a456-42665544000 + // + // If you don't set this, a new UUID is created by APNs and returned in the + // response. + ApnsID string + + // A string which allows multiple notifications with the same collapse + // identifier to be displayed to the user as a single notification. The + // value should not exceed 64 bytes. + CollapseID string + + // A string containing hexadecimal bytes of the device token for the target + // device. + DeviceToken string + + // The topic of the remote notification, which is typically the bundle ID + // for your app. The certificate you create in the Apple Developer Member + // Center must include the capability for this topic. If your certificate + // includes multiple topics, you must specify a value for this header. 
If + // you omit this header and your APNs certificate does not specify multiple + // topics, the APNs server uses the certificate’s Subject as the default + // topic. + Topic string + + // An optional time at which the notification is no longer valid and can be + // discarded by APNs. If this value is in the past, APNs treats the + // notification as if it expires immediately and does not store the + // notification or attempt to redeliver it. If this value is left as the + // default (ie, Expiration.IsZero()) an expiration header will not added to + // the http request. + Expiration time.Time + + // The priority of the notification. Specify ether apns.PriorityHigh (10) or + // apns.PriorityLow (5) If you don't set this, the APNs server will set the + // priority to 10. + Priority int + + // A byte array containing the JSON-encoded payload of this push notification. + // Refer to "The Remote Notification Payload" section in the Apple Local and + // Remote Notification Programming Guide for more info. + Payload interface{} + + // The pushtype of the push notification. If this values is left as the + // default an apns-push-type header with value 'alert' will be added to the + // http request. + PushType EPushType +} + +// MarshalJSON converts the notification payload to JSON. +func (n *Notification) MarshalJSON() ([]byte, error) { + switch payload := n.Payload.(type) { + case string: + return []byte(payload), nil + case []byte: + return payload, nil + default: + return json.Marshal(payload) + } +} diff --git a/vendor/github.com/sideshow/apns2/payload/builder.go b/vendor/github.com/sideshow/apns2/payload/builder.go new file mode 100644 index 0000000000..a2ff30da10 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/payload/builder.go @@ -0,0 +1,402 @@ +// Package payload is a helper package which contains a payload +// builder to make constructing notification payloads easier. +package payload + +import "encoding/json" + +// InterruptionLevel defines the value for the payload aps interruption-level +type EInterruptionLevel string + +const ( + // InterruptionLevelPassive is used to indicate that notification be delivered in a passive manner. + InterruptionLevelPassive EInterruptionLevel = "passive" + + // InterruptionLevelActive is used to indicate the importance and delivery timing of a notification. + InterruptionLevelActive EInterruptionLevel = "active" + + // InterruptionLevelTimeSensitive is used to indicate the importance and delivery timing of a notification. + InterruptionLevelTimeSensitive EInterruptionLevel = "time-sensitive" + + // InterruptionLevelCritical is used to indicate the importance and delivery timing of a notification. + // This interruption level requires an approved entitlement from Apple. + // See: https://developer.apple.com/documentation/usernotifications/unnotificationinterruptionlevel/ + InterruptionLevelCritical EInterruptionLevel = "critical" +) + +// Payload represents a notification which holds the content that will be +// marshalled as JSON. 
+type Payload struct { + content map[string]interface{} +} + +type aps struct { + Alert interface{} `json:"alert,omitempty"` + Badge interface{} `json:"badge,omitempty"` + Category string `json:"category,omitempty"` + ContentAvailable int `json:"content-available,omitempty"` + InterruptionLevel EInterruptionLevel `json:"interruption-level,omitempty"` + MutableContent int `json:"mutable-content,omitempty"` + RelevanceScore interface{} `json:"relevance-score,omitempty"` + Sound interface{} `json:"sound,omitempty"` + ThreadID string `json:"thread-id,omitempty"` + URLArgs []string `json:"url-args,omitempty"` +} + +type alert struct { + Action string `json:"action,omitempty"` + ActionLocKey string `json:"action-loc-key,omitempty"` + Body string `json:"body,omitempty"` + LaunchImage string `json:"launch-image,omitempty"` + LocArgs []string `json:"loc-args,omitempty"` + LocKey string `json:"loc-key,omitempty"` + Title string `json:"title,omitempty"` + Subtitle string `json:"subtitle,omitempty"` + TitleLocArgs []string `json:"title-loc-args,omitempty"` + TitleLocKey string `json:"title-loc-key,omitempty"` + SummaryArg string `json:"summary-arg,omitempty"` + SummaryArgCount int `json:"summary-arg-count,omitempty"` +} + +type sound struct { + Critical int `json:"critical,omitempty"` + Name string `json:"name,omitempty"` + Volume float32 `json:"volume,omitempty"` +} + +// NewPayload returns a new Payload struct +func NewPayload() *Payload { + return &Payload{ + map[string]interface{}{ + "aps": &aps{}, + }, + } +} + +// Alert sets the aps alert on the payload. +// This will display a notification alert message to the user. +// +// {"aps":{"alert":alert}}` +func (p *Payload) Alert(alert interface{}) *Payload { + p.aps().Alert = alert + return p +} + +// Badge sets the aps badge on the payload. +// This will display a numeric badge on the app icon. +// +// {"aps":{"badge":b}} +func (p *Payload) Badge(b int) *Payload { + p.aps().Badge = b + return p +} + +// ZeroBadge sets the aps badge on the payload to 0. +// This will clear the badge on the app icon. +// +// {"aps":{"badge":0}} +func (p *Payload) ZeroBadge() *Payload { + p.aps().Badge = 0 + return p +} + +// UnsetBadge removes the badge attribute from the payload. +// This will leave the badge on the app icon unchanged. +// If you wish to clear the app icon badge, use ZeroBadge() instead. +// +// {"aps":{}} +func (p *Payload) UnsetBadge() *Payload { + p.aps().Badge = nil + return p +} + +// Sound sets the aps sound on the payload. +// This will play a sound from the app bundle, or the default sound otherwise. +// +// {"aps":{"sound":sound}} +func (p *Payload) Sound(sound interface{}) *Payload { + p.aps().Sound = sound + return p +} + +// ContentAvailable sets the aps content-available on the payload to 1. +// This will indicate to the app that there is new content available to download +// and launch the app in the background. +// +// {"aps":{"content-available":1}} +func (p *Payload) ContentAvailable() *Payload { + p.aps().ContentAvailable = 1 + return p +} + +// MutableContent sets the aps mutable-content on the payload to 1. +// This will indicate to the to the system to call your Notification Service +// extension to mutate or replace the notification's content. +// +// {"aps":{"mutable-content":1}} +func (p *Payload) MutableContent() *Payload { + p.aps().MutableContent = 1 + return p +} + +// Custom payload + +// Custom sets a custom key and value on the payload. +// This will add custom key/value data to the notification payload at root level. 
+// +// {"aps":{}, key:value} +func (p *Payload) Custom(key string, val interface{}) *Payload { + p.content[key] = val + return p +} + +// Alert dictionary + +// AlertTitle sets the aps alert title on the payload. +// This will display a short string describing the purpose of the notification. +// Apple Watch & Safari display this string as part of the notification interface. +// +// {"aps":{"alert":{"title":title}}} +func (p *Payload) AlertTitle(title string) *Payload { + p.aps().alert().Title = title + return p +} + +// AlertTitleLocKey sets the aps alert title localization key on the payload. +// This is the key to a title string in the Localizable.strings file for the +// current localization. See Localized Formatted Strings in Apple documentation +// for more information. +// +// {"aps":{"alert":{"title-loc-key":key}}} +func (p *Payload) AlertTitleLocKey(key string) *Payload { + p.aps().alert().TitleLocKey = key + return p +} + +// AlertTitleLocArgs sets the aps alert title localization args on the payload. +// These are the variable string values to appear in place of the format +// specifiers in title-loc-key. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"title-loc-args":args}}} +func (p *Payload) AlertTitleLocArgs(args []string) *Payload { + p.aps().alert().TitleLocArgs = args + return p +} + +// AlertSubtitle sets the aps alert subtitle on the payload. +// This will display a short string describing the purpose of the notification. +// Apple Watch & Safari display this string as part of the notification interface. +// +// {"aps":{"alert":{"subtitle":"subtitle"}}} +func (p *Payload) AlertSubtitle(subtitle string) *Payload { + p.aps().alert().Subtitle = subtitle + return p +} + +// AlertBody sets the aps alert body on the payload. +// This is the text of the alert message. +// +// {"aps":{"alert":{"body":body}}} +func (p *Payload) AlertBody(body string) *Payload { + p.aps().alert().Body = body + return p +} + +// AlertLaunchImage sets the aps launch image on the payload. +// This is the filename of an image file in the app bundle. The image is used +// as the launch image when users tap the action button or move the action +// slider. +// +// {"aps":{"alert":{"launch-image":image}}} +func (p *Payload) AlertLaunchImage(image string) *Payload { + p.aps().alert().LaunchImage = image + return p +} + +// AlertLocArgs sets the aps alert localization args on the payload. +// These are the variable string values to appear in place of the format +// specifiers in loc-key. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"loc-args":args}}} +func (p *Payload) AlertLocArgs(args []string) *Payload { + p.aps().alert().LocArgs = args + return p +} + +// AlertLocKey sets the aps alert localization key on the payload. +// This is the key to an alert-message string in the Localizable.strings file +// for the current localization. See Localized Formatted Strings in Apple +// documentation for more information. +// +// {"aps":{"alert":{"loc-key":key}}} +func (p *Payload) AlertLocKey(key string) *Payload { + p.aps().alert().LocKey = key + return p +} + +// AlertAction sets the aps alert action on the payload. +// This is the label of the action button, if the user sets the notifications +// to appear as alerts. This label should be succinct, such as “Details” or +// “Read more”. If omitted, the default value is “Show”. 
+// +// {"aps":{"alert":{"action":action}}} +func (p *Payload) AlertAction(action string) *Payload { + p.aps().alert().Action = action + return p +} + +// AlertActionLocKey sets the aps alert action localization key on the payload. +// This is the the string used as a key to get a localized string in the current +// localization to use for the notfication right button’s title instead of +// “View”. See Localized Formatted Strings in Apple documentation for more +// information. +// +// {"aps":{"alert":{"action-loc-key":key}}} +func (p *Payload) AlertActionLocKey(key string) *Payload { + p.aps().alert().ActionLocKey = key + return p +} + +// AlertSummaryArg sets the aps alert summary arg key on the payload. +// This is the string that is used as a key to fill in an argument +// at the bottom of a notification to provide more context, such as +// a name associated with the sender of the notification. +// +// {"aps":{"alert":{"summary-arg":key}}} +func (p *Payload) AlertSummaryArg(key string) *Payload { + p.aps().alert().SummaryArg = key + return p +} + +// AlertSummaryArgCount sets the aps alert summary arg count key on the payload. +// This integer sets a custom "weight" on the notification, effectively +// allowing a notification to be viewed internally as two. For example if +// a notification encompasses 3 messages, you can set it to 3. +// +// {"aps":{"alert":{"summary-arg-count":key}}} +func (p *Payload) AlertSummaryArgCount(key int) *Payload { + p.aps().alert().SummaryArgCount = key + return p +} + +// General + +// Category sets the aps category on the payload. +// This is a string value that represents the identifier property of the +// UIMutableUserNotificationCategory object you created to define custom actions. +// +// {"aps":{"category":category}} +func (p *Payload) Category(category string) *Payload { + p.aps().Category = category + return p +} + +// Mdm sets the mdm on the payload. +// This is for Apple Mobile Device Management (mdm) payloads. +// +// {"aps":{}:"mdm":mdm} +func (p *Payload) Mdm(mdm string) *Payload { + p.content["mdm"] = mdm + return p +} + +// ThreadID sets the aps thread id on the payload. +// This is for the purpose of updating the contents of a View Controller in a +// Notification Content app extension when a new notification arrives. If a +// new notification arrives whose thread-id value matches the thread-id of the +// notification already being displayed, the didReceiveNotification method +// is called. +// +// {"aps":{"thread-id":id}} +func (p *Payload) ThreadID(threadID string) *Payload { + p.aps().ThreadID = threadID + return p +} + +// URLArgs sets the aps category on the payload. +// This specifies an array of values that are paired with the placeholders +// inside the urlFormatString value of your website.json file. +// See Apple Notification Programming Guide for Websites. +// +// {"aps":{"url-args":urlArgs}} +func (p *Payload) URLArgs(urlArgs []string) *Payload { + p.aps().URLArgs = urlArgs + return p +} + +// SoundName sets the name value on the aps sound dictionary. +// This function makes the notification a critical alert, which should be pre-approved by Apple. +// See: https://developer.apple.com/contact/request/notifications-critical-alerts-entitlement/ +// +// {"aps":{"sound":{"critical":1,"name":name,"volume":1.0}}} +func (p *Payload) SoundName(name string) *Payload { + p.aps().sound().Name = name + return p +} + +// SoundVolume sets the volume value on the aps sound dictionary. 
+// This function makes the notification a critical alert, which should be pre-approved by Apple. +// See: https://developer.apple.com/contact/request/notifications-critical-alerts-entitlement/ +// +// {"aps":{"sound":{"critical":1,"name":"default","volume":volume}}} +func (p *Payload) SoundVolume(volume float32) *Payload { + p.aps().sound().Volume = volume + return p +} + +// InterruptionLevel defines the value for the payload aps interruption-level +// This is to indicate the importance and delivery timing of a notification. +// (Using InterruptionLevelCritical requires an approved entitlement from Apple.) +// See: https://developer.apple.com/documentation/usernotifications/unnotificationinterruptionlevel/ +// +// {"aps":{"interruption-level":passive}} +func (p *Payload) InterruptionLevel(interruptionLevel EInterruptionLevel) *Payload { + p.aps().InterruptionLevel = interruptionLevel + return p +} + +// The relevance score, a number between 0 and 1, +// that the system uses to sort the notifications from your app. +// The highest score gets featured in the notification summary. +// See https://developer.apple.com/documentation/usernotifications/unnotificationcontent/3821031-relevancescore. +// +// {"aps":{"relevance-score":0.1}} +func (p *Payload) RelevanceScore(b float32) *Payload { + p.aps().RelevanceScore = b + return p +} + +// Unsets the relevance score +// that the system uses to sort the notifications from your app. +// The highest score gets featured in the notification summary. +// See https://developer.apple.com/documentation/usernotifications/unnotificationcontent/3821031-relevancescore. +// +// {"aps":{"relevance-score":0.1}} +func (p *Payload) UnsetRelevanceScore() *Payload { + p.aps().RelevanceScore = nil + return p +} + +// MarshalJSON returns the JSON encoded version of the Payload +func (p *Payload) MarshalJSON() ([]byte, error) { + return json.Marshal(p.content) +} + +func (p *Payload) aps() *aps { + return p.content["aps"].(*aps) +} + +func (a *aps) alert() *alert { + if _, ok := a.Alert.(*alert); !ok { + a.Alert = &alert{} + } + return a.Alert.(*alert) +} + +func (a *aps) sound() *sound { + if _, ok := a.Sound.(*sound); !ok { + a.Sound = &sound{Critical: 1, Name: "default", Volume: 1.0} + } + return a.Sound.(*sound) +} diff --git a/vendor/github.com/sideshow/apns2/response.go b/vendor/github.com/sideshow/apns2/response.go new file mode 100644 index 0000000000..99d6345634 --- /dev/null +++ b/vendor/github.com/sideshow/apns2/response.go @@ -0,0 +1,156 @@ +package apns2 + +import ( + "net/http" + "strconv" + "time" +) + +// StatusSent is a 200 response. +const StatusSent = http.StatusOK + +// The possible Reason error codes returned from APNs. From table 4 in the +// Handling Notification Responses from APNs article +const ( + // 400 The collapse identifier exceeds the maximum allowed size + ReasonBadCollapseID = "BadCollapseId" + + // 400 The specified device token was bad. Verify that the request contains a + // valid token and that the token matches the environment. + ReasonBadDeviceToken = "BadDeviceToken" + + // 400 The apns-expiration value is bad. + ReasonBadExpirationDate = "BadExpirationDate" + + // 400 The apns-id value is bad. + ReasonBadMessageID = "BadMessageId" + + // 400 The apns-priority value is bad. + ReasonBadPriority = "BadPriority" + + // 400 The apns-topic was invalid. + ReasonBadTopic = "BadTopic" + + // 400 The device token does not match the specified topic. 
+ ReasonDeviceTokenNotForTopic = "DeviceTokenNotForTopic" + + // 400 One or more headers were repeated. + ReasonDuplicateHeaders = "DuplicateHeaders" + + // 400 Idle time out. + ReasonIdleTimeout = "IdleTimeout" + + // 400 The apns-push-type value is invalid. + ReasonInvalidPushType = "InvalidPushType" + + // 400 The device token is not specified in the request :path. Verify that the + // :path header contains the device token. + ReasonMissingDeviceToken = "MissingDeviceToken" + + // 400 The apns-topic header of the request was not specified and was + // required. The apns-topic header is mandatory when the client is connected + // using a certificate that supports multiple topics. + ReasonMissingTopic = "MissingTopic" + + // 400 The message payload was empty. + ReasonPayloadEmpty = "PayloadEmpty" + + // 400 Pushing to this topic is not allowed. + ReasonTopicDisallowed = "TopicDisallowed" + + // 403 The certificate was bad. + ReasonBadCertificate = "BadCertificate" + + // 403 The client certificate was for the wrong environment. + ReasonBadCertificateEnvironment = "BadCertificateEnvironment" + + // 403 The provider token is stale and a new token should be generated. + ReasonExpiredProviderToken = "ExpiredProviderToken" + + // 403 The specified action is not allowed. + ReasonForbidden = "Forbidden" + + // 403 The provider token is not valid or the token signature could not be + // verified. + ReasonInvalidProviderToken = "InvalidProviderToken" + + // 403 No provider certificate was used to connect to APNs and Authorization + // header was missing or no provider token was specified. + ReasonMissingProviderToken = "MissingProviderToken" + + // 404 The request contained a bad :path value. + ReasonBadPath = "BadPath" + + // 405 The specified :method was not POST. + ReasonMethodNotAllowed = "MethodNotAllowed" + + // 410 The device token is inactive for the specified topic. + ReasonUnregistered = "Unregistered" + + // 413 The message payload was too large. See Creating the Remote Notification + // Payload in the Apple Local and Remote Notification Programming Guide for + // details on maximum payload size. + ReasonPayloadTooLarge = "PayloadTooLarge" + + // 429 The provider token is being updated too often. + ReasonTooManyProviderTokenUpdates = "TooManyProviderTokenUpdates" + + // 429 Too many requests were made consecutively to the same device token. + ReasonTooManyRequests = "TooManyRequests" + + // 500 An internal server error occurred. + ReasonInternalServerError = "InternalServerError" + + // 503 The service is unavailable. + ReasonServiceUnavailable = "ServiceUnavailable" + + // 503 The server is shutting down. + ReasonShutdown = "Shutdown" +) + +// Response represents a result from the APNs gateway indicating whether a +// notification was accepted or rejected and (if applicable) the metadata +// surrounding the rejection. +type Response struct { + + // The HTTP status code returned by APNs. + // A 200 value indicates that the notification was successfully sent. + // For a list of other possible status codes, see table 6-4 in the Apple Local + // and Remote Notification Programming Guide. + StatusCode int + + // The APNs error string indicating the reason for the notification failure (if + // any). The error code is specified as a string. For a list of possible + // values, see the Reason constants above. + // If the notification was accepted, this value will be "". + Reason string + + // The APNs ApnsID value from the Notification. 
If you didn't set an ApnsID on the + // Notification, this will be a new unique UUID which has been created by APNs. + ApnsID string + + // If the value of StatusCode is 410, this is the last time at which APNs + // confirmed that the device token was no longer valid for the topic. + Timestamp Time +} + +// Sent returns whether or not the notification was successfully sent. +// This is the same as checking if the StatusCode == 200. +func (c *Response) Sent() bool { + return c.StatusCode == StatusSent +} + +// Time represents a device uninstall time +type Time struct { + time.Time +} + +// UnmarshalJSON converts an epoch date into a Time struct. +func (t *Time) UnmarshalJSON(b []byte) error { + ts, err := strconv.ParseInt(string(b), 10, 64) + if err != nil { + return err + } + t.Time = time.Unix(ts/1000, 0) + return nil +} diff --git a/vendor/github.com/sideshow/apns2/token/token.go b/vendor/github.com/sideshow/apns2/token/token.go new file mode 100644 index 0000000000..26fec563dd --- /dev/null +++ b/vendor/github.com/sideshow/apns2/token/token.go @@ -0,0 +1,107 @@ +package token + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" + "io/ioutil" + "sync" + "time" + + "github.com/golang-jwt/jwt/v4" +) + +const ( + // TokenTimeout is the period of time in seconds that a token is valid for. + // If the timestamp for token issue is not within the last hour, APNs + // rejects subsequent push messages. This is set to under an hour so that + // we generate a new token before the existing one expires. + TokenTimeout = 3000 +) + +// Possible errors when parsing a .p8 file. +var ( + ErrAuthKeyNotPem = errors.New("token: AuthKey must be a valid .p8 PEM file") + ErrAuthKeyNotECDSA = errors.New("token: AuthKey must be of type ecdsa.PrivateKey") + ErrAuthKeyNil = errors.New("token: AuthKey was nil") +) + +// Token represents an Apple Provider Authentication Token (JSON Web Token). +type Token struct { + sync.Mutex + AuthKey *ecdsa.PrivateKey + KeyID string + TeamID string + IssuedAt int64 + Bearer string +} + +// AuthKeyFromFile loads a .p8 certificate from a local file and returns a +// *ecdsa.PrivateKey. +func AuthKeyFromFile(filename string) (*ecdsa.PrivateKey, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return AuthKeyFromBytes(bytes) +} + +// AuthKeyFromBytes loads a .p8 certificate from an in memory byte array and +// returns an *ecdsa.PrivateKey. +func AuthKeyFromBytes(bytes []byte) (*ecdsa.PrivateKey, error) { + block, _ := pem.Decode(bytes) + if block == nil { + return nil, ErrAuthKeyNotPem + } + key, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + if pk, ok := key.(*ecdsa.PrivateKey); ok { + return pk, nil + } + return nil, ErrAuthKeyNotECDSA +} + +// GenerateIfExpired checks to see if the token is about to expire and +// generates a new token. +func (t *Token) GenerateIfExpired() (bearer string) { + t.Lock() + defer t.Unlock() + if t.Expired() { + t.Generate() + } + return t.Bearer +} + +// Expired checks to see if the token has expired. +func (t *Token) Expired() bool { + return time.Now().Unix() >= (t.IssuedAt + TokenTimeout) +} + +// Generate creates a new token. 
+func (t *Token) Generate() (bool, error) { + if t.AuthKey == nil { + return false, ErrAuthKeyNil + } + issuedAt := time.Now().Unix() + jwtToken := &jwt.Token{ + Header: map[string]interface{}{ + "alg": "ES256", + "kid": t.KeyID, + }, + Claims: jwt.MapClaims{ + "iss": t.TeamID, + "iat": issuedAt, + }, + Method: jwt.SigningMethodES256, + } + bearer, err := jwtToken.SignedString(t.AuthKey) + if err != nil { + return false, err + } + t.IssuedAt = issuedAt + t.Bearer = bearer + return true, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ad277b460d..005ba037f9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -354,6 +354,11 @@ github.com/rinchsan/device-check-go # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/sideshow/apns2 v0.23.0 +## explicit; go 1.15 +github.com/sideshow/apns2 +github.com/sideshow/apns2/payload +github.com/sideshow/apns2/token # github.com/tidepool-org/clinic/client v0.0.0-20240629034458-1365c8963143 ## explicit; go 1.22 github.com/tidepool-org/clinic/client From 92131073912d60bfc5dddcf2c4ebd60aaf01f123 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 26 Jun 2024 09:48:04 -0600 Subject: [PATCH 08/15] adapts sarama.Logger to implement log.Logger So that sarama log messages better follow our standards, and will be emitted as JSON when log.Logger is configured for that. Before this change, the sarama logs were printed in plain-text without any of the benefits of the platform log.Logger. BACK-2554 --- data/service/service/standard.go | 21 ++++++++++++------- log/sarama.go | 35 ++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 log/sarama.go diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 80911f4b20..20456e91e0 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -2,15 +2,12 @@ package service import ( "context" - "log" - "os" - - "github.com/tidepool-org/platform/clinics" "github.com/IBM/sarama" eventsCommon "github.com/tidepool-org/go-common/events" "github.com/tidepool-org/platform/application" + "github.com/tidepool-org/platform/clinics" dataDeduplicatorDeduplicator "github.com/tidepool-org/platform/data/deduplicator/deduplicator" dataDeduplicatorFactory "github.com/tidepool-org/platform/data/deduplicator/factory" dataEvents "github.com/tidepool-org/platform/data/events" @@ -22,7 +19,7 @@ import ( dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/events" - logInternal "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/log" metricClient "github.com/tidepool-org/platform/metric/client" "github.com/tidepool-org/platform/permission" permissionClient "github.com/tidepool-org/platform/permission/client" @@ -87,6 +84,9 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeDataSourceClient(); err != nil { return err } + if err := s.initializeSaramaLogger(); err != nil { + return err + } if err := s.initializeUserEventsHandler(); err != nil { return err } @@ -406,9 +406,8 @@ func (s *Standard) initializeServer() error { func (s *Standard) initializeUserEventsHandler() error { s.Logger().Debug("Initializing user events handler") - sarama.Logger = log.New(os.Stdout, "SARAMA ", log.LstdFlags|log.Lshortfile) - ctx := logInternal.NewContextWithLogger(context.Background(), s.Logger()) + ctx := 
log.NewContextWithLogger(context.Background(), s.Logger()) handler := dataEvents.NewUserDataDeletionHandler(ctx, s.dataStore, s.dataSourceStructuredStore) handlers := []eventsCommon.EventHandler{handler} runner := events.NewRunner(handlers) @@ -419,3 +418,11 @@ func (s *Standard) initializeUserEventsHandler() error { return nil } + +func (s *Standard) initializeSaramaLogger() error { + // Multiple properties of Standard use the sarama package. This is + // intended to be the one place that the sarama Logger is initialized, + // before any of the properties that need it are run. + sarama.Logger = log.NewSarama(s.Logger()) + return nil +} diff --git a/log/sarama.go b/log/sarama.go new file mode 100644 index 0000000000..d09576c5a6 --- /dev/null +++ b/log/sarama.go @@ -0,0 +1,35 @@ +package log + +import ( + "fmt" + "strings" + + "github.com/IBM/sarama" +) + +// NewSarama returns a [Logger] adapted to implement [sarama.StdLogger]. +func NewSarama(l Logger) sarama.StdLogger { + return &SaramaLogger{Logger: l.WithField("SARAMA", "1")} +} + +// SaramaLogger wraps a [Logger] to implement [sarama.StdLogger]. +// +// Sarama doesn't support the concept of logging levels, so all messages will +// use the info level. +type SaramaLogger struct { + Logger +} + +func (l *SaramaLogger) Print(args ...interface{}) { + l.Logger.Info(fmt.Sprint(args...)) +} + +func (l *SaramaLogger) Printf(format string, args ...interface{}) { + // Sarama log messages sent via this method include a newline, which + // doesn't fit with Logger's style, so remove it. + l.Logger.Infof(strings.TrimSuffix(format, "\n"), args...) +} + +func (l *SaramaLogger) Println(args ...interface{}) { + l.Logger.Info(fmt.Sprint(args...)) +} From 2deaa4721893ed0bebe0e4ebd84ad0a8cac4d0b9 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Wed, 8 May 2024 14:34:37 -0600 Subject: [PATCH 09/15] adapts go-common's asyncevents.SaramaEventsConsumer for alerts The existing FaultTolerantConsumer isn't used because it's retry semantics are hard-wired and aren't compatible with what care partner alerting's needs. Note: A proper implementation of AlertsEventsConsumer to consume events is yet to be written. It will follow shortly. BACK-2554 --- data/events/events.go | 139 +++++++++++ data/events/events_suite_test.go | 34 +++ data/events/events_test.go | 163 ++++++++++++ go.mod | 2 +- go.sum | 4 +- log/gocommon_adapter.go | 54 ++++ .../go-common/asyncevents/sarama.go | 235 ++++++++++++++++++ .../tidepool-org/go-common/events/config.go | 25 +- vendor/modules.txt | 3 +- 9 files changed, 644 insertions(+), 15 deletions(-) create mode 100644 data/events/events_suite_test.go create mode 100644 data/events/events_test.go create mode 100644 log/gocommon_adapter.go create mode 100644 vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go diff --git a/data/events/events.go b/data/events/events.go index 3e41a0630d..10f9a664f9 100644 --- a/data/events/events.go +++ b/data/events/events.go @@ -2,7 +2,14 @@ package events import ( "context" + "fmt" + "log/slog" + "sync" + "time" + "github.com/IBM/sarama" + + "github.com/tidepool-org/go-common/asyncevents" ev "github.com/tidepool-org/go-common/events" dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" @@ -58,3 +65,135 @@ func (u *userDeletionEventsHandler) HandleDeleteUserEvent(payload ev.DeleteUserE } return nil } + +// AlertsEventRetryDelayMaximum is the maximum delay between consumption +// retries. 
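+//
+// It caps the delays produced by CappedExponentialBinaryDelay, so retry
+// delays grow exponentially until they reach this ceiling.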
+const AlertsEventRetryDelayMaximum = time.Minute + +// AlertsEventRetries is the maximum consumption attempts before giving up. +const AlertsEventRetries = 1000 + +// AlertsEventConsumptionTimeout is the maximum time to process an alerts event. +const AlertsEventConsumptionTimeout = 30 * time.Second + +// SaramaRunner interfaces between events.Runner and go-common's +// asyncevents.SaramaEventsConsumer. +type SaramaRunner struct { + EventsRunner SaramaEventsRunner + Config SaramaRunnerConfig + cancelCtx context.CancelFunc + cancelMu sync.Mutex +} + +// SaramaEventsRunner is implemented by go-common's +// asyncevents.SaramaEventsRunner. +type SaramaEventsRunner interface { + Run(ctx context.Context) error +} + +// SaramaRunnerConfig collects values needed to initialize a SaramaRunner. +// +// This provides isolation for the SaramaRunner from ConfigReporter, +// envconfig, or any of the other options in platform for reading config +// values. +type SaramaRunnerConfig struct { + Brokers []string + GroupID string + Logger log.Logger + Topics []string + MessageConsumer asyncevents.SaramaMessageConsumer + + Sarama *sarama.Config +} + +func (r *SaramaRunner) Initialize() error { + group, err := sarama.NewConsumerGroup(r.Config.Brokers, r.Config.GroupID, r.Config.Sarama) + if err != nil { + return errors.Wrap(err, "Unable to build sarama consumer group") + } + handler := asyncevents.NewSaramaConsumerGroupHandler(&asyncevents.NTimesRetryingConsumer{ + Consumer: r.Config.MessageConsumer, + Delay: CappedExponentialBinaryDelay(AlertsEventRetryDelayMaximum), + Times: AlertsEventRetries, + Logger: r.logger, + }, AlertsEventConsumptionTimeout) + r.EventsRunner = asyncevents.NewSaramaEventsConsumer(group, handler, r.Config.Topics...) + return nil +} + +func (r *SaramaRunner) logger(ctx context.Context) asyncevents.Logger { + // Prefer a logger from the context. + if ctxLogger := log.LoggerFromContext(ctx); ctxLogger != nil { + return &log.GoCommonAdapter{Logger: ctxLogger} + } + if r.Config.Logger != nil { + return &log.GoCommonAdapter{Logger: r.Config.Logger} + } + // No known log.Logger could be found, default to slog. + return slog.Default() +} + +// Run adapts platform's event.Runner to work with go-common's +// asyncevents.SaramaEventsConsumer. +func (r *SaramaRunner) Run() error { + if r.EventsRunner == nil { + return errors.New("Unable to run SaramaRunner, EventsRunner is nil") + } + + r.cancelMu.Lock() + ctx, err := func() (context.Context, error) { + defer r.cancelMu.Unlock() + if r.cancelCtx != nil { + return nil, errors.New("Unable to Run SaramaRunner, it's already initialized") + } + var ctx context.Context + ctx, r.cancelCtx = context.WithCancel(context.Background()) + return ctx, nil + }() + if err != nil { + return err + } + if err := r.EventsRunner.Run(ctx); err != nil { + return errors.Wrap(err, "Unable to Run SaramaRunner") + } + return nil +} + +// Terminate adapts platform's event.Runner to work with go-common's +// asyncevents.SaramaEventsConsumer. +func (r *SaramaRunner) Terminate() error { + r.cancelMu.Lock() + defer r.cancelMu.Unlock() + if r.cancelCtx == nil { + return errors.New("Unable to Terminate SaramaRunner, it's not running") + } + r.cancelCtx() + return nil +} + +// CappedExponentialBinaryDelay builds delay functions that use exponential +// binary backoff with a maximum duration. 
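+//
+// For example, with a cap of one minute the returned function yields 1s, 2s,
+// 4s, 8s, 16s, and 32s for tries 0 through 5, and one minute for every try
+// after that.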
+func CappedExponentialBinaryDelay(cap time.Duration) func(int) time.Duration { + return func(tries int) time.Duration { + b := asyncevents.DelayExponentialBinary(tries) + if b > cap { + return cap + } + return b + } +} + +// TODO: implement me!! +type AlertsEventsConsumer struct { + Consumer asyncevents.SaramaMessageConsumer +} + +func (c *AlertsEventsConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) error { + err := c.Consumer.Consume(ctx, session, message) + if err != nil { + session.MarkMessage(message, fmt.Sprintf("I have given up after error: %s", err)) + return err + } + return nil +} diff --git a/data/events/events_suite_test.go b/data/events/events_suite_test.go new file mode 100644 index 0000000000..4bab08b129 --- /dev/null +++ b/data/events/events_suite_test.go @@ -0,0 +1,34 @@ +package events + +import ( + "log/slog" + "os" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/tidepool-org/platform/test" +) + +func TestSuite(t *testing.T) { + test.Test(t) +} + +var _ = BeforeSuite(func() { + slog.SetDefault(devNullSlogLogger(GinkgoT())) +}) + +// Cleaner is part of testing.T and FullGinkgoTInterface +type Cleaner interface { + Cleanup(func()) +} + +func devNullSlogLogger(c Cleaner) *slog.Logger { + f, err := os.Open(os.DevNull) + Expect(err).To(Succeed()) + c.Cleanup(func() { + Expect(f.Close()).To(Succeed()) + }) + return slog.New(slog.NewTextHandler(f, nil)) +} diff --git a/data/events/events_test.go b/data/events/events_test.go new file mode 100644 index 0000000000..492a059376 --- /dev/null +++ b/data/events/events_test.go @@ -0,0 +1,163 @@ +package events + +import ( + "context" + "log/slog" + "sync" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/tidepool-org/platform/log" + "github.com/tidepool-org/platform/log/test" +) + +var _ = Describe("SaramaRunner", func() { + Context("has a lifecycle", func() { + newTestRunner := func() *SaramaRunner { + return &SaramaRunner{ + Config: SaramaRunnerConfig{}, + EventsRunner: &mockEventsRunner{}, + } + } + It("starts with Run() and stops with Terminate()", func() { + r := newTestRunner() + var runErr error + var errMu sync.Mutex + launched := make(chan struct{}, 1) + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + runErr = r.Run() + launched <- struct{}{} + }() + }() + <-launched + time.Sleep(time.Millisecond) + errMu.Lock() + defer errMu.Unlock() + + Expect(r.Terminate()).To(Succeed()) + Eventually(runErr).WithTimeout(10 * time.Millisecond).Should(Succeed()) + }) + + Describe("Run()", func() { + var errMu sync.Mutex + + It("can be started only once", func() { + r := newTestRunner() + var firstRunErr, secondRunErr error + launched := make(chan struct{}, 2) + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + firstRunErr = r.Run() + launched <- struct{}{} + }() + }() + go func() { + errMu.Lock() + func() { + defer errMu.Unlock() + secondRunErr = r.Run() + launched <- struct{}{} + }() + + }() + <-launched + <-launched + errMu.Lock() + defer errMu.Unlock() + + // The above doesn't _guarantee_ that Run has been called twice, + // but... it should work. 
+ + Expect(r.Terminate()).To(Succeed()) + if firstRunErr != nil { + Expect(firstRunErr).To(MatchError(ContainSubstring("it's already initialized"))) + Expect(secondRunErr).To(Succeed()) + } else { + Expect(firstRunErr).To(Succeed()) + Expect(secondRunErr).To(MatchError(ContainSubstring("it's already initialized"))) + } + }) + + It("can't be Terminate()'d before it's Run()", func() { + r := newTestRunner() + Expect(r.Terminate()).To(MatchError(ContainSubstring("it's not running"))) + }) + }) + }) + + Describe("logger", func() { + It("prefers a context's logger", func() { + testLogger := test.NewLogger() + ctxLogger := test.NewLogger() + r := &SaramaRunner{ + Config: SaramaRunnerConfig{Logger: testLogger}, + } + + ctx := log.NewContextWithLogger(context.Background(), ctxLogger) + got := r.logger(ctx) + + goCommonLogger, ok := got.(*log.GoCommonAdapter) + Expect(ok).To(BeTrue()) + Expect(goCommonLogger.Logger).To(Equal(ctxLogger)) + }) + + Context("without a context logger", func() { + It("uses the configured logger", func() { + testLogger := test.NewLogger() + r := &SaramaRunner{ + Config: SaramaRunnerConfig{ + Logger: testLogger, + }, + } + + got := r.logger(context.Background()) + + goCommonLogger, ok := got.(*log.GoCommonAdapter) + Expect(ok).To(BeTrue()) + Expect(goCommonLogger.Logger).To(Equal(testLogger)) + }) + + Context("or any configured logger", func() { + It("doesn't panic", func() { + r := &SaramaRunner{Config: SaramaRunnerConfig{}} + ctx := context.Background() + got := r.logger(ctx) + + Expect(func() { + got.Log(ctx, slog.LevelInfo, "testing") + }).ToNot(Panic()) + }) + }) + }) + }) + + DescribeTable("CappedExponentialBinaryDelay", + func(cap time.Duration, input int, output time.Duration) { + f := CappedExponentialBinaryDelay(cap) + Expect(f(input)).To(Equal(output)) + }, + Entry("cap: 1m; tries: 0", time.Minute, 0, time.Second), + Entry("cap: 1m; tries: 1", time.Minute, 1, 2*time.Second), + Entry("cap: 1m; tries: 2", time.Minute, 2, 4*time.Second), + Entry("cap: 1m; tries: 3", time.Minute, 3, 8*time.Second), + Entry("cap: 1m; tries: 4", time.Minute, 4, 16*time.Second), + Entry("cap: 1m; tries: 5", time.Minute, 5, 32*time.Second), + Entry("cap: 1m; tries: 6", time.Minute, 6, time.Minute), + Entry("cap: 1m; tries: 20", time.Minute, 20, time.Minute), + ) +}) + +type mockEventsRunner struct { + Err error +} + +func (r *mockEventsRunner) Run(ctx context.Context) error { + return r.Err +} diff --git a/go.mod b/go.mod index 732d4c5ce7..00a0932514 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/sideshow/apns2 v0.23.0 github.com/tidepool-org/clinic/client v0.0.0-20240629034458-1365c8963143 github.com/tidepool-org/devices/api v0.0.0-20240412011010-75b16d8daec0 - github.com/tidepool-org/go-common v0.12.2-0.20240612192926-de6d5c5a742c + github.com/tidepool-org/go-common v0.12.2-0.20240711192928-70d1d0216072 github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace github.com/urfave/cli v1.22.15 go.mongodb.org/mongo-driver v1.16.0 diff --git a/go.sum b/go.sum index a58a9f5d2c..137f60d3cd 100644 --- a/go.sum +++ b/go.sum @@ -183,8 +183,8 @@ github.com/tidepool-org/clinic/client v0.0.0-20240629034458-1365c8963143 h1:A6qF github.com/tidepool-org/clinic/client v0.0.0-20240629034458-1365c8963143/go.mod h1:7BpAdFdGJNB3aw/xvCz5XnWjSWRoUtWIX4xcMc4Bsko= github.com/tidepool-org/devices/api v0.0.0-20240412011010-75b16d8daec0 h1:SnIsHwaJE2kltz9xMfjC+L9SWhQS3w6t+mSIIoFiwag= github.com/tidepool-org/devices/api v0.0.0-20240412011010-75b16d8daec0/go.mod 
h1:pu8FmuhxtBbOyswB94Mr15l2heCxY357lVHOVVAEZq8= -github.com/tidepool-org/go-common v0.12.2-0.20240612192926-de6d5c5a742c h1:hJZyiHNGeqyLA/5p60/0H9CZtJi4fAuzOuyQF0TpF7E= -github.com/tidepool-org/go-common v0.12.2-0.20240612192926-de6d5c5a742c/go.mod h1:mIzYteUyPf//fhee4e2KEZhmcm2iE4IQ/2dyQr5pRKA= +github.com/tidepool-org/go-common v0.12.2-0.20240711192928-70d1d0216072 h1:wJqm4BwEHGQkY+0LVcnvxKeW9CKpQ3FGvYHwaQMXlCY= +github.com/tidepool-org/go-common v0.12.2-0.20240711192928-70d1d0216072/go.mod h1:mIzYteUyPf//fhee4e2KEZhmcm2iE4IQ/2dyQr5pRKA= github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace h1:L0UiCj2eL/NOpLa19Tf5IgoK6feILmdA+zK3nCTIhqU= github.com/tidepool-org/hydrophone/client v0.0.0-20240613043503-6c32828b1ace/go.mod h1:gon+x+jAh8DZZ2hD23fBWqrYwOizVSwIBbxEsuXCbZ4= github.com/urfave/cli v1.22.15 h1:nuqt+pdC/KqswQKhETJjo7pvn/k4xMUxgW6liI7XpnM= diff --git a/log/gocommon_adapter.go b/log/gocommon_adapter.go new file mode 100644 index 0000000000..14a35cbfaa --- /dev/null +++ b/log/gocommon_adapter.go @@ -0,0 +1,54 @@ +package log + +import ( + "context" + "fmt" + "log/slog" +) + +// GoCommonAdapter implements gocommon's asyncevents.Logger interface. +// +// It adapts a Logger for the purpose. +type GoCommonAdapter struct { + Logger Logger +} + +func (a *GoCommonAdapter) Log(ctx context.Context, level slog.Level, msg string, args ...any) { + logger := a.Logger + if fields := a.fieldsFromArgs(args); len(fields) > 0 { + logger = logger.WithFields(fields) + } + logger.Log(SlogLevelToLevel[level], msg) +} + +// fieldsFromArgs builds a Fields following the same rules as slog.Log. +// +// As Fields is a map instead of a slice, !BADKEY becomes !BADKEY[x] where +// x is the index counter of the value. See the godoc for slog.Log for +// details. +func (a *GoCommonAdapter) fieldsFromArgs(args []any) Fields { + fields := Fields{} + for i := 0; i < len(args); i++ { + switch v := args[i].(type) { + case slog.Attr: + fields[v.Key] = v.Value + case string: + if i+1 < len(args) { + fields[v] = args[i+1] + i++ + } else { + fields[fmt.Sprintf("!BADKEY[%d]", i)] = v + } + default: + fields[fmt.Sprintf("!BADKEY[%d]", i)] = v + } + } + return fields +} + +var SlogLevelToLevel = map[slog.Level]Level{ + slog.LevelDebug: DebugLevel, + slog.LevelInfo: InfoLevel, + slog.LevelWarn: WarnLevel, + slog.LevelError: ErrorLevel, +} diff --git a/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go b/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go new file mode 100644 index 0000000000..94d4f5c618 --- /dev/null +++ b/vendor/github.com/tidepool-org/go-common/asyncevents/sarama.go @@ -0,0 +1,235 @@ +package asyncevents + +import ( + "context" + "errors" + "fmt" + "log" + "log/slog" + "math" + "time" + + "github.com/IBM/sarama" +) + +// SaramaEventsConsumer consumes Kafka messages for asynchronous event +// handling. +type SaramaEventsConsumer struct { + Handler sarama.ConsumerGroupHandler + ConsumerGroup sarama.ConsumerGroup + Topics []string +} + +func NewSaramaEventsConsumer(consumerGroup sarama.ConsumerGroup, + handler sarama.ConsumerGroupHandler, topics ...string) *SaramaEventsConsumer { + + return &SaramaEventsConsumer{ + ConsumerGroup: consumerGroup, + Handler: handler, + Topics: topics, + } +} + +// Run the consumer, to begin consuming Kafka messages. +// +// Run is stopped by its context being canceled. When its context is canceled, +// it returns nil. 
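+//
+// Errors other than context cancellation end the loop and are returned;
+// otherwise Consume is entered again, for example after a consumer group
+// rebalance.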
+func (p *SaramaEventsConsumer) Run(ctx context.Context) (err error) { + defer canceledContextReturnsNil(&err) + + for { + err := p.ConsumerGroup.Consume(ctx, p.Topics, p.Handler) + if err != nil { + return err + } + if ctxErr := ctx.Err(); ctxErr != nil { + return ctxErr + } + } +} + +// canceledContextReturnsNil checks for a context.Canceled error, and when +// found, returns nil instead. +// +// It is meant to be called via defer. +func canceledContextReturnsNil(err *error) { + if err != nil && errors.Is(*err, context.Canceled) { + *err = nil + } +} + +// SaramaConsumerGroupHandler implements sarama.ConsumerGroupHandler. +type SaramaConsumerGroupHandler struct { + Consumer SaramaMessageConsumer + ConsumerTimeout time.Duration +} + +// NewSaramaConsumerGroupHandler builds a consumer group handler. +// +// A timeout of 0 will use DefaultMessageConsumptionTimeout. +func NewSaramaConsumerGroupHandler(consumer SaramaMessageConsumer, timeout time.Duration) *SaramaConsumerGroupHandler { + if timeout == 0 { + timeout = DefaultMessageConsumptionTimeout + } + return &SaramaConsumerGroupHandler{ + Consumer: consumer, + ConsumerTimeout: timeout, + } +} + +const ( + // DefaultMessageConsumptionTimeout is the default time to allow + // SaramaMessageConsumer.Consume to work before canceling. + DefaultMessageConsumptionTimeout = 30 * time.Second +) + +// Setup implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil } + +// Cleanup implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil } + +// ConsumeClaim implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, + claim sarama.ConsumerGroupClaim) error { + + for message := range claim.Messages() { + err := func() error { + ctx, cancel := context.WithTimeout(context.Background(), h.ConsumerTimeout) + defer cancel() + return h.Consumer.Consume(ctx, session, message) + }() + switch { + case errors.Is(err, context.DeadlineExceeded): + log.Print(err) + case !errors.Is(err, nil): + return err + } + } + return nil +} + +// Close implements sarama.ConsumerGroupHandler. +func (h *SaramaConsumerGroupHandler) Close() error { return nil } + +// SaramaMessageConsumer processes Kafka messages. +type SaramaMessageConsumer interface { + // Consume should process a message. + // + // Consume is responsible for marking the message consumed, unless the + // context is canceled, in which case the caller should retry, or mark the + // message as appropriate. + Consume(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error +} + +var ErrRetriesLimitExceeded = errors.New("retry limit exceeded") + +// NTimesRetryingConsumer enhances a SaramaMessageConsumer with a finite +// number of immediate retries. +// +// The delay between each retry can be controlled via the Delay property. If +// no Delay property is specified, a delay based on the Fibonacci sequence is +// used. +// +// Logger is intentionally minimal. The slog.Log function is used by default. +type NTimesRetryingConsumer struct { + Times int + Consumer SaramaMessageConsumer + Delay func(tries int) time.Duration + Logger func(ctx context.Context) Logger +} + +// Logger is an intentionally minimal interface for basic logging. +// +// It matches the signature of slog.Log. 
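+//
+// In particular, *slog.Logger satisfies it, which is what
+// NTimesRetryingConsumer falls back to when no Logger factory is configured.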
+type Logger interface { + Log(ctx context.Context, level slog.Level, msg string, args ...any) +} + +func (c *NTimesRetryingConsumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, message *sarama.ConsumerMessage) error { + + var joinedErrors error + var tries int = 0 + var delay time.Duration = 0 + if c.Delay == nil { + c.Delay = DelayFibonacci + } + if c.Logger == nil { + c.Logger = func(_ context.Context) Logger { return slog.Default() } + } + logger := c.Logger(ctx) + done := ctx.Done() + for tries < c.Times { + select { + case <-done: + if ctxErr := ctx.Err(); ctxErr != nil { + return ctxErr + } + return nil + case <-time.After(delay): + err := c.Consumer.Consume(ctx, session, message) + if err == nil { + return nil + } + if c.isContextErr(err) { + return err + } else if errors.Is(err, nil) { + return nil + } + delay = c.Delay(tries) + logger.Log(ctx, slog.LevelInfo, "failure consuming Kafka message, will retry", + slog.Attr{Key: "tries", Value: slog.IntValue(tries)}, + slog.Attr{Key: "times", Value: slog.IntValue(c.Times)}, + slog.Attr{Key: "delay", Value: slog.DurationValue(delay)}, + slog.Attr{Key: "err", Value: slog.AnyValue(err)}, + ) + joinedErrors = errors.Join(joinedErrors, err) + tries++ + } + } + + return errors.Join(joinedErrors, c.retryLimitError()) +} + +func (c *NTimesRetryingConsumer) isContextErr(err error) bool { + return errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) +} + +func (c *NTimesRetryingConsumer) retryLimitError() error { + return fmt.Errorf("%w (%d)", ErrRetriesLimitExceeded, c.Times) +} + +// DelayNone is a function returning a constant "no delay" of 0 seconds. +var DelayNone = func(_ int) time.Duration { return DelayConstant(0) } + +// DelayConstant is a function returning a constant number of seconds. +func DelayConstant(n int) time.Duration { return time.Duration(n) * time.Second } + +// DelayExponentialBinary returns a binary exponential delay. +// +// The delay is 2**tries seconds. +func DelayExponentialBinary(tries int) time.Duration { + return time.Second * time.Duration(math.Pow(2, float64(tries))) +} + +// DelayFibonacci returns a delay based on the Fibonacci sequence. +func DelayFibonacci(tries int) time.Duration { + return time.Second * time.Duration(Fib(tries)) +} + +// Fib returns the nth number in the Fibonacci sequence. 
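+//
+// The sequence is computed iteratively, starting 0, 1, 1, 2, 3, 5, so Fib(0)
+// is 0 and both Fib(1) and Fib(2) are 1.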
+func Fib(n int) int { + if n == 0 { + return 0 + } else if n < 3 { + return 1 + } + + n1, n2 := 1, 1 + for i := 3; i <= n; i++ { + n1, n2 = n1+n2, n1 + } + + return n1 +} diff --git a/vendor/github.com/tidepool-org/go-common/events/config.go b/vendor/github.com/tidepool-org/go-common/events/config.go index a07d70ed6e..c486f9c8ee 100644 --- a/vendor/github.com/tidepool-org/go-common/events/config.go +++ b/vendor/github.com/tidepool-org/go-common/events/config.go @@ -2,6 +2,7 @@ package events import ( "errors" + "github.com/IBM/sarama" "github.com/kelseyhightower/envconfig" ) @@ -9,17 +10,19 @@ import ( const DeadLetterSuffix = "-dead-letters" type CloudEventsConfig struct { - EventSource string `envconfig:"CLOUD_EVENTS_SOURCE" required:"false"` - KafkaBrokers []string `envconfig:"KAFKA_BROKERS" required:"true"` - KafkaConsumerGroup string `envconfig:"KAFKA_CONSUMER_GROUP" required:"false"` - KafkaTopic string `envconfig:"KAFKA_TOPIC" default:"events"` - KafkaDeadLettersTopic string `envconfig:"KAFKA_DEAD_LETTERS_TOPIC"` - KafkaTopicPrefix string `envconfig:"KAFKA_TOPIC_PREFIX" required:"true"` - KafkaRequireSSL bool `envconfig:"KAFKA_REQUIRE_SSL" required:"true"` - KafkaVersion string `envconfig:"KAFKA_VERSION" required:"true"` - KafkaUsername string `envconfig:"KAFKA_USERNAME" required:"false"` - KafkaPassword string `envconfig:"KAFKA_PASSWORD" required:"false"` - SaramaConfig *sarama.Config + EventSource string `envconfig:"CLOUD_EVENTS_SOURCE" required:"false"` + KafkaBrokers []string `envconfig:"KAFKA_BROKERS" required:"true"` + KafkaConsumerGroup string `envconfig:"KAFKA_CONSUMER_GROUP" required:"false"` + KafkaTopic string `envconfig:"KAFKA_TOPIC" default:"events"` + KafkaDeadLettersTopic string `envconfig:"KAFKA_DEAD_LETTERS_TOPIC"` + KafkaTopicPrefix string `envconfig:"KAFKA_TOPIC_PREFIX" required:"true"` + KafkaRequireSSL bool `envconfig:"KAFKA_REQUIRE_SSL" required:"true"` + KafkaVersion string `envconfig:"KAFKA_VERSION" required:"true"` + KafkaUsername string `envconfig:"KAFKA_USERNAME" required:"false"` + KafkaPassword string `envconfig:"KAFKA_PASSWORD" required:"false"` + KafkaAlertsConfigTopics []string `envconfig:"KAFKA_ALERTS_CONFIG_TOPICS" default:"alerts"` + KafkaAlertsDataTopics []string `envconfig:"KAFKA_DATA_ALERTS_TOPICS" default:"data.alerts"` + SaramaConfig *sarama.Config } func NewConfig() *CloudEventsConfig { diff --git a/vendor/modules.txt b/vendor/modules.txt index 005ba037f9..79a5d3471e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -365,8 +365,9 @@ github.com/tidepool-org/clinic/client # github.com/tidepool-org/devices/api v0.0.0-20240412011010-75b16d8daec0 ## explicit; go 1.22 github.com/tidepool-org/devices/api -# github.com/tidepool-org/go-common v0.12.2-0.20240612192926-de6d5c5a742c +# github.com/tidepool-org/go-common v0.12.2-0.20240711192928-70d1d0216072 ## explicit; go 1.22 +github.com/tidepool-org/go-common/asyncevents github.com/tidepool-org/go-common/clients github.com/tidepool-org/go-common/clients/disc github.com/tidepool-org/go-common/clients/hakken From a31dd5c965bc237e7f03ddb74a60809cf21c4413 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Tue, 2 Jul 2024 13:14:29 -0600 Subject: [PATCH 10/15] allow invites to set an upload id The upload id is necessary to ensure that only the proper device data uploads are evaluated for care partner alert conditions. 
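
A rough sketch of the distinction, using the alerts.Config fields touched by
this change; the variable names are illustrative only:

    // Illustrative only: for a service session the decoded body is the full
    // config, so the upload id is honored; for a user session only the
    // alerts portion of the body is decoded and any upload id is ignored.
    cfg := &alerts.Config{
        UserID:         followerUserID, // illustrative variable names
        FollowedUserID: followedUserID,
        UploadID:       uploadID, // accepted only from service sessions
        Alerts:         incomingAlerts,
    }
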
BACK-2554 --- data/service/api/v1/alerts.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/data/service/api/v1/alerts.go b/data/service/api/v1/alerts.go index 70941b9e20..a0aa2a354e 100644 --- a/data/service/api/v1/alerts.go +++ b/data/service/api/v1/alerts.go @@ -115,8 +115,13 @@ func UpsertAlert(dCtx service.Context) { return } - a := &alerts.Alerts{} - if err := request.DecodeRequestBody(r.Request, a); err != nil { + incomingCfg := &alerts.Config{} + var bodyReceiver interface{} = &incomingCfg.Alerts + if authDetails.IsService() && authDetails.UserID() == "" { + // Accept upload id only from services. + bodyReceiver = incomingCfg + } + if err := request.DecodeRequestBody(r.Request, bodyReceiver); err != nil { dCtx.RespondWithError(platform.ErrorJSONMalformed()) return } @@ -127,7 +132,12 @@ func UpsertAlert(dCtx service.Context) { return } - cfg := &alerts.Config{UserID: path.UserID, FollowedUserID: path.FollowedUserID, Alerts: *a} + cfg := &alerts.Config{ + UserID: path.UserID, + FollowedUserID: path.FollowedUserID, + UploadID: incomingCfg.UploadID, + Alerts: incomingCfg.Alerts, + } if err := repo.Upsert(ctx, cfg); err != nil { dCtx.RespondWithError(platform.ErrorInternalServerFailure()) lgr.WithError(err).Error("upserting alerts config") From 13dc4c639e1cc840a903040e4daaea160de55c6f Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 8 Jul 2024 13:36:56 -0600 Subject: [PATCH 11/15] integrates an APNs pusher into data service If the necessary configuration isn't found, then push notifications will instead be logged. BACK-2554 --- alerts/config.go | 8 +++++++ data/service/service/standard.go | 38 +++++++++++++++++++++++++++++++ push/logpush.go | 39 ++++++++++++++++++++++++++++++++ push/push.go | 28 +++++++++++++++++++++++ 4 files changed, 113 insertions(+) create mode 100644 push/logpush.go diff --git a/alerts/config.go b/alerts/config.go index b6f8334656..9437c6a5ff 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -269,3 +269,11 @@ type Repository interface { EnsureIndexes() error } + +// Note gathers information necessary for sending an alert notification. +type Note struct { + // Message communicates the alert to the recipient. 
+ Message string + RecipientUserID string + FollowedUserID string +} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index 20456e91e0..c08104f24d 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -4,6 +4,7 @@ import ( "context" "github.com/IBM/sarama" + "github.com/kelseyhightower/envconfig" eventsCommon "github.com/tidepool-org/go-common/events" "github.com/tidepool-org/platform/application" @@ -17,6 +18,7 @@ import ( dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" dataSourceStoreStructuredMongo "github.com/tidepool-org/platform/data/source/store/structured/mongo" dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" + "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/events" "github.com/tidepool-org/platform/log" @@ -24,6 +26,7 @@ import ( "github.com/tidepool-org/platform/permission" permissionClient "github.com/tidepool-org/platform/permission/client" "github.com/tidepool-org/platform/platform" + "github.com/tidepool-org/platform/push" "github.com/tidepool-org/platform/service/server" "github.com/tidepool-org/platform/service/service" storeStructuredMongo "github.com/tidepool-org/platform/store/structured/mongo" @@ -41,6 +44,7 @@ type Standard struct { dataClient *Client clinicsClient *clinics.Client dataSourceClient *dataSourceServiceClient.Client + pusher Pusher userEventsHandler events.Runner api *api.Standard server *server.Standard @@ -87,6 +91,9 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeSaramaLogger(); err != nil { return err } + if err := s.initializePusher(); err != nil { + return err + } if err := s.initializeUserEventsHandler(); err != nil { return err } @@ -426,3 +433,34 @@ func (s *Standard) initializeSaramaLogger() error { sarama.Logger = log.NewSarama(s.Logger()) return nil } + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} + +func (s *Standard) initializePusher() error { + var err error + + apns2Config := &struct { + SigningKey []byte `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_SIGNING_KEY"` + KeyID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_KEY_ID"` + BundleID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_BUNDLE_ID"` + TeamID string `envconfig:"TIDEPOOL_DATA_SERVICE_PUSHER_APNS_TEAM_ID"` + }{} + if err := envconfig.Process("", apns2Config); err != nil { + return errors.Wrap(err, "Unable to process APNs pusher config") + } + + var pusher Pusher + pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, + apns2Config.TeamID, apns2Config.BundleID) + if err != nil { + s.Logger().WithError(err).Warn("falling back to logging of push notifications") + pusher = push.NewLogPusher(s.Logger()) + } + s.pusher = pusher + + return nil +} diff --git a/push/logpush.go b/push/logpush.go new file mode 100644 index 0000000000..a313806a87 --- /dev/null +++ b/push/logpush.go @@ -0,0 +1,39 @@ +package push + +import ( + "context" + "os" + + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" + lognull "github.com/tidepool-org/platform/log/null" +) + +// LogPusher logs notifications instead of sending push notifications. 
+// +// Useful for dev or testing situations. +type LogPusher struct { + log.Logger +} + +// NewLogPusher uses a [log.Logger] instead of pushing via APNs. +func NewLogPusher(l log.Logger) *LogPusher { + if l == nil { + var err error + l, err = logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + l = lognull.NewLogger() + } + } + return &LogPusher{Logger: l} +} + +// Push implements [service.Pusher]. +func (p *LogPusher) Push(ctx context.Context, deviceToken *devicetokens.DeviceToken, note *Notification) error { + p.Logger.WithFields(log.Fields{ + "deviceToken": deviceToken, + "note": note, + }).Info("logging push notification") + return nil +} diff --git a/push/push.go b/push/push.go index 419cd395b3..bca2d45988 100644 --- a/push/push.go +++ b/push/push.go @@ -11,6 +11,7 @@ import ( "github.com/sideshow/apns2/payload" "github.com/sideshow/apns2/token" + "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/log" @@ -21,6 +22,17 @@ type Notification struct { Message string } +// String implements fmt.Stringer. +func (n Notification) String() string { + return n.Message +} + +func FromNote(note *alerts.Note) *Notification { + return &Notification{ + Message: note.Message, + } +} + // APNSPusher implements push notifications via Apple APNs. type APNSPusher struct { BundleID string @@ -47,6 +59,22 @@ func NewAPNSPusher(client APNS2Client, bundleID string) *APNSPusher { // // https://developer.apple.com/documentation/usernotifications/sending-notification-requests-to-apns func NewAPNSPusherFromKeyData(signingKey []byte, keyID, teamID, bundleID string) (*APNSPusher, error) { + if len(signingKey) == 0 { + return nil, errors.New("Unable to build APNSPusher: APNs signing key is blank") + } + + if bundleID == "" { + return nil, errors.New("Unable to build APNSPusher: bundleID is blank") + } + + if keyID == "" { + return nil, errors.New("Unable to build APNSPusher: keyID is blank") + } + + if teamID == "" { + return nil, errors.New("Unable to build APNSPusher: teamID is blank") + } + authKey, err := token.AuthKeyFromBytes(signingKey) if err != nil { return nil, err From b5141dca763f79ea6541bc7a72ebf06342271a7e Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 8 Jul 2024 13:53:41 -0600 Subject: [PATCH 12/15] adds Evaluate methods to alerts.Config These methods return Note objects that can be sent as push notifications. NotLooping evaluation will be handled in a later commit. 
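
A rough sketch of the expected call site; the surrounding variables (ctx, cfg,
glucoseData, dosingDecisions) are illustrative and not part of this change:

    // Illustrative only: evaluate a follower's alerts config against recent
    // glucose data and dosing decisions.
    note := cfg.Evaluate(ctx, glucoseData, dosingDecisions)
    if note != nil {
        // The note identifies the recipient and followed users, and can be
        // converted for delivery with push.FromNote.
        notification := push.FromNote(note)
        _ = notification // delivery is outside the scope of this commit
    }
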
BACK-2554 --- alerts/config.go | 270 ++++++++++++++++- alerts/config_test.go | 684 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 933 insertions(+), 21 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index 9437c6a5ff..f26387c1b8 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -6,10 +6,15 @@ import ( "bytes" "context" "encoding/json" + "slices" "time" "github.com/tidepool-org/platform/data" - "github.com/tidepool-org/platform/data/blood/glucose" + nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" "github.com/tidepool-org/platform/structure" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/user" @@ -50,6 +55,45 @@ func (c Config) Validate(validator structure.Validator) { c.Alerts.Validate(validator) } +// Evaluate alerts in the context of the provided data. +// +// While this method, or the methods it calls, can fail, there's no point in returning an +// error. Instead errors are logged before continuing. This is to ensure that any possible alert +// that should be triggered, will be triggered. +func (c Config) Evaluate(ctx context.Context, gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Note { + n := c.Alerts.Evaluate(ctx, gd, dd) + if n != nil { + n.FollowedUserID = c.FollowedUserID + n.RecipientUserID = c.UserID + } + if lgr := log.LoggerFromContext(ctx); lgr != nil { + lgr.WithField("note", n).Info("evaluated alert") + } + + return n +} + +// LongestDelay of the delays set on enabled alerts. +func (a Alerts) LongestDelay() time.Duration { + delays := []time.Duration{} + if a.Low != nil && a.Low.Enabled { + delays = append(delays, a.Low.Delay.Duration()) + } + if a.High != nil && a.High.Enabled { + delays = append(delays, a.High.Delay.Duration()) + } + if a.NotLooping != nil && a.NotLooping.Enabled { + delays = append(delays, a.NotLooping.Delay.Duration()) + } + if a.NoCommunication != nil && a.NoCommunication.Enabled { + delays = append(delays, a.NoCommunication.Delay.Duration()) + } + if len(delays) == 0 { + return 0 + } + return slices.Max(delays) +} + func (a Alerts) Validate(validator structure.Validator) { if a.UrgentLow != nil { a.UrgentLow.Validate(validator) @@ -68,6 +112,41 @@ func (a Alerts) Validate(validator structure.Validator) { } } +// Evaluate a user's data to determine if notifications are indicated. +// +// Evaluations are performed according to priority. The process is +// "short-circuited" at the first indicated notification. +func (a Alerts) Evaluate(ctx context.Context, + gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Note { + + if a.NoCommunication != nil && a.NoCommunication.Enabled { + if n := a.NoCommunication.Evaluate(ctx, gd); n != nil { + return n + } + } + if a.UrgentLow != nil && a.UrgentLow.Enabled { + if n := a.UrgentLow.Evaluate(ctx, gd); n != nil { + return n + } + } + if a.Low != nil && a.Low.Enabled { + if n := a.Low.Evaluate(ctx, gd); n != nil { + return n + } + } + if a.High != nil && a.High.Enabled { + if n := a.High.Evaluate(ctx, gd); n != nil { + return n + } + } + if a.NotLooping != nil && a.NotLooping.Enabled { + if n := a.NotLooping.Evaluate(ctx, dd); n != nil { + return n + } + } + return nil +} + // Base describes the minimum specifics of a desired alert. 
type Base struct { // Enabled controls whether notifications should be sent for this alert. @@ -81,6 +160,13 @@ func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) } +func (b Base) Evaluate(ctx context.Context, data []*glucose.Glucose) *Note { + if lgr := log.LoggerFromContext(ctx); lgr != nil { + lgr.Warn("alerts.Base.Evaluate called, this shouldn't happen!") + } + return nil +} + type Activity struct { // Triggered records the last time this alert was triggered. Triggered time.Time `json:"triggered" bson:"triggered"` @@ -132,6 +218,46 @@ func (a UrgentLowAlert) Validate(validator structure.Validator) { a.Threshold.Validate(validator) } +// Evaluate urgent low condition. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { + lgr := log.LoggerFromContext(ctx) + if len(data) == 0 { + lgr.Debug("no data to evaluate for urgent low") + return nil + } + datum := data[0] + okDatum, okThreshold, err := validateGlucoseAlertDatum(datum, a.Threshold) + if err != nil { + lgr.WithError(err).Warn("Unable to evaluate urgent low") + return nil + } + defer func() { logGlucoseAlertEvaluation(lgr, "urgent low", note, okDatum, okThreshold) }() + active := okDatum < okThreshold + if !active { + if a.IsActive() { + a.Resolved = time.Now() + } + return nil + } + if !a.IsActive() { + a.Triggered = time.Now() + } + return &Note{Message: genGlucoseThresholdMessage("below urgent low")} +} + +func validateGlucoseAlertDatum(datum *glucose.Glucose, t Threshold) (float64, float64, error) { + if datum.Blood.Units == nil || datum.Blood.Value == nil || datum.Blood.Time == nil { + return 0, 0, errors.Newf("Unable to evaluate datum: Units, Value, or Time is nil") + } + threshold := nontypesglucose.NormalizeValueForUnits(&t.Value, datum.Blood.Units) + if threshold == nil { + return 0, 0, errors.Newf("Unable to normalize threshold units: normalized to nil") + } + return *datum.Blood.Value, *threshold, nil +} + // NotLoopingAlert extends Base with a delay. type NotLoopingAlert struct { Base `bson:",inline"` @@ -144,6 +270,16 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 2*time.Hour) } +// Evaluate if the device is looping. +func (a NotLoopingAlert) Evaluate(ctx context.Context, decisions []*dosingdecision.DosingDecision) (note *Note) { + // TODO will be implemented in the near future. + return nil +} + +// DosingDecisionReasonLoop is specified in a [dosingdecision.DosingDecision] to indicate that +// the decision is part of a loop adjustment (as opposed to bolus or something else). +const DosingDecisionReasonLoop string = "loop" + // NoCommunicationAlert extends Base with a delay. type NoCommunicationAlert struct { Base `bson:",inline"` @@ -156,6 +292,26 @@ func (a NoCommunicationAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 6*time.Hour) } +// Evaluate if CGM data is being received by Tidepool. +// +// Assumes data is pre-sorted by Time in descending order. 
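+//
+// If no datum has a usable Time (including when data is empty), newest keeps
+// its zero value, which reads as far in the past, so a Note is returned.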
+func (a NoCommunicationAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) *Note { + var newest time.Time + for _, d := range data { + if d != nil && d.Time != nil && !(*d.Time).IsZero() { + newest = *d.Time + break + } + } + if time.Since(newest) > a.Delay.Duration() { + return &Note{Message: NoCommunicationMessage} + } + + return nil +} + +const NoCommunicationMessage = "Tidepool is unable to communicate with a user's device" + // LowAlert extends Base with threshold and a delay. type LowAlert struct { Base `bson:",inline"` @@ -178,6 +334,51 @@ func (a LowAlert) Validate(validator structure.Validator) { validator.Duration("repeat", &repeatDur).Using(validateRepeat) } +// Evaluate the given data to determine if an alert should be sent. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { + lgr := log.LoggerFromContext(ctx) + if len(data) == 0 { + lgr.Debug("no data to evaluate for low") + return nil + } + var eventBegan time.Time + var okDatum, okThreshold float64 + var err error + defer func() { logGlucoseAlertEvaluation(lgr, "low", note, okDatum, okThreshold) }() + for _, datum := range data { + okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) + if err != nil { + lgr.WithError(err).Debug("Skipping low alert datum evaluation") + continue + } + active := okDatum < okThreshold + if !active { + break + } + if (*datum.Time).Before(eventBegan) || eventBegan.IsZero() { + eventBegan = *datum.Time + } + } + if eventBegan.IsZero() { + if a.IsActive() { + a.Resolved = time.Now() + } + return nil + } + if !a.IsActive() { + if time.Since(eventBegan) > a.Delay.Duration() { + a.Triggered = time.Now() + } + } + return &Note{Message: genGlucoseThresholdMessage("below low")} +} + +func genGlucoseThresholdMessage(alertType string) string { + return "Glucose reading " + alertType + " threshold" +} + // HighAlert extends Base with a threshold and a delay. type HighAlert struct { Base `bson:",inline"` @@ -200,6 +401,57 @@ func (a HighAlert) Validate(validator structure.Validator) { validator.Duration("repeat", &repeatDur).Using(validateRepeat) } +// Evaluate the given data to determine if an alert should be sent. +// +// Assumes data is pre-sorted in descending order by Time. +func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { + lgr := log.LoggerFromContext(ctx) + if len(data) == 0 { + lgr.Debug("no data to evaluate for high") + return nil + } + var eventBegan time.Time + var okDatum, okThreshold float64 + var err error + defer func() { logGlucoseAlertEvaluation(lgr, "high", note, okDatum, okThreshold) }() + for _, datum := range data { + okDatum, okThreshold, err = validateGlucoseAlertDatum(datum, a.Threshold) + if err != nil { + lgr.WithError(err).Debug("Skipping high alert datum evaluation") + continue + } + active := okDatum > okThreshold + if !active { + break + } + if (*datum.Time).Before(eventBegan) || eventBegan.IsZero() { + eventBegan = *datum.Time + } + } + if eventBegan.IsZero() { + if a.IsActive() { + a.Resolved = time.Now() + } + return nil + } + if !a.IsActive() { + if time.Since(eventBegan) > a.Delay.Duration() { + a.Triggered = time.Now() + } + } + return &Note{Message: genGlucoseThresholdMessage("above high")} +} + +// logGlucoseAlertEvaluation is called during each glucose-based evaluation for record-keeping. 
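+//
+// The evaluated value, the configured threshold, and whether a Note was
+// produced are logged at info level, with the alert type as the message.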
+func logGlucoseAlertEvaluation(lgr log.Logger, alertType string, note *Note, value, threshold float64) { + fields := log.Fields{ + "isAlerting?": note != nil, + "threshold": threshold, + "value": value, + } + lgr.WithFields(fields).Info(alertType) +} + // DurationMinutes reads a JSON integer and converts it to a time.Duration. // // Values are specified in minutes. @@ -227,7 +479,7 @@ func (m DurationMinutes) Duration() time.Duration { return time.Duration(m) } -// ValueWithUnits binds a value to its units. +// ValueWithUnits binds a value with its units. // // Other types can extend it to parse and validate the Units. type ValueWithUnits struct { @@ -240,20 +492,20 @@ type Threshold ValueWithUnits // Validate implements structure.Validatable func (t Threshold) Validate(v structure.Validator) { - v.String("units", &t.Units).OneOf(glucose.MgdL, glucose.MmolL) + v.String("units", &t.Units).OneOf(nontypesglucose.MgdL, nontypesglucose.MmolL) // This is a sanity check. Client software will likely further constrain these values. The // broadness of these values allows clients to change their own min and max values // independently, and it sidesteps rounding and conversion conflicts between the backend and // clients. var max, min float64 switch t.Units { - case glucose.MgdL, glucose.Mgdl: - max = glucose.MgdLMaximum - min = glucose.MgdLMinimum + case nontypesglucose.MgdL, nontypesglucose.Mgdl: + max = nontypesglucose.MgdLMaximum + min = nontypesglucose.MgdLMinimum v.Float64("value", &t.Value).InRange(min, max) - case glucose.MmolL, glucose.Mmoll: - max = glucose.MmolLMaximum - min = glucose.MmolLMinimum + case nontypesglucose.MmolL, nontypesglucose.Mmoll: + max = nontypesglucose.MmolLMaximum + min = nontypesglucose.MmolLMinimum v.Float64("value", &t.Value).InRange(min, max) default: v.WithReference("value").ReportError(validator.ErrorValueNotValid()) diff --git a/alerts/config_test.go b/alerts/config_test.go index 1d17b5a852..74d7ca6110 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -2,6 +2,7 @@ package alerts import ( "bytes" + "context" "fmt" "strings" "testing" @@ -10,7 +11,13 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/tidepool-org/platform/data/blood/glucose" + nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/pointer" "github.com/tidepool-org/platform/request" "github.com/tidepool-org/platform/structure/validator" "github.com/tidepool-org/platform/test" @@ -76,15 +83,15 @@ var _ = Describe("Config", func() { Expect(conf.High.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(conf.High.Delay).To(Equal(DurationMinutes(5 * time.Minute))) Expect(conf.High.Threshold.Value).To(Equal(10.0)) - Expect(conf.High.Threshold.Units).To(Equal(glucose.MmolL)) + Expect(conf.High.Threshold.Units).To(Equal(nontypesglucose.MmolL)) Expect(conf.Low.Enabled).To(Equal(true)) Expect(conf.Low.Repeat).To(Equal(DurationMinutes(30 * time.Minute))) Expect(conf.Low.Delay).To(Equal(DurationMinutes(10 * time.Minute))) Expect(conf.Low.Threshold.Value).To(Equal(80.0)) - Expect(conf.Low.Threshold.Units).To(Equal(glucose.MgdL)) + Expect(conf.Low.Threshold.Units).To(Equal(nontypesglucose.MgdL)) Expect(conf.UrgentLow.Enabled).To(Equal(false)) Expect(conf.UrgentLow.Threshold.Value).To(Equal(47.5)) - Expect(conf.UrgentLow.Threshold.Units).To(Equal(glucose.MgdL)) + Expect(conf.UrgentLow.Threshold.Units).To(Equal(nontypesglucose.MgdL)) Expect(conf.NotLooping.Enabled).To(Equal(true)) Expect(conf.NotLooping.Delay).To(Equal(DurationMinutes(4 * time.Minute))) Expect(conf.NoCommunication.Enabled).To(Equal(true)) @@ -125,6 +132,44 @@ var _ = Describe("Config", func() { }) }) + Describe("Evaluate", func() { + Context("when a note is returned", func() { + It("injects the userIDs", func() { + ctx := contextWithTestLogger() + mockGlucoseData := []*glucose.Glucose{ + { + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(0.0), + }, + }, + } + conf := Config{ + UserID: mockUserID1, + FollowedUserID: mockUserID2, + Alerts: Alerts{ + UrgentLow: &UrgentLowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10, + Units: nontypesglucose.MmolL, + }, + }, + }, + } + + note := conf.Evaluate(ctx, mockGlucoseData, nil) + + Expect(note).ToNot(BeNil()) + Expect(note.RecipientUserID).To(Equal(mockUserID1)) + Expect(note.FollowedUserID).To(Equal(mockUserID2)) + }) + }) + }) + Context("Base", func() { Context("Activity", func() { Context("IsActive()", func() { @@ -173,6 +218,18 @@ var _ = Describe("Config", func() { }) }) + var testGlucoseDatum = func(v float64) *glucose.Glucose { + return &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(v), + }, + } + } + Context("UrgentLowAlert", func() { Context("Threshold", func() { It("accepts values between 0 and 1000 mg/dL", func() { @@ -197,6 +254,138 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value -1 is not between 0 and 1000")) }) }) + + Context("Evaluate", func() { + testUrgentLow := func() *UrgentLowAlert { + return &UrgentLowAlert{ + Threshold: Threshold{ + Value: 4.0, + Units: nontypesglucose.MmolL, + }, + } + } + + It("handles being passed empty data", func() { + ctx := 
contextWithTestLogger() + var note *Note + + alert := testUrgentLow() + + Expect(func() { + note = alert.Evaluate(ctx, []*glucose.Glucose{}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + Expect(func() { + note = alert.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + + It("logs evaluation results", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testGlucoseDatum(1.1)} + + alert := testUrgentLow() + + Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logtest.Logger) + lgr.AssertLog(log.InfoLevel, "urgent low", log.Fields{ + "threshold": 4.0, + "value": 1.1, + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testUrgentLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testUrgentLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + was := alert.Resolved + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + + alert := testUrgentLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) + + It("validates glucose data", func() { + ctx := contextWithTestLogger() + var note *Note + + Expect(func() { + note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + }).ToNot(Panic()) + Expect(note).ToNot(BeNil()) + + badUnits := testGlucoseDatum(1) + badUnits.Units = nil + Expect(func() { + note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badValue := testGlucoseDatum(1) + badValue.Value = nil + Expect(func() { + note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badTime := testGlucoseDatum(1) + badTime.Time = nil + Expect(func() { + note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + }) + }) }) Context("LowAlert", func() { @@ -256,6 +445,137 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value 6h1m0s is not between 0s and 6h0m0s")) }) }) + + Context("Evaluate", func() { + testLow := func() *LowAlert { + return &LowAlert{ + Threshold: Threshold{ + Value: 4.0, + Units: nontypesglucose.MmolL, + }, + } + } + + It("handles being passed empty data", func() { + ctx := contextWithTestLogger() + var note *Note + + alert := testLow() + + Expect(func() { + note = alert.Evaluate(ctx, 
[]*glucose.Glucose{}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + Expect(func() { + note = alert.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + + It("logs evaluation results", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testGlucoseDatum(1.1)} + + alert := testLow() + + Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logtest.Logger) + lgr.AssertLog(log.InfoLevel, "low", log.Fields{ + "threshold": 4.0, + "value": 1.1, + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + was := alert.Resolved + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + + alert := testLow() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) + + It("validates glucose data", func() { + ctx := contextWithTestLogger() + var note *Note + + Expect(func() { + note = testLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + }).ToNot(Panic()) + Expect(note).ToNot(BeNil()) + + badUnits := testGlucoseDatum(1) + badUnits.Units = nil + Expect(func() { + note = testLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badValue := testGlucoseDatum(1) + badValue.Value = nil + Expect(func() { + note = testLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badTime := testGlucoseDatum(1) + badTime.Time = nil + Expect(func() { + note = testLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + }) }) Context("HighAlert", func() { @@ -308,6 +628,137 @@ var _ = Describe("Config", func() { Expect(val.Error()).To(MatchError("value 6h1m0s is not between 0s and 6h0m0s")) }) }) + + Context("Evaluate", func() { + testHigh := func() *HighAlert { + return &HighAlert{ + Threshold: Threshold{ + Value: 20.0, + Units: nontypesglucose.MmolL, + }, + } + } + + It("handles being passed empty data", func() { + ctx := contextWithTestLogger() + var note *Note + + alert := testHigh() + + Expect(func() { + note = alert.Evaluate(ctx, []*glucose.Glucose{}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + Expect(func() { + note = alert.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + + 
It("logs evaluation results", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testGlucoseDatum(21.1)} + + alert := testHigh() + + Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logtest.Logger) + lgr.AssertLog(log.InfoLevel, "high", log.Fields{ + "threshold": 20.0, + "value": 21.1, + "isAlerting?": true, + }) + }).ToNot(Panic()) + }) + + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testHigh() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + + alert := testHigh() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + was := alert.Resolved + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + + alert := testHigh() + + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) + + It("validates glucose data", func() { + ctx := contextWithTestLogger() + var note *Note + + Expect(func() { + note = testHigh().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21)}) + }).ToNot(Panic()) + Expect(note).ToNot(BeNil()) + + badUnits := testGlucoseDatum(1) + badUnits.Units = nil + Expect(func() { + note = testHigh().Evaluate(ctx, []*glucose.Glucose{badUnits}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badValue := testGlucoseDatum(1) + badValue.Value = nil + Expect(func() { + note = testHigh().Evaluate(ctx, []*glucose.Glucose{badValue}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + + badTime := testGlucoseDatum(1) + badTime.Time = nil + Expect(func() { + note = testHigh().Evaluate(ctx, []*glucose.Glucose{badTime}) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + }) + }) }) Context("NoCommunicationAlert", func() { @@ -365,7 +816,7 @@ var _ = Describe("Config", func() { Context("repeat", func() { var defaultAlert = LowAlert{ - Threshold: Threshold{Value: 11, Units: glucose.MmolL}, + Threshold: Threshold{Value: 11, Units: nontypesglucose.MmolL}, } It("accepts values of 0 (indicating disabled)", func() { @@ -446,7 +897,7 @@ var _ = Describe("Config", func() { "value": 47.5 } } -}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) +}`, mockUserID1, mockUserID2, mockUploadID, nontypesglucose.MgdL) cfg := &Config{} err := request.DecodeObject(nil, buf, cfg) Expect(err).To(MatchError("value -11m0s is not greater than or equal to 15m0s")) @@ -464,13 +915,217 @@ var _ = Describe("Config", func() { "value": 1 } } -}`, mockUserID1, mockUserID2, mockUploadID, glucose.MgdL) +}`, mockUserID1, mockUserID2, mockUploadID, 
nontypesglucose.MgdL) cfg := &Config{} err := request.DecodeObject(nil, buf, cfg) Expect(err).To(MatchError("json is malformed")) }) }) +var ( + testNoCommunicationAlert = func() *NoCommunicationAlert { + return &NoCommunicationAlert{ + Base: Base{Enabled: true}, + } + } + testLowAlert = func() *LowAlert { + return &LowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 4, + Units: nontypesglucose.MmolL, + }, + } + } + testHighAlert = func() *HighAlert { + return &HighAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 10, + Units: nontypesglucose.MmolL, + }, + } + } + testUrgentLowAlert = func() *UrgentLowAlert { + return &UrgentLowAlert{ + Base: Base{Enabled: true}, + Threshold: Threshold{ + Value: 3, + Units: nontypesglucose.MmolL, + }, + } + } + testNotLoopingAlert = func() *NotLoopingAlert { + return &NotLoopingAlert{ + Base: Base{Enabled: true}, + } + } + testNoCommunicationDatum = &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(11.0), + }, + } + testHighDatum = &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(11.0), + }, + } + testLowDatum = &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(3.9), + }, + } + testUrgentLowDatum = &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(2.9), + }, + } +) + +var _ = Describe("Alerts", func() { + Describe("LongestDelay", func() { + It("does what it says", func() { + noComm := testNoCommunicationAlert() + noComm.Delay = DurationMinutes(10 * time.Minute) + low := testLowAlert() + low.Delay = DurationMinutes(5 * time.Minute) + high := testHighAlert() + high.Delay = DurationMinutes(5 * time.Minute) + notLooping := testNotLoopingAlert() + notLooping.Delay = DurationMinutes(5 * time.Minute) + + a := Alerts{ + NoCommunication: noComm, + Low: low, + High: high, + NotLooping: notLooping, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(10 * time.Minute)) + }) + + It("ignores disabled alerts", func() { + noComm := testNoCommunicationAlert() + noComm.Delay = DurationMinutes(10 * time.Minute) + noComm.Enabled = false + low := testLowAlert() + low.Delay = DurationMinutes(7 * time.Minute) + high := testHighAlert() + high.Delay = DurationMinutes(5 * time.Minute) + notLooping := testNotLoopingAlert() + notLooping.Delay = DurationMinutes(5 * time.Minute) + + a := Alerts{ + NoCommunication: noComm, + Low: low, + High: high, + NotLooping: notLooping, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(7 * time.Minute)) + }) + + It("returns a Zero Duration when no alerts are set", func() { + a := Alerts{ + NoCommunication: nil, + Low: nil, + High: nil, + NotLooping: nil, + } + + delay := a.LongestDelay() + + Expect(delay).To(Equal(time.Duration(0))) + }) + }) + + Describe("Evaluate", func() { + Context("when not communicating", func() { + It("returns only NoCommunication alerts", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testNoCommunicationDatum} + data[0].Value = pointer.FromAny(0.0) + a := Alerts{ + NoCommunication: testNoCommunicationAlert(), + UrgentLow: testUrgentLowAlert(), + 
Low: testLowAlert(), + High: testHighAlert(), + } + + note := a.Evaluate(ctx, data, nil) + + Expect(note).To(HaveField("Message", ContainSubstring(NoCommunicationMessage))) + }) + }) + + It("logs decisions", func() { + Skip("TODO logAlertEvaluation") + }) + + It("detects low data", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testLowDatum} + a := Alerts{ + Low: testLowAlert(), + } + + note := a.Evaluate(ctx, data, nil) + + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring("below low threshold")) + }) + + It("detects high data", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testHighDatum} + a := Alerts{ + High: testHighAlert(), + } + + note := a.Evaluate(ctx, data, nil) + + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring("above high threshold")) + }) + + Context("with both low and urgent low alerts detected", func() { + It("prefers urgent low", func() { + ctx := contextWithTestLogger() + data := []*glucose.Glucose{testUrgentLowDatum} + a := Alerts{ + Low: testLowAlert(), + UrgentLow: testUrgentLowAlert(), + } + + note := a.Evaluate(ctx, data, nil) + + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring("below urgent low threshold")) + }) + }) + }) +}) + var _ = Describe("DurationMinutes", func() { It("parses 42", func() { d := DurationMinutes(0) @@ -506,20 +1161,20 @@ var _ = Describe("DurationMinutes", func() { var _ = Describe("Threshold", func() { It("accepts mg/dL", func() { - buf := buff(`{"units":"%s","value":42}`, glucose.MgdL) + buf := buff(`{"units":"%s","value":42}`, nontypesglucose.MgdL) threshold := &Threshold{} err := request.DecodeObject(nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(glucose.MgdL)) + Expect(threshold.Units).To(Equal(nontypesglucose.MgdL)) }) It("accepts mmol/L", func() { - buf := buff(`{"units":"%s","value":42}`, glucose.MmolL) + buf := buff(`{"units":"%s","value":42}`, nontypesglucose.MmolL) threshold := &Threshold{} err := request.DecodeObject(nil, buf, threshold) Expect(err).To(BeNil()) Expect(threshold.Value).To(Equal(42.0)) - Expect(threshold.Units).To(Equal(glucose.MmolL)) + Expect(threshold.Units).To(Equal(nontypesglucose.MmolL)) }) It("rejects lb/gal", func() { buf := buff(`{"units":"%s","value":42}`, "lb/gal") @@ -532,7 +1187,7 @@ var _ = Describe("Threshold", func() { Expect(err).Should(HaveOccurred()) }) It("is case-sensitive with respect to Units", func() { - badUnits := strings.ToUpper(glucose.MmolL) + badUnits := strings.ToUpper(nontypesglucose.MmolL) buf := buff(`{"units":"%s","value":42}`, badUnits) err := request.DecodeObject(nil, buf, &Threshold{}) Expect(err).Should(HaveOccurred()) @@ -544,3 +1199,8 @@ var _ = Describe("Threshold", func() { func buff(format string, args ...interface{}) *bytes.Buffer { return bytes.NewBufferString(fmt.Sprintf(format, args...)) } + +func contextWithTestLogger() context.Context { + lgr := logtest.NewLogger() + return log.NewContextWithLogger(context.Background(), lgr) +} From 5053a693b1fdef2692f448330eb3c77cee2a7ef3 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Mon, 8 Jul 2024 14:05:43 -0600 Subject: [PATCH 13/15] adds the alerts events consumer to the data service It uses the new asyncevents from go-common, as alerts processing requires different retry semantics than the existing solution. The Pusher interface is moved out of data/service into data/events to avoid a circular dependency. 
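For reference, the interface as it now lives in data/events (reproduced from the
diff below):

    // Pusher is a service-agnostic interface for sending push notifications.
    type Pusher interface {
        // Push a notification to a device.
        Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error
    }

With the interface defined next to its consumer, data/service can keep supplying
the concrete APNs pusher without data/events having to import data/service.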
BACK-2554 --- data/events/alerts.go | 358 +++++++++++++++++ data/events/alerts_test.go | 639 +++++++++++++++++++++++++++++++ data/service/service/standard.go | 86 ++++- 3 files changed, 1074 insertions(+), 9 deletions(-) create mode 100644 data/events/alerts.go create mode 100644 data/events/alerts_test.go diff --git a/data/events/alerts.go b/data/events/alerts.go new file mode 100644 index 0000000000..a1dcccaef4 --- /dev/null +++ b/data/events/alerts.go @@ -0,0 +1,358 @@ +package events + +import ( + "cmp" + "context" + "os" + "slices" + "strings" + "time" + + "github.com/IBM/sarama" + "go.mongodb.org/mongo-driver/bson" + + "github.com/tidepool-org/platform/alerts" + "github.com/tidepool-org/platform/auth" + "github.com/tidepool-org/platform/data/store" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logjson "github.com/tidepool-org/platform/log/json" + lognull "github.com/tidepool-org/platform/log/null" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/push" +) + +type Consumer struct { + Alerts AlertsClient + Data store.DataRepository + DeviceTokens auth.DeviceTokensClient + Evaluator AlertsEvaluator + Permissions permission.Client + Pusher Pusher + Tokens alerts.TokenProvider + + Logger log.Logger +} + +// DosingDecision removes a stutter to improve readability. +type DosingDecision = dosingdecision.DosingDecision + +// Glucose removes a stutter to improve readability. +type Glucose = glucose.Glucose + +func (c *Consumer) Consume(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + if msg == nil { + c.logger(ctx).Info("UNEXPECTED: nil message; ignoring") + return nil + } + + switch { + case strings.HasSuffix(msg.Topic, ".data.alerts"): + return c.consumeAlertsConfigs(ctx, session, msg) + case strings.HasSuffix(msg.Topic, ".data.deviceData.alerts"): + return c.consumeDeviceData(ctx, session, msg) + default: + c.logger(ctx).WithField("topic", msg.Topic). 
+ Infof("UNEXPECTED: topic; ignoring") + } + + return nil +} + +func (c *Consumer) consumeAlertsConfigs(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { + + cfg := &alerts.Config{} + if err := unmarshalMessageValue(msg.Value, cfg); err != nil { + return err + } + lgr := c.logger(ctx) + lgr.WithField("cfg", cfg).Info("consuming an alerts config message") + + ctxLog := c.logger(ctx).WithField("followedUserID", cfg.FollowedUserID) + ctx = log.NewContextWithLogger(ctx, ctxLog) + + notes, err := c.Evaluator.Evaluate(ctx, cfg.FollowedUserID) + if err != nil { + format := "Unable to evalaute alerts configs triggered event for user %s" + return errors.Wrapf(err, format, cfg.UserID) + } + ctxLog.WithField("notes", notes).Debug("notes generated from alerts config") + + c.pushNotes(ctx, notes) + + session.MarkMessage(msg, "") + lgr.WithField("message", msg).Debug("marked") + return nil +} + +func (c *Consumer) consumeDeviceData(ctx context.Context, + session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { + + datum := &Glucose{} + if err := unmarshalMessageValue(msg.Value, datum); err != nil { + return err + } + lgr := c.logger(ctx) + lgr.WithField("data", datum).Info("consuming a device data message") + + if datum.UserID == nil { + return errors.New("Unable to retrieve alerts configs: userID is nil") + } + ctx = log.NewContextWithLogger(ctx, lgr.WithField("followedUserID", *datum.UserID)) + notes, err := c.Evaluator.Evaluate(ctx, *datum.UserID) + if err != nil { + format := "Unable to evalaute device data triggered event for user %s" + return errors.Wrapf(err, format, *datum.UserID) + } + for idx, note := range notes { + lgr.WithField("idx", idx).WithField("note", note).Debug("notes") + } + + c.pushNotes(ctx, notes) + + session.MarkMessage(msg, "") + lgr.WithField("message", msg).Debug("marked") + return nil +} + +func (c *Consumer) pushNotes(ctx context.Context, notes []*alerts.Note) { + lgr := c.logger(ctx) + + // Notes could be pushed into a Kafka topic to have a more durable retry, + // but that can be added later. + for _, note := range notes { + lgr := lgr.WithField("recipientUserID", note.RecipientUserID) + tokens, err := c.DeviceTokens.GetDeviceTokens(ctx, note.RecipientUserID) + if err != nil { + lgr.WithError(err).Info("Unable to retrieve device tokens") + } + if len(tokens) == 0 { + lgr.Debug("no device tokens found, won't push any notifications") + } + pushNote := push.FromNote(note) + for _, token := range tokens { + if err := c.Pusher.Push(ctx, token, pushNote); err != nil { + lgr.WithError(err).Info("Unable to push notification") + } + } + } +} + +// logger produces a log.Logger. +// +// It tries a number of options before falling back to a null Logger. +func (c *Consumer) logger(ctx context.Context) log.Logger { + // A context's Logger is preferred, as it has the most... context. 
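+	// Otherwise fall back to the injected Logger, then to a JSON logger writing
+	// to stderr, and finally to a null logger if even that can't be built.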
+ if ctxLgr := log.LoggerFromContext(ctx); ctxLgr != nil { + return ctxLgr + } + if c.Logger != nil { + return c.Logger + } + fallback, err := logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + fallback = lognull.NewLogger() + } + return fallback +} + +type AlertsEvaluator interface { + Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) +} + +func NewAlertsEvaluator(alerts AlertsClient, data store.DataRepository, + perms permission.Client, tokens alerts.TokenProvider) *evaluator { + + return &evaluator{ + Alerts: alerts, + Data: data, + Permissions: perms, + Tokens: tokens, + } +} + +// evaluator implements AlertsEvaluator. +type evaluator struct { + Alerts AlertsClient + Data store.DataRepository + Permissions permission.Client + Tokens alerts.TokenProvider +} + +// logger produces a log.Logger. +// +// It tries a number of options before falling back to a null Logger. +func (e *evaluator) logger(ctx context.Context) log.Logger { + // A context's Logger is preferred, as it has the most... context. + if ctxLgr := log.LoggerFromContext(ctx); ctxLgr != nil { + return ctxLgr + } + fallback, err := logjson.NewLogger(os.Stderr, log.DefaultLevelRanks(), log.DefaultLevel()) + if err != nil { + fallback = lognull.NewLogger() + } + return fallback +} + +// Evaluate followers' alerts.Configs to generate alert notifications. +func (e *evaluator) Evaluate(ctx context.Context, followedUserID string) ( + []*alerts.Note, error) { + + alertsConfigs, err := e.gatherAlertsConfigs(ctx, followedUserID) + if err != nil { + return nil, err + } + e.logger(ctx).Debugf("%d alerts configs found", len(alertsConfigs)) + + alertsConfigsByUploadID := e.mapAlertsConfigsByUploadID(alertsConfigs) + + notes := []*alerts.Note{} + for uploadID, cfgs := range alertsConfigsByUploadID { + resp, err := e.gatherData(ctx, followedUserID, uploadID, cfgs) + if err != nil { + return nil, err + } + notes = slices.Concat(notes, e.generateNotes(ctx, cfgs, resp)) + } + + return notes, nil +} + +func (e *evaluator) mapAlertsConfigsByUploadID(cfgs []*alerts.Config) map[string][]*alerts.Config { + mapped := map[string][]*alerts.Config{} + for _, cfg := range cfgs { + if _, found := mapped[cfg.UploadID]; !found { + mapped[cfg.UploadID] = []*alerts.Config{} + } + mapped[cfg.UploadID] = append(mapped[cfg.UploadID], cfg) + } + return mapped +} + +func (e *evaluator) gatherAlertsConfigs(ctx context.Context, + followedUserID string) ([]*alerts.Config, error) { + + alertsConfigs, err := e.Alerts.List(ctx, followedUserID) + if err != nil { + return nil, err + } + e.logger(ctx).Debugf("after List, %d alerts configs", len(alertsConfigs)) + alertsConfigs = slices.DeleteFunc(alertsConfigs, e.authDenied(ctx)) + e.logger(ctx).Debugf("after perms check, %d alerts configs", len(alertsConfigs)) + return alertsConfigs, nil +} + +// authDenied builds functions that enable slices.DeleteFunc to remove +// unauthorized users' alerts.Configs. +// +// Via a closure it's able to inject information from the Context and the +// evaluator itself into the resulting function. 
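+//
+// The returned function reports true when a config should be dropped: a nil
+// config, a failed permissions lookup, or a missing follow permission. It is
+// used in gatherAlertsConfigs above:
+//
+//	alertsConfigs = slices.DeleteFunc(alertsConfigs, e.authDenied(ctx))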
+func (e *evaluator) authDenied(ctx context.Context) func(ac *alerts.Config) bool { + lgr := e.logger(ctx) + return func(ac *alerts.Config) bool { + if ac == nil { + return true + } + lgr = lgr.WithFields(log.Fields{ + "userID": ac.UserID, + "followedUserID": ac.FollowedUserID, + }) + token, err := e.Tokens.ServerSessionToken() + if err != nil { + lgr.WithError(err).Warn("Unable to confirm permissions; skipping") + return false + } + ctx = auth.NewContextWithServerSessionToken(ctx, token) + perms, err := e.Permissions.GetUserPermissions(ctx, ac.UserID, ac.FollowedUserID) + if err != nil { + lgr.WithError(err).Warn("Unable to confirm permissions; skipping") + return true + } + if _, found := perms[permission.Follow]; !found { + lgr.Debug("permission denied: skipping") + return true + } + return false + } +} + +func (e *evaluator) gatherData(ctx context.Context, followedUserID, uploadID string, + alertsConfigs []*alerts.Config) (*store.AlertableResponse, error) { + + if len(alertsConfigs) == 0 { + return nil, nil + } + + longestDelay := slices.MaxFunc(alertsConfigs, func(i, j *alerts.Config) int { + return cmp.Compare(i.LongestDelay(), j.LongestDelay()) + }).LongestDelay() + longestDelay = max(5*time.Minute, longestDelay) + e.logger(ctx).WithField("longestDelay", longestDelay).Debug("here it is") + params := store.AlertableParams{ + UserID: followedUserID, + UploadID: uploadID, + Start: time.Now().Add(-longestDelay), + } + resp, err := e.Data.GetAlertableData(ctx, params) + if err != nil { + return nil, err + } + + return resp, nil +} + +func (e *evaluator) generateNotes(ctx context.Context, + alertsConfigs []*alerts.Config, resp *store.AlertableResponse) []*alerts.Note { + + if len(alertsConfigs) == 0 { + return nil + } + + lgr := e.logger(ctx) + notes := []*alerts.Note{} + for _, alertsConfig := range alertsConfigs { + l := lgr.WithFields(log.Fields{ + "userID": alertsConfig.UserID, + "followedUserID": alertsConfig.FollowedUserID, + "uploadID": alertsConfig.UploadID, + }) + c := log.NewContextWithLogger(ctx, l) + note := alertsConfig.Evaluate(c, resp.Glucose, resp.DosingDecisions) + if note != nil { + notes = append(notes, note) + continue + } + } + + return notes +} + +func unmarshalMessageValue[A any](b []byte, payload *A) error { + wrapper := &struct { + FullDocument A `json:"fullDocument"` + }{} + if err := bson.UnmarshalExtJSON(b, false, wrapper); err != nil { + return errors.Wrap(err, "Unable to unmarshal ExtJSON") + } + *payload = wrapper.FullDocument + return nil +} + +type AlertsClient interface { + Delete(context.Context, *alerts.Config) error + Get(context.Context, *alerts.Config) (*alerts.Config, error) + List(_ context.Context, userID string) ([]*alerts.Config, error) + Upsert(context.Context, *alerts.Config) error +} + +// Pusher is a service-agnostic interface for sending push notifications. +type Pusher interface { + // Push a notification to a device. + Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error +} diff --git a/data/events/alerts_test.go b/data/events/alerts_test.go new file mode 100644 index 0000000000..64ed0f8bca --- /dev/null +++ b/data/events/alerts_test.go @@ -0,0 +1,639 @@ +package events + +import ( + "context" + "time" + + "github.com/IBM/sarama" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + + "github.com/tidepool-org/platform/alerts" + nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" + "github.com/tidepool-org/platform/data/store" + storetest "github.com/tidepool-org/platform/data/store/test" + "github.com/tidepool-org/platform/data/types" + "github.com/tidepool-org/platform/data/types/blood" + "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/devicetokens" + "github.com/tidepool-org/platform/errors" + "github.com/tidepool-org/platform/log" + logtest "github.com/tidepool-org/platform/log/test" + "github.com/tidepool-org/platform/permission" + "github.com/tidepool-org/platform/pointer" + "github.com/tidepool-org/platform/push" +) + +const ( + testUserID = "test-user-id" + testFollowedUserID = "test-followed-user-id" + testUserNoPermsID = "test-user-no-perms" + testUploadID = "test-upload-id" +) + +var ( + testMongoUrgentLowResponse = &store.AlertableResponse{ + Glucose: []*glucose.Glucose{ + newTestStaticDatumMmolL(1.0)}, + } +) + +var _ = Describe("Consumer", func() { + + Describe("Consume", func() { + It("ignores nil messages", func() { + ctx, _ := addLogger(context.Background()) + c := &Consumer{} + + Expect(c.Consume(ctx, nil, nil)).To(Succeed()) + }) + + It("processes alerts config events", func() { + cfg := &alerts.Config{ + UserID: testUserID, + FollowedUserID: testFollowedUserID, + Alerts: alerts.Alerts{ + Low: &alerts.LowAlert{ + Base: alerts.Base{ + Enabled: true}, + Threshold: alerts.Threshold{ + Value: 101.1, + Units: "mg/dL", + }, + }, + }, + } + kafkaMsg := newAlertsMockConsumerMessage(".data.alerts", cfg) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Session.MarkCalls).To(Equal(1)) + }) + + It("processes device data events", func() { + blood := &glucose.Glucose{ + Blood: blood.Blood{ + Units: pointer.FromAny("mmol/L"), + Value: pointer.FromAny(7.2), + Base: types.Base{ + UserID: pointer.FromAny(testFollowedUserID), + }, + }, + } + kafkaMsg := newAlertsMockConsumerMessage(".data.deviceData.alerts", blood) + docs := []interface{}{bson.M{}} + c, deps := newConsumerTestDeps(docs) + + Expect(c.Consume(deps.Context, deps.Session, kafkaMsg)).To(Succeed()) + Expect(deps.Session.MarkCalls).To(Equal(1)) + }) + + }) + + Describe("Evaluator", func() { + Describe("Evaluate", func() { + It("checks that alerts config owners have permission", func() { + testLogger := logtest.NewLogger() + ctx := log.NewContextWithLogger(context.Background(), testLogger) + + eval, deps := newEvaluatorTestDeps([]*store.AlertableResponse{testMongoUrgentLowResponse}) + deps.Permissions.Allow(testUserID, permission.Follow, testFollowedUserID) + deps.Permissions.DenyAll(testUserNoPermsID, testFollowedUserID) + deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserNoPermsID)) + deps.Alerts.Configs = append(deps.Alerts.Configs, testAlertsConfigUrgentLow(testUserID)) + + notes, err := eval.Evaluate(ctx, testFollowedUserID) + + Expect(err).To(Succeed()) + Expect(notes).To(ConsistOf(HaveField("RecipientUserID", testUserID))) + }) + + It("uses the longest delay", func() { + + }) + }) + + }) + + // Describe("evaluateUrgentLow", func() { + // It("can't function without datum units", func() { + // ctx, _ := addLogger(context.Background()) + // alert := newTestUrgentLowAlert() + // datum := 
newTestStaticDatumMmolL(11) + // datum.Blood.Units = nil + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) + + // Expect(err).To(MatchError("Unable to evaluate datum: Units, Value, or Time is nil")) + // }) + + // It("can't function without datum value", func() { + // ctx, _ := addLogger(context.Background()) + // alert := newTestUrgentLowAlert() + // datum := newTestStaticDatumMmolL(11) + // datum.Blood.Value = nil + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) + + // Expect(err).To(MatchError("Unable to evaluate datum: Units, Value, or Time is nil")) + // }) + + // It("can't function without datum time", func() { + // ctx, _ := addLogger(context.Background()) + // alert := newTestUrgentLowAlert() + // datum := newTestStaticDatumMmolL(11) + // datum.Blood.Time = nil + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // _, err := c.evaluateUrgentLow(ctx, datum, testUserID, alert) + // Expect(err).To(MatchError("Unable to evaluate datum: Units, Value, or Time is nil")) + // }) + + // It("is marked resolved", func() { + // ctx, _ := addLogger(context.Background()) + // datum := newTestStaticDatumMmolL(11) + // alert := newTestUrgentLowAlert() + // alert.Threshold.Value = *datum.Blood.Value - 1 + // userID := "test-user-id" + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) + // Expect(err).To(Succeed()) + // Expect(updated).To(BeTrue()) + // Expect(alert.Resolved).To(BeTemporally("~", time.Now(), time.Second)) + // }) + + // It("is marked both notified and triggered", func() { + // ctx, _ := addLogger(context.Background()) + // datum := newTestStaticDatumMmolL(11) + // alert := newTestUrgentLowAlert() + // alert.Threshold.Value = *datum.Blood.Value + 1 + // userID := "test-user-id" + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) + // Expect(err).To(Succeed()) + // Expect(updated).To(BeTrue()) + // Expect(alert.Sent).To(BeTemporally("~", time.Now(), time.Second)) + // Expect(alert.Triggered).To(BeTemporally("~", time.Now(), time.Second)) + // }) + + // It("sends notifications regardless of previous notification time", func() { + // ctx, _ := addLogger(context.Background()) + // datum := newTestStaticDatumMmolL(11) + // alert := newTestUrgentLowAlert() + // lastTime := time.Now().Add(-10 * time.Second) + // alert.Activity.Sent = lastTime + // alert.Threshold.Value = *datum.Blood.Value + 1 + // userID := "test-user-id" + // c := &Consumer{ + // Pusher: newMockPusher(), + // DeviceTokens: newMockDeviceTokensClient(), + // } + + // updated, err := c.evaluateUrgentLow(ctx, datum, userID, alert) + // Expect(err).To(Succeed()) + // Expect(updated).To(BeTrue()) + // Expect(alert.Sent).To(BeTemporally("~", time.Now(), time.Second)) + // }) + // }) +}) + +type consumerTestDeps struct { + Alerts *mockAlertsConfigClient + Context context.Context + Cursor *mongo.Cursor + Evaluator *mockStaticEvaluator + Logger log.Logger + Permissions *mockPermissionsClient + Repo *storetest.DataRepository + Session *mockConsumerGroupSession + Tokens 
alerts.TokenProvider +} + +func newConsumerTestDeps(docs []interface{}) (*Consumer, *consumerTestDeps) { + GinkgoHelper() + ctx, logger := addLogger(context.Background()) + alertsClient := newMockAlertsConfigClient([]*alerts.Config{ + { + UserID: testUserID, + FollowedUserID: testFollowedUserID, + Alerts: alerts.Alerts{}, + }, + }, nil) + dataRepo := storetest.NewDataRepository() + dataRepo.GetLastUpdatedForUserOutputs = []storetest.GetLastUpdatedForUserOutput{} + augmentedDocs := augmentMockMongoDocs(docs) + cur := newMockMongoCursor(augmentedDocs) + dataRepo.GetDataRangeOutputs = []storetest.GetDataRangeOutput{ + {Error: nil, Cursor: cur}, + } + tokens := &mockAlertsTokenProvider{Token: "test-token"} + permissions := newMockPermissionsClient() + evaluator := newMockStaticEvaluator() + + return &Consumer{ + Alerts: alertsClient, + Evaluator: evaluator, + Tokens: tokens, + Data: dataRepo, + Permissions: permissions, + }, &consumerTestDeps{ + Alerts: alertsClient, + Context: ctx, + Cursor: cur, + Evaluator: evaluator, + Repo: dataRepo, + Session: &mockConsumerGroupSession{}, + Logger: logger, + Tokens: tokens, + Permissions: permissions, + } +} + +func newEvaluatorTestDeps(responses []*store.AlertableResponse) (*evaluator, *evaluatorTestDeps) { + alertsClient := newMockAlertsConfigClient(nil, nil) + dataRepo := storetest.NewDataRepository() + dataRepo.GetLastUpdatedForUserOutputs = []storetest.GetLastUpdatedForUserOutput{} + for _, r := range responses { + out := storetest.GetAlertableDataOutput{Response: r} + dataRepo.GetAlertableDataOutputs = append(dataRepo.GetAlertableDataOutputs, out) + } + permissions := newMockPermissionsClient() + tokens := newMockTokensProvider() + return &evaluator{ + Alerts: alertsClient, + Data: dataRepo, + Permissions: permissions, + Tokens: tokens, + }, &evaluatorTestDeps{ + Alerts: alertsClient, + Permissions: permissions, + } +} + +type evaluatorTestDeps struct { + Alerts *mockAlertsConfigClient + Permissions *mockPermissionsClient +} + +// mockEvaluator implements Evaluator. +type mockEvaluator struct { + Evaluations map[string][]mockEvaluatorResponse + EvaluateCalls map[string]int +} + +type mockEvaluatorResponse struct { + Notes []*alerts.Note + Error error +} + +func newMockEvaluator() *mockEvaluator { + return &mockEvaluator{ + Evaluations: map[string][]mockEvaluatorResponse{}, + EvaluateCalls: map[string]int{}, + } +} + +func (e *mockEvaluator) Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) { + if _, found := e.Evaluations[followedUserID]; !found { + return nil, nil + } + resp := e.Evaluations[followedUserID][0] + if len(e.Evaluations[followedUserID]) > 1 { + e.Evaluations[followedUserID] = e.Evaluations[followedUserID][1:] + } + e.EvaluateCalls[followedUserID] += 1 + if resp.Error != nil { + return nil, resp.Error + } + return resp.Notes, nil +} + +func (e *mockEvaluator) EvaluateCallsTotal() int { + total := 0 + for _, val := range e.EvaluateCalls { + total += val + } + return total +} + +// mockStaticEvaluator wraps mock evaluator with a static response. +// +// Useful when testing Consumer behavior, when the behavior of the Evaulator +// isn't relevant to the Consumer test. 
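+//
+// Unlike mockEvaluator, it ignores any canned Evaluations and always returns
+// (nil, nil), recording only per-user call counts.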
+type mockStaticEvaluator struct { + *mockEvaluator +} + +func newMockStaticEvaluator() *mockStaticEvaluator { + return &mockStaticEvaluator{newMockEvaluator()} +} + +func (e *mockStaticEvaluator) Evaluate(ctx context.Context, followedUserID string) ([]*alerts.Note, error) { + e.EvaluateCalls[followedUserID] += 1 + return nil, nil +} + +func newAlertsMockConsumerMessage(topic string, v any) *sarama.ConsumerMessage { + GinkgoHelper() + doc := &struct { + FullDocument any `json:"fullDocument" bson:"fullDocument"` + }{FullDocument: v} + vBytes, err := bson.MarshalExtJSON(doc, false, false) + Expect(err).To(Succeed()) + return &sarama.ConsumerMessage{ + Value: vBytes, + Topic: topic, + } +} + +func addLogger(ctx context.Context) (context.Context, log.Logger) { + GinkgoHelper() + if ctx == nil { + ctx = context.Background() + } + + lgr := newTestLogger() + return log.NewContextWithLogger(ctx, lgr), lgr +} + +func newTestLogger() log.Logger { + GinkgoHelper() + lgr := logtest.NewLogger() + return lgr +} + +func augmentMockMongoDocs(inDocs []interface{}) []interface{} { + defaultDoc := bson.M{ + "_userId": testFollowedUserID, + "_active": true, + "type": "upload", + "time": time.Now(), + } + outDocs := []interface{}{} + for _, inDoc := range inDocs { + newDoc := defaultDoc + switch v := (inDoc).(type) { + case map[string]interface{}: + for key, val := range v { + newDoc[key] = val + } + outDocs = append(outDocs, newDoc) + default: + outDocs = append(outDocs, inDoc) + } + } + return outDocs +} + +func newMockMongoCursor(docs []interface{}) *mongo.Cursor { + GinkgoHelper() + cur, err := mongo.NewCursorFromDocuments(docs, nil, nil) + Expect(err).To(Succeed()) + return cur +} + +func newTestStaticDatumMmolL(value float64) *glucose.Glucose { + return &glucose.Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromTime(time.Now()), + }, + Units: pointer.FromString(nontypesglucose.MmolL), + Value: pointer.FromFloat64(value), + }, + } +} + +func newTestUrgentLowAlert() *alerts.UrgentLowAlert { + return &alerts.UrgentLowAlert{ + Base: alerts.Base{ + Enabled: true, + Activity: alerts.Activity{}, + }, + Threshold: alerts.Threshold{ + Units: nontypesglucose.MmolL, + }, + } +} + +type mockDeviceTokensClient struct { + Error error + Tokens []*devicetokens.DeviceToken +} + +func newMockDeviceTokensClient() *mockDeviceTokensClient { + return &mockDeviceTokensClient{ + Tokens: []*devicetokens.DeviceToken{}, + } +} + +// // testingT is a subset of testing.TB +// type testingT interface { +// Errorf(format string, args ...any) +// Fatalf(format string, args ...any) +// } + +func (m *mockDeviceTokensClient) GetDeviceTokens(ctx context.Context, + userID string) ([]*devicetokens.DeviceToken, error) { + + if m.Error != nil { + return nil, m.Error + } + return m.Tokens, nil +} + +type mockPusher struct { + Pushes []string +} + +func newMockPusher() *mockPusher { + return &mockPusher{ + Pushes: []string{}, + } +} + +func (p *mockPusher) Push(ctx context.Context, + deviceToken *devicetokens.DeviceToken, notification *push.Notification) error { + p.Pushes = append(p.Pushes, notification.Message) + return nil +} + +type mockAlertsConfigClient struct { + Error error + Configs []*alerts.Config +} + +func newMockAlertsConfigClient(c []*alerts.Config, err error) *mockAlertsConfigClient { + if c == nil { + c = []*alerts.Config{} + } + return &mockAlertsConfigClient{ + Configs: c, + Error: err, + } +} + +func (c *mockAlertsConfigClient) Delete(_ context.Context, _ *alerts.Config) error { + return c.Error +} + 
+func (c *mockAlertsConfigClient) Get(_ context.Context, _ *alerts.Config) (*alerts.Config, error) { + if c.Error != nil { + return nil, c.Error + } else if len(c.Configs) > 0 { + return c.Configs[0], nil + } + return nil, nil +} + +func (c *mockAlertsConfigClient) List(_ context.Context, userID string) ([]*alerts.Config, error) { + if c.Error != nil { + return nil, c.Error + } else if len(c.Configs) > 0 { + return c.Configs, nil + } + return nil, nil +} + +func (c *mockAlertsConfigClient) Upsert(_ context.Context, _ *alerts.Config) error { + return c.Error +} + +type mockConsumerGroupSession struct { + MarkCalls int +} + +func (s *mockConsumerGroupSession) Claims() map[string][]int32 { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MemberID() string { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) GenerationID() int32 { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) Commit() { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") // TODO: Implement +} + +func (s *mockConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { + s.MarkCalls++ +} + +func (s *mockConsumerGroupSession) Context() context.Context { + panic("not implemented") // TODO: Implement +} + +type mockAlertsTokenProvider struct { + Token string + Error error +} + +func (p *mockAlertsTokenProvider) ServerSessionToken() (string, error) { + if p.Error != nil { + return "", p.Error + } + return p.Token, nil +} + +type mockPermissionsClient struct { + Error error + Perms map[string]permission.Permissions +} + +func newMockPermissionsClient() *mockPermissionsClient { + return &mockPermissionsClient{ + Perms: map[string]permission.Permissions{}, + } +} + +func (c *mockPermissionsClient) Key(requesterUserID, targetUserID string) string { + return requesterUserID + targetUserID +} + +func (c *mockPermissionsClient) Allow(requestUserID, perm, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + if _, found := c.Perms[key]; !found { + c.Perms[key] = permission.Permissions{} + } + c.Perms[key][perm] = permission.Permission{} +} + +func (c *mockPermissionsClient) DenyAll(requestUserID, targetUserID string) { + key := c.Key(requestUserID, targetUserID) + delete(c.Perms, key) +} + +func (c *mockPermissionsClient) GetUserPermissions(ctx context.Context, requestUserID string, targetUserID string) (permission.Permissions, error) { + if c.Error != nil { + return nil, c.Error + } + if p, ok := c.Perms[c.Key(requestUserID, targetUserID)]; ok { + return p, nil + } else { + return nil, errors.New("test error NOT FOUND") + } +} + +type mockTokensProvider struct{} + +func newMockTokensProvider() *mockTokensProvider { + return &mockTokensProvider{} +} + +func (p *mockTokensProvider) ServerSessionToken() (string, error) { + return "test-server-session-token", nil +} + +func testAlertsConfigUrgentLow(userID string) *alerts.Config { + return &alerts.Config{ + UserID: userID, + FollowedUserID: testFollowedUserID, + UploadID: testUploadID, + Alerts: alerts.Alerts{ + UrgentLow: &alerts.UrgentLowAlert{ + Base: alerts.Base{ + Enabled: true, + Activity: alerts.Activity{}, + }, + Threshold: alerts.Threshold{ 
+ Value: 10.0, + Units: nontypesglucose.MgdL, + }, + }, + }, + } +} diff --git a/data/service/service/standard.go b/data/service/service/standard.go index c08104f24d..6aaa6fe3d3 100644 --- a/data/service/service/standard.go +++ b/data/service/service/standard.go @@ -2,6 +2,7 @@ package service import ( "context" + "strings" "github.com/IBM/sarama" "github.com/kelseyhightower/envconfig" @@ -18,7 +19,6 @@ import ( dataSourceStoreStructured "github.com/tidepool-org/platform/data/source/store/structured" dataSourceStoreStructuredMongo "github.com/tidepool-org/platform/data/source/store/structured/mongo" dataStoreMongo "github.com/tidepool-org/platform/data/store/mongo" - "github.com/tidepool-org/platform/devicetokens" "github.com/tidepool-org/platform/errors" "github.com/tidepool-org/platform/events" "github.com/tidepool-org/platform/log" @@ -44,8 +44,9 @@ type Standard struct { dataClient *Client clinicsClient *clinics.Client dataSourceClient *dataSourceServiceClient.Client - pusher Pusher + pusher dataEvents.Pusher userEventsHandler events.Runner + alertsEventsHandler events.Runner api *api.Standard server *server.Standard } @@ -97,6 +98,9 @@ func (s *Standard) Initialize(provider application.Provider) error { if err := s.initializeUserEventsHandler(); err != nil { return err } + if err := s.initializeAlertsEventsHandler(); err != nil { + return err + } if err := s.initializeAPI(); err != nil { return err } @@ -117,6 +121,13 @@ func (s *Standard) Terminate() { } s.userEventsHandler = nil } + if s.alertsEventsHandler != nil { + s.Logger().Info("Terminating the alertsEventsHandler") + if err := s.alertsEventsHandler.Terminate(); err != nil { + s.Logger().Errorf("Error while terminating the alertsEventsHandler: %v", err) + } + s.alertsEventsHandler = nil + } s.api = nil s.dataClient = nil if s.syncTaskStore != nil { @@ -147,6 +158,9 @@ func (s *Standard) Run() error { go func() { errs <- s.userEventsHandler.Run() }() + go func() { + errs <- s.alertsEventsHandler.Run() + }() go func() { errs <- s.server.Serve() }() @@ -434,12 +448,6 @@ func (s *Standard) initializeSaramaLogger() error { return nil } -// Pusher is a service-agnostic interface for sending push notifications. -type Pusher interface { - // Push a notification to a device. - Push(context.Context, *devicetokens.DeviceToken, *push.Notification) error -} - func (s *Standard) initializePusher() error { var err error @@ -453,7 +461,7 @@ func (s *Standard) initializePusher() error { return errors.Wrap(err, "Unable to process APNs pusher config") } - var pusher Pusher + var pusher dataEvents.Pusher pusher, err = push.NewAPNSPusherFromKeyData(apns2Config.SigningKey, apns2Config.KeyID, apns2Config.TeamID, apns2Config.BundleID) if err != nil { @@ -464,3 +472,63 @@ func (s *Standard) initializePusher() error { return nil } + +func (s *Standard) initializeAlertsEventsHandler() error { + s.Logger().Debug("Initializing alerts events handler") + + commonConfig := eventsCommon.NewConfig() + if err := commonConfig.LoadFromEnv(); err != nil { + return err + } + + // In addition to the CloudEventsConfig, additional specific config values + // are needed. + config := &struct { + KafkaAlertsTopics []string `envconfig:"KAFKA_ALERTS_TOPICS" default:"alerts,deviceData.alerts"` + KafkaAlertsGroupID string `envconfig:"KAFKA_ALERTS_CONSUMER_GROUP" required:"true"` + }{} + if err := envconfig.Process("", config); err != nil { + return errors.Wrap(err, "Unable to process envconfig") + } + + // Some kafka topics use a `-` as a prefix. 
But MongoDB CDC topics are created with + // `.`. This code is using CDC topics, so ensuring that a `.` is used for alerts events + // lines everything up as expected. + topicPrefix := strings.ReplaceAll(commonConfig.KafkaTopicPrefix, "-", ".") + prefixedTopics := make([]string, 0, len(config.KafkaAlertsTopics)) + for _, topic := range config.KafkaAlertsTopics { + prefixedTopics = append(prefixedTopics, topicPrefix+topic) + } + + alerts := s.dataStore.NewAlertsRepository() + dataRepo := s.dataStore.NewDataRepository() + s.Logger().WithField("permissionClient", s.permissionClient).Debug("yo!") + ec := &dataEvents.Consumer{ + Alerts: alerts, + Data: dataRepo, + DeviceTokens: s.AuthClient(), + Evaluator: dataEvents.NewAlertsEvaluator(alerts, dataRepo, s.permissionClient, s.AuthClient()), + Permissions: s.permissionClient, + Pusher: s.pusher, + Tokens: s.AuthClient(), + Logger: s.Logger(), + } + + runnerCfg := dataEvents.SaramaRunnerConfig{ + Brokers: commonConfig.KafkaBrokers, + GroupID: config.KafkaAlertsGroupID, + Logger: s.Logger(), + Topics: prefixedTopics, + Sarama: commonConfig.SaramaConfig, + MessageConsumer: &dataEvents.AlertsEventsConsumer{ + Consumer: ec, + }, + } + runner := &dataEvents.SaramaRunner{Config: runnerCfg} + if err := runner.Initialize(); err != nil { + return errors.Wrap(err, "Unable to initialize alerts events handler runner") + } + s.alertsEventsHandler = runner + + return nil +} From 986106bb9a41a0d813ba22c809ac745e35466fed Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Thu, 11 Jul 2024 15:15:06 -0600 Subject: [PATCH 14/15] remove some debugging logs No longer needed --- data/events/alerts.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/data/events/alerts.go b/data/events/alerts.go index a1dcccaef4..819db3a1af 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -208,7 +208,6 @@ func (e *evaluator) Evaluate(ctx context.Context, followedUserID string) ( if err != nil { return nil, err } - e.logger(ctx).Debugf("%d alerts configs found", len(alertsConfigs)) alertsConfigsByUploadID := e.mapAlertsConfigsByUploadID(alertsConfigs) @@ -242,9 +241,7 @@ func (e *evaluator) gatherAlertsConfigs(ctx context.Context, if err != nil { return nil, err } - e.logger(ctx).Debugf("after List, %d alerts configs", len(alertsConfigs)) alertsConfigs = slices.DeleteFunc(alertsConfigs, e.authDenied(ctx)) - e.logger(ctx).Debugf("after perms check, %d alerts configs", len(alertsConfigs)) return alertsConfigs, nil } @@ -293,7 +290,6 @@ func (e *evaluator) gatherData(ctx context.Context, followedUserID, uploadID str return cmp.Compare(i.LongestDelay(), j.LongestDelay()) }).LongestDelay() longestDelay = max(5*time.Minute, longestDelay) - e.logger(ctx).WithField("longestDelay", longestDelay).Debug("here it is") params := store.AlertableParams{ UserID: followedUserID, UploadID: uploadID, From d57125d898bff3acf41e5ab501b2e08638a86e59 Mon Sep 17 00:00:00 2001 From: Eric Wollesen Date: Thu, 11 Jul 2024 10:46:36 -0600 Subject: [PATCH 15/15] add evaluation of not looping alerts BACK-2559 --- alerts/config.go | 77 +++++++++-- alerts/config_test.go | 290 +++++++++++++++++++++++++++++++++--------- data/events/alerts.go | 8 +- 3 files changed, 300 insertions(+), 75 deletions(-) diff --git a/alerts/config.go b/alerts/config.go index f26387c1b8..14c354c979 100644 --- a/alerts/config.go +++ b/alerts/config.go @@ -20,6 +20,12 @@ import ( "github.com/tidepool-org/platform/user" ) +// DosingDecision removes a stutter to improve readability. 
+type DosingDecision = dosingdecision.DosingDecision + +// Glucose removes a stutter to improve readability. +type Glucose = glucose.Glucose + // Config wraps Alerts to include user relationships. // // As a wrapper type, Config provides a clear demarcation of what a user @@ -60,7 +66,7 @@ func (c Config) Validate(validator structure.Validator) { // While this method, or the methods it calls, can fail, there's no point in returning an // error. Instead errors are logged before continuing. This is to ensure that any possible alert // that should be triggered, will be triggered. -func (c Config) Evaluate(ctx context.Context, gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Note { +func (c Config) Evaluate(ctx context.Context, gd []*Glucose, dd []*DosingDecision) *Note { n := c.Alerts.Evaluate(ctx, gd, dd) if n != nil { n.FollowedUserID = c.FollowedUserID @@ -117,7 +123,7 @@ func (a Alerts) Validate(validator structure.Validator) { // Evaluations are performed according to priority. The process is // "short-circuited" at the first indicated notification. func (a Alerts) Evaluate(ctx context.Context, - gd []*glucose.Glucose, dd []*dosingdecision.DosingDecision) *Note { + gd []*Glucose, dd []*DosingDecision) *Note { if a.NoCommunication != nil && a.NoCommunication.Enabled { if n := a.NoCommunication.Evaluate(ctx, gd); n != nil { @@ -160,7 +166,7 @@ func (b Base) Validate(validator structure.Validator) { validator.Bool("enabled", &b.Enabled) } -func (b Base) Evaluate(ctx context.Context, data []*glucose.Glucose) *Note { +func (b Base) Evaluate(ctx context.Context, data []*Glucose) *Note { if lgr := log.LoggerFromContext(ctx); lgr != nil { lgr.Warn("alerts.Base.Evaluate called, this shouldn't happen!") } @@ -221,7 +227,7 @@ func (a UrgentLowAlert) Validate(validator structure.Validator) { // Evaluate urgent low condition. // // Assumes data is pre-sorted in descending order by Time. -func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { +func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*Glucose) (note *Note) { lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for urgent low") @@ -247,7 +253,7 @@ func (a *UrgentLowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) return &Note{Message: genGlucoseThresholdMessage("below urgent low")} } -func validateGlucoseAlertDatum(datum *glucose.Glucose, t Threshold) (float64, float64, error) { +func validateGlucoseAlertDatum(datum *Glucose, t Threshold) (float64, float64, error) { if datum.Blood.Units == nil || datum.Blood.Value == nil || datum.Blood.Time == nil { return 0, 0, errors.Newf("Unable to evaluate datum: Units, Value, or Time is nil") } @@ -270,12 +276,61 @@ func (a NotLoopingAlert) Validate(validator structure.Validator) { validator.Duration("delay", &dur).InRange(0, 2*time.Hour) } -// Evaluate if the device is looping. -func (a NotLoopingAlert) Evaluate(ctx context.Context, decisions []*dosingdecision.DosingDecision) (note *Note) { - // TODO will be implemented in the near future. +// Evaluate if the user's device is looping. +// +// If no decisions are present, that indicates a not looping condition. It's the lack of any +// non-errored loop decisions that indicates an alert is warranted. 
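+//
+// Concretely, a Note is returned when the newest decision whose Reason is
+// "loop" and which carries no errors is older than NotLoopingTriggeredAfter
+// plus the configured Delay; otherwise any currently active alert is marked
+// resolved.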
+func (a *NotLoopingAlert) Evaluate(ctx context.Context, decisions []*DosingDecision) (note *Note) { + lgr := log.LoggerFromContext(ctx) + if decisions == nil { + lgr.Debug("no data to evaluate for not looping") + return nil + } + defer func() { lgr.WithField("isAlerting?", note != nil).Info("not looping") }() + var lastLooped time.Time + for _, decision := range decisions { + if !a.isInteresting(decision) { + continue + } + if decision.Time.After(lastLooped) { + lastLooped = *decision.Time + } + } + alerting := time.Since(lastLooped) > NotLoopingTriggeredAfter+a.Delay.Duration() + if alerting { + if !a.IsActive() { + a.Triggered = time.Now() + } + return &Note{Message: NotLoopingMessage} + } + if a.IsActive() { + a.Resolved = time.Now() + } + return nil } +func (a NotLoopingAlert) isInteresting(decision *DosingDecision) bool { + // Only dosing decisions for loop are of interest. + if decision.Reason == nil || *decision.Reason != DosingDecisionReasonLoop { + return false + } + // Dosing decisions with errors can't indicate a loop. + if decision.Errors != nil && len(*decision.Errors) != 0 { + return false + } + if decision.Time == nil || (decision.Time).IsZero() { + return false + } + return true +} + +// NotLoopingTriggeredAfter is the minimum time before a device is considered "not looping". +const NotLoopingTriggeredAfter = 20 * time.Minute + +// NotLoopingMessage is delivered via push notifications. +const NotLoopingMessage = "A user's Loop isn't looping" + // DosingDecisionReasonLoop is specified in a [dosingdecision.DosingDecision] to indicate that // the decision is part of a loop adjustment (as opposed to bolus or something else). const DosingDecisionReasonLoop string = "loop" @@ -295,7 +350,7 @@ func (a NoCommunicationAlert) Validate(validator structure.Validator) { // Evaluate if CGM data is being received by Tidepool. // // Assumes data is pre-sorted by Time in descending order. -func (a NoCommunicationAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) *Note { +func (a NoCommunicationAlert) Evaluate(ctx context.Context, data []*Glucose) *Note { var newest time.Time for _, d := range data { if d != nil && d.Time != nil && !(*d.Time).IsZero() { @@ -337,7 +392,7 @@ func (a LowAlert) Validate(validator structure.Validator) { // Evaluate the given data to determine if an alert should be sent. // // Assumes data is pre-sorted in descending order by Time. -func (a *LowAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { +func (a *LowAlert) Evaluate(ctx context.Context, data []*Glucose) (note *Note) { lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for low") @@ -404,7 +459,7 @@ func (a HighAlert) Validate(validator structure.Validator) { // Evaluate the given data to determine if an alert should be sent. // // Assumes data is pre-sorted in descending order by Time. 
-func (a *HighAlert) Evaluate(ctx context.Context, data []*glucose.Glucose) (note *Note) { +func (a *HighAlert) Evaluate(ctx context.Context, data []*Glucose) (note *Note) { lgr := log.LoggerFromContext(ctx) if len(data) == 0 { lgr.Debug("no data to evaluate for high") diff --git a/alerts/config_test.go b/alerts/config_test.go index 74d7ca6110..de7dd6f032 100644 --- a/alerts/config_test.go +++ b/alerts/config_test.go @@ -14,7 +14,7 @@ import ( nontypesglucose "github.com/tidepool-org/platform/data/blood/glucose" "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood" - "github.com/tidepool-org/platform/data/types/blood/glucose" + "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/log" logtest "github.com/tidepool-org/platform/log/test" "github.com/tidepool-org/platform/pointer" @@ -136,7 +136,7 @@ var _ = Describe("Config", func() { Context("when a note is returned", func() { It("injects the userIDs", func() { ctx := contextWithTestLogger() - mockGlucoseData := []*glucose.Glucose{ + mockGlucoseData := []*Glucose{ { Blood: blood.Blood{ Base: types.Base{ @@ -218,18 +218,6 @@ var _ = Describe("Config", func() { }) }) - var testGlucoseDatum = func(v float64) *glucose.Glucose { - return &glucose.Glucose{ - Blood: blood.Blood{ - Base: types.Base{ - Time: pointer.FromAny(time.Now()), - }, - Units: pointer.FromAny(nontypesglucose.MmolL), - Value: pointer.FromAny(v), - }, - } - } - Context("UrgentLowAlert", func() { Context("Threshold", func() { It("accepts values between 0 and 1000 mg/dL", func() { @@ -272,7 +260,7 @@ var _ = Describe("Config", func() { alert := testUrgentLow() Expect(func() { - note = alert.Evaluate(ctx, []*glucose.Glucose{}) + note = alert.Evaluate(ctx, []*Glucose{}) }).ToNot(Panic()) Expect(note).To(BeNil()) Expect(func() { @@ -283,7 +271,7 @@ var _ = Describe("Config", func() { It("logs evaluation results", func() { ctx := contextWithTestLogger() - data := []*glucose.Glucose{testGlucoseDatum(1.1)} + data := []*Glucose{testGlucoseDatum(1.1)} alert := testUrgentLow() @@ -307,11 +295,11 @@ var _ = Describe("Config", func() { alert := testUrgentLow() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(1.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).ToNot(BeZero()) }) @@ -324,16 +312,16 @@ var _ = Describe("Config", func() { alert := testUrgentLow() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(1.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).ToNot(BeZero()) was := alert.Resolved Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(Equal(was)) }) @@ -345,11 +333,11 @@ var _ = Describe("Config", func() { alert := testUrgentLow() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Triggered).To(BeZero()) Expect(func() { - 
alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(1.0)}) }).ToNot(Panic()) Expect(alert.Triggered).ToNot(BeZero()) }) @@ -359,28 +347,28 @@ var _ = Describe("Config", func() { var note *Note Expect(func() { - note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + note = testUrgentLow().Evaluate(ctx, []*Glucose{testGlucoseDatum(1)}) }).ToNot(Panic()) Expect(note).ToNot(BeNil()) badUnits := testGlucoseDatum(1) badUnits.Units = nil Expect(func() { - note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + note = testUrgentLow().Evaluate(ctx, []*Glucose{badUnits}) }).ToNot(Panic()) Expect(note).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + note = testUrgentLow().Evaluate(ctx, []*Glucose{badValue}) }).ToNot(Panic()) Expect(note).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - note = testUrgentLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + note = testUrgentLow().Evaluate(ctx, []*Glucose{badTime}) }).ToNot(Panic()) Expect(note).To(BeNil()) @@ -463,7 +451,7 @@ var _ = Describe("Config", func() { alert := testLow() Expect(func() { - note = alert.Evaluate(ctx, []*glucose.Glucose{}) + note = alert.Evaluate(ctx, []*Glucose{}) }).ToNot(Panic()) Expect(note).To(BeNil()) Expect(func() { @@ -474,7 +462,7 @@ var _ = Describe("Config", func() { It("logs evaluation results", func() { ctx := contextWithTestLogger() - data := []*glucose.Glucose{testGlucoseDatum(1.1)} + data := []*Glucose{testGlucoseDatum(1.1)} alert := testLow() @@ -498,11 +486,11 @@ var _ = Describe("Config", func() { alert := testLow() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(1.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).ToNot(BeZero()) }) @@ -515,16 +503,16 @@ var _ = Describe("Config", func() { alert := testLow() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(1.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).ToNot(BeZero()) was := alert.Resolved Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(Equal(was)) }) @@ -536,11 +524,11 @@ var _ = Describe("Config", func() { alert := testLow() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Triggered).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(1.0)}) }).ToNot(Panic()) Expect(alert.Triggered).ToNot(BeZero()) }) @@ -550,28 +538,28 @@ var _ = Describe("Config", func() { var note *Note Expect(func() { - note = testLow().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(1)}) + note = testLow().Evaluate(ctx, []*Glucose{testGlucoseDatum(1)}) }).ToNot(Panic()) 
Expect(note).ToNot(BeNil()) badUnits := testGlucoseDatum(1) badUnits.Units = nil Expect(func() { - note = testLow().Evaluate(ctx, []*glucose.Glucose{badUnits}) + note = testLow().Evaluate(ctx, []*Glucose{badUnits}) }).ToNot(Panic()) Expect(note).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - note = testLow().Evaluate(ctx, []*glucose.Glucose{badValue}) + note = testLow().Evaluate(ctx, []*Glucose{badValue}) }).ToNot(Panic()) Expect(note).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - note = testLow().Evaluate(ctx, []*glucose.Glucose{badTime}) + note = testLow().Evaluate(ctx, []*Glucose{badTime}) }).ToNot(Panic()) Expect(note).To(BeNil()) }) @@ -646,7 +634,7 @@ var _ = Describe("Config", func() { alert := testHigh() Expect(func() { - note = alert.Evaluate(ctx, []*glucose.Glucose{}) + note = alert.Evaluate(ctx, []*Glucose{}) }).ToNot(Panic()) Expect(note).To(BeNil()) Expect(func() { @@ -657,7 +645,7 @@ var _ = Describe("Config", func() { It("logs evaluation results", func() { ctx := contextWithTestLogger() - data := []*glucose.Glucose{testGlucoseDatum(21.1)} + data := []*Glucose{testGlucoseDatum(21.1)} alert := testHigh() @@ -681,11 +669,11 @@ var _ = Describe("Config", func() { alert := testHigh() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(21.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).ToNot(BeZero()) }) @@ -698,16 +686,16 @@ var _ = Describe("Config", func() { alert := testHigh() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(21.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).ToNot(BeZero()) was := alert.Resolved Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Resolved).To(Equal(was)) }) @@ -719,11 +707,11 @@ var _ = Describe("Config", func() { alert := testHigh() Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(5.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(5.0)}) }).ToNot(Panic()) Expect(alert.Triggered).To(BeZero()) Expect(func() { - alert.Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21.0)}) + alert.Evaluate(ctx, []*Glucose{testGlucoseDatum(21.0)}) }).ToNot(Panic()) Expect(alert.Triggered).ToNot(BeZero()) }) @@ -733,28 +721,28 @@ var _ = Describe("Config", func() { var note *Note Expect(func() { - note = testHigh().Evaluate(ctx, []*glucose.Glucose{testGlucoseDatum(21)}) + note = testHigh().Evaluate(ctx, []*Glucose{testGlucoseDatum(21)}) }).ToNot(Panic()) Expect(note).ToNot(BeNil()) badUnits := testGlucoseDatum(1) badUnits.Units = nil Expect(func() { - note = testHigh().Evaluate(ctx, []*glucose.Glucose{badUnits}) + note = testHigh().Evaluate(ctx, []*Glucose{badUnits}) }).ToNot(Panic()) Expect(note).To(BeNil()) badValue := testGlucoseDatum(1) badValue.Value = nil Expect(func() { - note = testHigh().Evaluate(ctx, []*glucose.Glucose{badValue}) + note = testHigh().Evaluate(ctx, []*Glucose{badValue}) }).ToNot(Panic()) 
Expect(note).To(BeNil()) badTime := testGlucoseDatum(1) badTime.Time = nil Expect(func() { - note = testHigh().Evaluate(ctx, []*glucose.Glucose{badTime}) + note = testHigh().Evaluate(ctx, []*Glucose{badTime}) }).ToNot(Panic()) Expect(note).To(BeNil()) }) @@ -810,7 +798,166 @@ var _ = Describe("Config", func() { b.Validate(val) Expect(val.Error()).To(MatchError("value 2h0m1s is not between 0s and 2h0m0s")) }) + }) + + Context("Evaluate", func() { + testNotLooping := func() *NotLoopingAlert { + return &NotLoopingAlert{} + } + pastDecision := func(dur time.Duration) *DosingDecision { + if dur > 0 { + dur *= -1 + } + return &DosingDecision{ + Base: types.Base{ + Time: pointer.FromAny(time.Now().Add(dur)), + }, + Reason: pointer.FromAny(DosingDecisionReasonLoop), + Errors: nil, + } + } + + It("handles being passed empty data", func() { + ctx := contextWithTestLogger() + var note *Note + alert := testNotLooping() + + Expect(func() { + note = alert.Evaluate(ctx, nil) + }).ToNot(Panic()) + Expect(note).To(BeNil()) + Expect(func() { + note = alert.Evaluate(ctx, []*DosingDecision{}) + }).ToNot(Panic()) + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring(NotLoopingMessage)) + }) + + It("isn't interested in dosing decisions with errors", func() { + ctx := contextWithTestLogger() + decisionWithError := &DosingDecision{ + Reason: pointer.FromAny(DosingDecisionReasonLoop), + Errors: &dosingdecision.IssueArray{{}}, + } + alert := testNotLooping() + + note := alert.Evaluate(ctx, []*DosingDecision{decisionWithError}) + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring(NotLoopingMessage)) + }) + + It("isn't interested in dosing decisions with reasons other than \"loop\"", func() { + ctx := contextWithTestLogger() + decisionWithOtherReason := &DosingDecision{ + Reason: pointer.FromAny("test"), + Errors: nil, + } + alert := testNotLooping() + + note := alert.Evaluate(ctx, []*DosingDecision{decisionWithOtherReason}) + Expect(note).ToNot(BeNil()) + Expect(note.Message).To(ContainSubstring(NotLoopingMessage)) + }) + + It("uses the most recent loop decision", func() { + ctx := contextWithTestLogger() + olderDecision := pastDecision(-25 * time.Minute) + newerDecision := pastDecision(-time.Minute) + wayOldDecision := pastDecision(-45 * time.Minute) + alert := testNotLooping() + + note := alert.Evaluate(ctx, []*DosingDecision{newerDecision, olderDecision}) + Expect(note).To(BeNil()) + note = alert.Evaluate(ctx, []*DosingDecision{olderDecision, newerDecision}) + Expect(note).To(BeNil()) + note = alert.Evaluate(ctx, []*DosingDecision{wayOldDecision, olderDecision}) + if Expect(note).ToNot(BeNil()) { + Expect(note.Message).To(ContainSubstring(NotLoopingMessage)) + } + }) + + It("logs evaluation results", func() { + ctx := contextWithTestLogger() + data := []*DosingDecision{testDecisionDatum("loop", time.Now())} + alert := testNotLooping() + + Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(func() { + lgr := log.LoggerFromContext(ctx).(*logtest.Logger) + lgr.AssertLog(log.InfoLevel, "not looping", log.Fields{ + "isAlerting?": false, + }) + }).ToNot(Panic()) + }) + Context("when currently active", func() { + It("marks itself resolved", func() { + ctx := contextWithTestLogger() + longAgo := time.Now().Add(-time.Hour) + data := []*DosingDecision{ + testDecisionDatum("loop", longAgo), + } + alert := testNotLooping() + + Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, 
append(data, testDecisionDatum("loop", time.Now()))) + }).ToNot(Panic()) + Expect(alert.Resolved).ToNot(BeZero()) + }) + }) + + Context("when currently INactive", func() { + It("doesn't re-mark itself resolved", func() { + ctx := contextWithTestLogger() + longAgo := time.Now().Add(-time.Hour) + data := []*DosingDecision{ + testDecisionDatum("loop", longAgo), + } + alert := testNotLooping() + + Expect(func() { + alert.Evaluate(ctx, data) + }).ToNot(Panic()) + Expect(alert.Resolved).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, append(data, testDecisionDatum("loop", time.Now()))) + }).ToNot(Panic()) + Expect(alert.IsActive()).To(BeFalse()) + was := alert.Resolved + Expect(func() { + alert.Evaluate(ctx, append(data, testDecisionDatum("loop", time.Now()))) + }).ToNot(Panic()) + Expect(alert.Resolved).To(Equal(was)) + }) + }) + + It("marks itself triggered", func() { + ctx := contextWithTestLogger() + justNow := time.Now().Add(-time.Minute) + dataBefore := []*DosingDecision{ + testDecisionDatum("loop", justNow), + } + longAgo := time.Now().Add(-time.Hour) + dataNow := []*DosingDecision{ + testDecisionDatum("loop", longAgo), + } + alert := testNotLooping() + + Expect(func() { + alert.Evaluate(ctx, dataBefore) + }).ToNot(Panic()) + Expect(alert.Triggered).To(BeZero()) + Expect(func() { + alert.Evaluate(ctx, dataNow) + }).ToNot(Panic()) + Expect(alert.Triggered).ToNot(BeZero()) + }) }) }) @@ -960,7 +1107,7 @@ var ( Base: Base{Enabled: true}, } } - testNoCommunicationDatum = &glucose.Glucose{ + testNoCommunicationDatum = &Glucose{ Blood: blood.Blood{ Base: types.Base{ Time: pointer.FromAny(time.Now()), @@ -969,7 +1116,7 @@ var ( Value: pointer.FromAny(11.0), }, } - testHighDatum = &glucose.Glucose{ + testHighDatum = &Glucose{ Blood: blood.Blood{ Base: types.Base{ Time: pointer.FromAny(time.Now()), @@ -978,7 +1125,7 @@ var ( Value: pointer.FromAny(11.0), }, } - testLowDatum = &glucose.Glucose{ + testLowDatum = &Glucose{ Blood: blood.Blood{ Base: types.Base{ Time: pointer.FromAny(time.Now()), @@ -987,7 +1134,7 @@ var ( Value: pointer.FromAny(3.9), }, } - testUrgentLowDatum = &glucose.Glucose{ + testUrgentLowDatum = &Glucose{ Blood: blood.Blood{ Base: types.Base{ Time: pointer.FromAny(time.Now()), @@ -1063,7 +1210,7 @@ var _ = Describe("Alerts", func() { Context("when not communicating", func() { It("returns only NoCommunication alerts", func() { ctx := contextWithTestLogger() - data := []*glucose.Glucose{testNoCommunicationDatum} + data := []*Glucose{testNoCommunicationDatum} data[0].Value = pointer.FromAny(0.0) a := Alerts{ NoCommunication: testNoCommunicationAlert(), @@ -1084,7 +1231,7 @@ var _ = Describe("Alerts", func() { It("detects low data", func() { ctx := contextWithTestLogger() - data := []*glucose.Glucose{testLowDatum} + data := []*Glucose{testLowDatum} a := Alerts{ Low: testLowAlert(), } @@ -1097,7 +1244,7 @@ var _ = Describe("Alerts", func() { It("detects high data", func() { ctx := contextWithTestLogger() - data := []*glucose.Glucose{testHighDatum} + data := []*Glucose{testHighDatum} a := Alerts{ High: testHighAlert(), } @@ -1111,7 +1258,7 @@ var _ = Describe("Alerts", func() { Context("with both low and urgent low alerts detected", func() { It("prefers urgent low", func() { ctx := contextWithTestLogger() - data := []*glucose.Glucose{testUrgentLowDatum} + data := []*Glucose{testUrgentLowDatum} a := Alerts{ Low: testLowAlert(), UrgentLow: testUrgentLowAlert(), @@ -1204,3 +1351,24 @@ func contextWithTestLogger() context.Context { lgr := logtest.NewLogger() return 
log.NewContextWithLogger(context.Background(), lgr) } + +func testGlucoseDatum(v float64) *Glucose { + return &Glucose{ + Blood: blood.Blood{ + Base: types.Base{ + Time: pointer.FromAny(time.Now()), + }, + Units: pointer.FromAny(nontypesglucose.MmolL), + Value: pointer.FromAny(v), + }, + } +} + +func testDecisionDatum(reason string, t time.Time) *DosingDecision { + return &DosingDecision{ + Base: types.Base{ + Time: &t, + }, + Reason: &reason, + } +} diff --git a/data/events/alerts.go b/data/events/alerts.go index 819db3a1af..c4ba0d0967 100644 --- a/data/events/alerts.go +++ b/data/events/alerts.go @@ -14,6 +14,7 @@ import ( "github.com/tidepool-org/platform/alerts" "github.com/tidepool-org/platform/auth" "github.com/tidepool-org/platform/data/store" + dataTypes "github.com/tidepool-org/platform/data/types" "github.com/tidepool-org/platform/data/types/blood/glucose" "github.com/tidepool-org/platform/data/types/dosingdecision" "github.com/tidepool-org/platform/devicetokens" @@ -94,8 +95,9 @@ func (c *Consumer) consumeAlertsConfigs(ctx context.Context, func (c *Consumer) consumeDeviceData(ctx context.Context, session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) (err error) { - datum := &Glucose{} - if err := unmarshalMessageValue(msg.Value, datum); err != nil { + datum := &dataTypes.Base{} + err = unmarshalMessageValue(msg.Value, datum) + if err != nil { return err } lgr := c.logger(ctx) @@ -289,7 +291,7 @@ func (e *evaluator) gatherData(ctx context.Context, followedUserID, uploadID str longestDelay := slices.MaxFunc(alertsConfigs, func(i, j *alerts.Config) int { return cmp.Compare(i.LongestDelay(), j.LongestDelay()) }).LongestDelay() - longestDelay = max(5*time.Minute, longestDelay) + longestDelay = max(alerts.NotLoopingTriggeredAfter, longestDelay) params := store.AlertableParams{ UserID: followedUserID, UploadID: uploadID,