From 3fd50d48dd2d9811e4a5d3d2149f555c9c865aa7 Mon Sep 17 00:00:00 2001 From: JellyTony Date: Thu, 21 Nov 2024 01:36:19 +0800 Subject: [PATCH 1/2] =?UTF-8?q?feat(logger):=20=E9=87=8D=E6=9E=84=E6=97=A5?= =?UTF-8?q?=E5=BF=97=EF=BC=8C=E5=A2=9E=E5=8A=A0=20handler=20=E8=87=AA?= =?UTF-8?q?=E5=AE=9A=E4=B9=89=E5=A4=84=E7=90=86=E5=87=BD=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 146 ------- fs.go | 40 -- go.mod | 13 +- go.sum | 35 +- handler.go | 16 + internal/bufferpool/bufferpool.go | 31 ++ internal/pool/pool.go | 58 +++ internal/pool/pool_test.go | 106 +++++ internal/stacktrace/stack.go | 181 ++++++++ internal/stacktrace/stack_test.go | 106 +++++ level.go | 92 ---- logger.go | 593 +++++++++++++++++++++++--- logger_test.go | 45 ++ logging.go | 481 --------------------- logging_test.go | 218 ---------- logtest/logtest.go | 66 --- options.go | 390 ++++++++--------- pool.go | 41 -- qodana.yaml | 29 -- rotatelogger.go | 402 ------------------ rotatelogger_test.go | 660 ----------------------------- rotaterule.go | 265 ------------ sugar.go | 671 ++++++++++++++++++++++++++++++ value.go | 23 - 24 files changed, 1936 insertions(+), 2772 deletions(-) delete mode 100644 README.md delete mode 100644 fs.go create mode 100644 handler.go create mode 100644 internal/bufferpool/bufferpool.go create mode 100644 internal/pool/pool.go create mode 100644 internal/pool/pool_test.go create mode 100644 internal/stacktrace/stack.go create mode 100644 internal/stacktrace/stack_test.go delete mode 100644 level.go create mode 100644 logger_test.go delete mode 100644 logging.go delete mode 100644 logging_test.go delete mode 100644 logtest/logtest.go delete mode 100644 pool.go delete mode 100644 qodana.yaml delete mode 100644 rotatelogger.go delete mode 100644 rotatelogger_test.go delete mode 100644 rotaterule.go create mode 100644 sugar.go delete mode 100644 value.go diff --git a/README.md b/README.md deleted file mode 100644 
index eb4210d..0000000 --- a/README.md +++ /dev/null @@ -1,146 +0,0 @@ -# go 日志模块 - -> 基础日志模块,基于 `uber.zap` 封装 - -## 日志级别 - -```go -// DebugLevel level. Usually only enabled when debugging. Very verbose logging. -DebugLevel = iota + 1 -// InfoLevel is the default logging priority. -// General operational entries about what's going on inside the application. -InfoLevel -// WarnLevel level. Non-critical entries that deserve eyes. -WarnLevel -// ErrorLevel level. Logs. Used for errors that should definitely be noted. -ErrorLevel -// FatalLevel level. Logs and then calls `logger.Exit(1)`. highest level of severity. -FatalLevel -``` - -低于级别的日志不会输出,高于级别的日志会输出到对应的文件,建议 `开发环境` 日志级别设置为 `DEBUG`, `线上环境` 日志级别设置为 `INFO` - -## 打印日志方法 - -```go -// Logger is the interface for Logger types -type Logger interface { - // SetLevel set logger level - SetLevel(lv Level) - // WithContext with context - WithContext(ctx context.Context) Logger - // WithFields set fields to always be logged - WithFields(fields map[string]any) Logger - // WithCallDepth with logger call depth. - WithCallDepth(callDepth int) Logger - // Debug uses fmt.Sprint to construct and log a message. - Debug(args ...interface{}) - // Info uses fmt.Sprint to construct and log a message. - Info(args ...interface{}) - // Warn uses fmt.Sprint to construct and log a message. - Warn(args ...interface{}) - // Error uses fmt.Sprint to construct and log a message. - Error(args ...interface{}) - // Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. - Fatal(args ...interface{}) - // Debugf uses fmt.Sprintf to log a templated message. - Debugf(template string, args ...interface{}) - // Infof uses fmt.Sprintf to log a templated message. - Infof(template string, args ...interface{}) - // Warnf uses fmt.Sprintf to log a templated message. - Warnf(template string, args ...interface{}) - // Errorf uses fmt.Sprintf to log a templated message. 
- Errorf(template string, args ...interface{}) - // Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. - Fatalf(template string, args ...interface{}) - // Debugw logs a message with some additional context. The variadic key-value - // pairs are treated as they are in With. - // - // When debug-level logging is disabled, this is much faster than - // s.With(keysAndValues).Debug(msg) - Debugw(msg string, keysAndValues ...interface{}) - // Infow logs a message with some additional context. The variadic key-value - // pairs are treated as they are in With. - Infow(msg string, keysAndValues ...interface{}) - // Warnw logs a message with some additional context. The variadic key-value - // pairs are treated as they are in With. - Warnw(msg string, keysAndValues ...interface{}) - // Errorw logs a message with some additional context. The variadic key-value - // pairs are treated as they are in With. - Errorw(msg string, keysAndValues ...interface{}) - // Fatalw logs a message with some additional context, then calls os.Exit. The - // variadic key-value pairs are treated as they are in With. 
- Fatalw(msg string, keysAndValues ...interface{}) - // Sync logger sync - Sync() error -} -``` - -## 简单使用 - -> 使用日志的时候注意尽量避免使用 `Fatal` 级别的日志,虽然提供了,但是不建议使用,使用 `Fatal` 记录消息后,直接调用 os.Exit(1),这意味着: 在其他 goroutine defer 语句不会被执行; 各种 buffers 不会被 flush,包括日志的; 临时文件或者目录不会被移除; 不要使用 fatal 记录日志,而是向调用者返回错误。如果错误一直持续到 main.main。main.main 那就是在退出之前做处理任何清理操作的正确位置。 - -### 日志初始化 -```go -logger.InitLogger(New( - WithPath("../logs"), - WithLevel(DebugLevel), - WithMode(ConsoleMode), - Fields(map[string]interface{}{ - "app_id": "nextmicro", - "instance_id": "JeffreyBool", - }), -)) -``` - -上面会覆盖日志默认的行为,因为方便使用,默认调用就会初始化 - -### 各种级别日志输出 -```go -func TestDebug(t *testing.T) { - logger.Debug("test debug logger") - logger.Debugf("debug test time:%d", time.Now().Unix()) - logger.Sync() -} - -func TestInfo(t *testing.T) { - logger.Info("测试日志") - logger.Infof("name:%s, age:%d", "pandatv", 14) - logger.Infow("我们都是中国人", "age", 18, "name", "zhanggaoyuan") - logger.Sync() -} - -func TestWarn(t *testing.T) { - logger.Warn("test warn logger") - logger.Warnf("warn test time:%d", time.Now().Unix()) - logger.Sync() -} - -func TestError(t *testing.T) { - logger.Error("test error logger") - logger.Errorf("error test time:%d", time.Now().Unix()) - logger.Sync() -} -``` - -### 日志多实例使用 -> 多实例日志必须设置 `WithFilename(xxx)` 和普通日志区分,这种日志不需要指定级别,全部会输出到一个文件中 -```go -slow := New( - WithPath("../logs"), - WithMode(FileMode), - WithFilename("slow"), - WithFields(map[string]interface{}{ - "app_id": "mt", - "instance_id": "JeffreyBool", - }), -) - -slow.Info(msg) -``` -> 需要外部自己保存日志的对象信息 - -## 注意事项 - -- 不要使用 `Fatal` 级别日志 -- 日志使用完毕后一定要显式调用 `Sync()` 函数将日志落盘 \ No newline at end of file diff --git a/fs.go b/fs.go deleted file mode 100644 index 69f13f8..0000000 --- a/fs.go +++ /dev/null @@ -1,40 +0,0 @@ -package logger - -import ( - "io" - "os" -) - -var fileSys StandardFileSystem - -type ( - FileSystem interface { - Close(closer io.Closer) error - Copy(writer io.Writer, reader io.Reader) (int64, error) - Create(name string) 
(*os.File, error) - Open(name string) (*os.File, error) - Remove(name string) error - } - - StandardFileSystem struct{} -) - -func (fs StandardFileSystem) Close(closer io.Closer) error { - return closer.Close() -} - -func (fs StandardFileSystem) Copy(writer io.Writer, reader io.Reader) (int64, error) { - return io.Copy(writer, reader) -} - -func (fs StandardFileSystem) Create(name string) (*os.File, error) { - return os.Create(name) -} - -func (fs StandardFileSystem) Open(name string) (*os.File, error) { - return os.Open(name) -} - -func (fs StandardFileSystem) Remove(name string) error { - return os.Remove(name) -} diff --git a/go.mod b/go.mod index 07a7684..c38146e 100644 --- a/go.mod +++ b/go.mod @@ -3,22 +3,13 @@ module github.com/nextmicro/logger go 1.18 require ( - github.com/smallnest/ringbuffer v0.0.0-20240827114233-62e3c686e6c0 - github.com/stretchr/testify v1.8.4 - go.opentelemetry.io/otel v1.24.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 - go.opentelemetry.io/otel/sdk v1.24.0 - go.opentelemetry.io/otel/trace v1.24.0 + github.com/stretchr/testify v1.8.1 + go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.uber.org/multierr v1.10.0 // indirect - golang.org/x/sys v0.17.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 76940b2..2e1fc07 100644 --- a/go.sum +++ b/go.sum @@ -1,35 +1,22 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/smallnest/ringbuffer v0.0.0-20240827114233-62e3c686e6c0 h1:6wTlHUWggWb8Y5Q4f7xnIBHa3L7DgijNQP8eM6oTEhQ= -github.com/smallnest/ringbuffer v0.0.0-20240827114233-62e3c686e6c0/go.mod h1:tAG61zBM1DYRaGIPloumExGvScf08oHuo0kFoOqdbT0= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0 h1:s0PHtIkN+3xrbDOpt2M8OTG92cWqUESvzh2MxiR5xY8= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.24.0/go.mod h1:hZlFbDbRt++MMPCCfSJfmhkGIWnX1h3XjkfxZUjLrIA= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/handler.go b/handler.go new file mode 100644 index 0000000..0bf5561 --- /dev/null +++ b/handler.go @@ -0,0 +1,16 @@ +package logger + +import ( + "context" + + "go.uber.org/zap/zapcore" +) + +type Handler interface { + // Handle 
serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Handle(context.Context, *zapcore.CheckedEntry, []zapcore.Field) ([]zapcore.Field, error) +} diff --git a/internal/bufferpool/bufferpool.go b/internal/bufferpool/bufferpool.go new file mode 100644 index 0000000..dad583a --- /dev/null +++ b/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. 
+ Get = _pool.Get +) diff --git a/internal/pool/pool.go b/internal/pool/pool.go new file mode 100644 index 0000000..60e9d2c --- /dev/null +++ b/internal/pool/pool.go @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package pool provides internal pool utilities. +package pool + +import ( + "sync" +) + +// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed +// object pooling. +// +// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will +// not be detected, so all internal pool use must take care to only store +// pointer types. +type Pool[T any] struct { + pool sync.Pool +} + +// New returns a new [Pool] for T, and will use fn to construct new Ts when +// the pool is empty. 
+func New[T any](fn func() T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return fn() + }, + }, + } +} + +// Get gets a T from the pool, or creates a new one if the pool is empty. +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) +} + +// Put returns x into the pool. +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) +} diff --git a/internal/pool/pool_test.go b/internal/pool/pool_test.go new file mode 100644 index 0000000..bcc9fff --- /dev/null +++ b/internal/pool/pool_test.go @@ -0,0 +1,106 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package pool_test + +import ( + "runtime/debug" + "sync" + "testing" + + "github.com/nextmicro/logger/internal/pool" + "github.com/stretchr/testify/require" +) + +type pooledValue[T any] struct { + value T +} + +func TestNew(t *testing.T) { + // Disable GC to avoid the victim cache during the test. 
+ defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + p := pool.New(func() *pooledValue[string] { + return &pooledValue[string]{ + value: "new", + } + }) + + // Probabilistically, 75% of sync.Pool.Put calls will succeed when -race + // is enabled (see ref below); attempt to make this quasi-deterministic by + // brute force (i.e., put significantly more objects in the pool than we + // will need for the test) in order to avoid testing without race enabled. + // + // ref: https://cs.opensource.google/go/go/+/refs/tags/go1.20.2:src/sync/pool.go;l=100-103 + for i := 0; i < 1_000; i++ { + p.Put(&pooledValue[string]{ + value: t.Name(), + }) + } + + // Ensure that we always get the expected value. Note that this must only + // run a fraction of the number of times that Put is called above. + for i := 0; i < 10; i++ { + func() { + x := p.Get() + defer p.Put(x) + require.Equal(t, t.Name(), x.value) + }() + } + + // Depool all objects that might be in the pool to ensure that it's empty. + for i := 0; i < 1_000; i++ { + p.Get() + } + + // Now that the pool is empty, it should use the value specified in the + // underlying sync.Pool.New func. + require.Equal(t, "new", p.Get().value) +} + +func TestNew_Race(t *testing.T) { + p := pool.New(func() *pooledValue[int] { + return &pooledValue[int]{ + value: -1, + } + }) + + var wg sync.WaitGroup + defer wg.Wait() + + // Run a number of goroutines that read and write pool object fields to + // tease out races. + for i := 0; i < 1_000; i++ { + i := i + + wg.Add(1) + go func() { + defer wg.Done() + + x := p.Get() + defer p.Put(x) + + // Must both read and write the field. + if n := x.value; n >= -1 { + x.value = i + } + }() + } +} diff --git a/internal/stacktrace/stack.go b/internal/stacktrace/stack.go new file mode 100644 index 0000000..01ed551 --- /dev/null +++ b/internal/stacktrace/stack.go @@ -0,0 +1,181 @@ +// Copyright (c) 2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package stacktrace provides support for gathering stack traces +// efficiently. +package stacktrace + +import ( + "runtime" + + "github.com/nextmicro/logger/internal/bufferpool" + "github.com/nextmicro/logger/internal/pool" + "go.uber.org/zap/buffer" +) + +var _stackPool = pool.New(func() *Stack { + return &Stack{ + storage: make([]uintptr, 64), + } +}) + +// Stack is a captured stack trace. +type Stack struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if the only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. + // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. 
+ storage []uintptr +} + +// Depth specifies how deep of a stack trace should be captured. +type Depth int + +const ( + // First captures only the first frame. + First Depth = iota + + // Full captures the entire call stack, allocating more + // storage for it if needed. + Full +) + +// Capture captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// Capture. +// +// The caller must call Free on the returned stacktrace after using it. +func Capture(skip int, depth Depth) *Stack { + stack := _stackPool.Get() + + switch depth { + case First: + stack.pcs = stack.storage[:1] + case Full: + stack.pcs = stack.storage + } + + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip captureStacktrace and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) + + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. + if depth == Full { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) + } + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. + stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] + } + + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it back to the pool. +func (st *Stack) Free() { + st.frames = nil + st.pcs = nil + _stackPool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. 
+func (st *Stack) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *Stack) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +// Take returns a string representation of the current stacktrace. +// +// skip is the number of frames to skip before recording the stack trace. +// skip=0 identifies the caller of Take. +func Take(skip int) string { + stack := Capture(skip+1, Full) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := NewFormatter(buffer) + stackfmt.FormatStack(stack) + return buffer.String() +} + +// Formatter formats a stack trace into a readable string representation. +type Formatter struct { + b *buffer.Buffer + nonEmpty bool // whehther we've written at least one frame already +} + +// NewFormatter builds a new Formatter. +func NewFormatter(b *buffer.Buffer) Formatter { + return Formatter{b: b} +} + +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *Formatter) FormatStack(stack *Stack) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } +} + +// FormatFrame formats the given frame. 
+func (sf *Formatter) FormatFrame(frame runtime.Frame) { + if sf.nonEmpty { + sf.b.AppendByte('\n') + } + sf.nonEmpty = true + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) +} diff --git a/internal/stacktrace/stack_test.go b/internal/stacktrace/stack_test.go new file mode 100644 index 0000000..fe4f60f --- /dev/null +++ b/internal/stacktrace/stack_test.go @@ -0,0 +1,106 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package stacktrace + +import ( + "bytes" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTake(t *testing.T) { + trace := Take(0) + lines := strings.Split(trace, "\n") + require.NotEmpty(t, lines, "Expected stacktrace to have at least one frame.") + assert.Contains( + t, + lines[0], + "github.com/nextmicro/logger/internal/stacktrace.TestTake", + "Expected stacktrace to start with the test.", + ) +} + +func TestTakeWithSkip(t *testing.T) { + trace := Take(1) + lines := strings.Split(trace, "\n") + require.NotEmpty(t, lines, "Expected stacktrace to have at least one frame.") + assert.Contains( + t, + lines[0], + "testing.", + "Expected stacktrace to start with the test runner (skipping our own frame).", + ) +} + +func TestTakeWithSkipInnerFunc(t *testing.T) { + var trace string + func() { + trace = Take(2) + }() + lines := strings.Split(trace, "\n") + require.NotEmpty(t, lines, "Expected stacktrace to have at least one frame.") + assert.Contains( + t, + lines[0], + "testing.", + "Expected stacktrace to start with the test function (skipping the test function).", + ) +} + +func TestTakeDeepStack(t *testing.T) { + const ( + N = 500 + withStackDepthName = "github.com/nextmicro/logger/internal/stacktrace.withStackDepth" + ) + withStackDepth(N, func() { + trace := Take(0) + for found := 0; found < N; found++ { + i := strings.Index(trace, withStackDepthName) + if i < 0 { + t.Fatalf(`expected %v occurrences of %q, found %d`, + N, withStackDepthName, found) + } + trace = trace[i+len(withStackDepthName):] + } + }) +} + +func BenchmarkTake(b *testing.B) { + for i := 0; i < b.N; i++ { + Take(0) + } +} + +func withStackDepth(depth int, f func()) { + var recurse func(rune) rune + recurse = func(r rune) rune { + if r > 0 { + bytes.Map(recurse, []byte(string([]rune{r - 1}))) + } else { + f() + } + return 0 + } + recurse(rune(depth)) +} diff --git a/level.go b/level.go deleted file mode 100644 index 
c4e40f5..0000000 --- a/level.go +++ /dev/null @@ -1,92 +0,0 @@ -package logger - -import ( - "strings" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -type Level int8 - -const ( - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel = iota + 1 - // InfoLevel is the default logging priority. - // General operational entries about what's going on inside the application. - InfoLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - ErrorLevel - // FatalLevel level. Logs and then calls `logger.Exit(1)`. highest level of severity. - FatalLevel -) - -func (l Level) String() string { - switch l { - case DebugLevel: - return "DEBUG" - case InfoLevel: - return "INFO" - case WarnLevel: - return "WARN" - case ErrorLevel: - return "ERROR" - case FatalLevel: - return "FATAL" - } - return "" -} - -// ParseLevel parses a level string into a logger Level value. -func ParseLevel(s string) Level { - switch strings.ToUpper(s) { - case "DEBUG": - return DebugLevel - case "INFO": - return InfoLevel - case "WARN": - return WarnLevel - case "ERROR": - return ErrorLevel - case "FATAL": - return FatalLevel - } - return InfoLevel -} - -func (l Level) unmarshalZapLevel() zapcore.Level { - switch l { - case DebugLevel: - return zap.DebugLevel - case InfoLevel: - return zap.InfoLevel - case WarnLevel: - return zap.WarnLevel - case ErrorLevel: - return zap.ErrorLevel - case FatalLevel: - return zap.FatalLevel - default: - return zap.InfoLevel - } -} - -// Enabled returns true if the given level is at or above this level. -func (l Level) Enabled(lvl Level) bool { - return lvl >= l -} - -// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with -// an anonymous function. -// -// It's particularly useful when splitting log output between different -// outputs (e.g., standard error and standard out). 
For sample code, see the -// package-level AdvancedConfiguration example. -type LevelEnablerFunc func(zapcore.Level) bool - -// Enabled calls the wrapped function. -func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { - return f(lvl) -} diff --git a/logger.go b/logger.go index 853bdc4..d076d43 100644 --- a/logger.go +++ b/logger.go @@ -1,63 +1,548 @@ package logger +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + import ( "context" - "errors" -) + "fmt" + "io" + "os" + "strings" -var ( - // ErrLogPathNotSet is an error that indicates the log path is not set. 
- ErrLogPathNotSet = errors.New("log path must be set") + "github.com/nextmicro/logger/internal/bufferpool" + "github.com/nextmicro/logger/internal/stacktrace" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) -// Logger is the interface for Logger types -type Logger interface { - // SetLevel set logger level - SetLevel(lv Level) - // WithContext with context - WithContext(ctx context.Context) Logger - // WithFields set fields to always be logged - WithFields(fields map[string]any) Logger - // WithCallDepth with logger call depth. - WithCallDepth(callDepth int) Logger - // Debug uses fmt.Sprint to construct and log a message. - Debug(args ...interface{}) - // Info uses fmt.Sprint to construct and log a message. - Info(args ...interface{}) - // Warn uses fmt.Sprint to construct and log a message. - Warn(args ...interface{}) - // Error uses fmt.Sprint to construct and log a message. - Error(args ...interface{}) - // Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. - Fatal(args ...interface{}) - // Debugf uses fmt.Sprintf to log a templated message. - Debugf(template string, args ...interface{}) - // Infof uses fmt.Sprintf to log a templated message. - Infof(template string, args ...interface{}) - // Warnf uses fmt.Sprintf to log a templated message. - Warnf(template string, args ...interface{}) - // Errorf uses fmt.Sprintf to log a templated message. - Errorf(template string, args ...interface{}) - // Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. - Fatalf(template string, args ...interface{}) - // Debugw logs a message with some additional context. The variadic key-value - // pairs are treated as they are in With. +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. 
For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + addCaller bool + handler Handler + onPanic zapcore.CheckWriteHook // default is WriteThenPanic + onFatal zapcore.CheckWriteHook // default is WriteThenFatal + + name string + errorOutput zapcore.WriteSyncer + + addStack zapcore.LevelEnabler + + callerSkip int + + clock zapcore.Clock +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(io.Discard), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } +} + +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. 
It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, zapcore.DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. 
+func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. Any fields that +// require evaluation (such as Objects) are evaluated upon invocation of With. +func (log *Logger) With(fields ...zapcore.Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// WithLazy creates a child logger and adds structured context to it lazily. +// +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// WithLazy provides a worthwhile performance optimization for contextual loggers +// when the likelihood of using the child logger is low, +// such as error paths and rarely taken branches. +// +// Similar to [With], fields added to the child don't affect the parent, and vice versa. +func (log *Logger) WithLazy(fields ...zapcore.Field) *Logger { + if len(fields) == 0 { + return log + } + return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewLazyWith(core, fields) + })) +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. 
+func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of Log. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...zapcore.Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + +// LogContext logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of LogContext. +func (log *Logger) LogContext(ctx context.Context, lvl zapcore.Level, msg string, fields ...zapcore.Field) { + if ce := log.check(lvl, msg); ce != nil { + _fields, err := log.handler.Handle(ctx, ce, fields) + if err != nil { + log.Error("Error occurred while handling log context", zap.Error(err)) + } else { + fields = append(fields, _fields...) + } + ce.Write(fields...) + } +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DebugContext logs a message at DebugLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +func (log *Logger) DebugContext(ctx context.Context, msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.DebugLevel, msg); ce != nil { + _fields, err := log.handler.Handle(ctx, ce, fields) + if err != nil { + log.Error("Error occurred while handling log context", zap.Error(err)) + } else { + fields = append(fields, _fields...) 
+ } + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// InfoContext logs a message at InfoLevel. The message includes any fields +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) InfoContext(ctx context.Context, msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.InfoLevel, msg); ce != nil { + _fields, err := log.handler.Handle(ctx, ce, fields) + if err != nil { + log.Error("Error occurred while handling log context", zap.Error(err)) + } else { + fields = append(fields, _fields...) + } + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// WarnContext logs a message at WarnLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +func (log *Logger) WarnContext(ctx context.Context, msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.WarnLevel, msg); ce != nil { + _fields, err := log.handler.Handle(ctx, ce, fields) + if err != nil { + log.Error("Error occurred while handling log context", zap.Error(err)) + } else { + fields = append(fields, _fields...) + } + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.ErrorLevel, msg); ce != nil { + ce.Write(fields...) 
+ } +} + +// ErrorContext logs a message at ErrorLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +func (log *Logger) ErrorContext(ctx context.Context, msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.ErrorLevel, msg); ce != nil { + _fields, err := log.handler.Handle(ctx, ce, fields) + if err != nil { + log.Error("Error occurred while handling log context", zap.Error(err)) + } else { + fields = append(fields, _fields...) + } + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanicContext logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanicContext(ctx context.Context, msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.DPanicLevel, msg); ce != nil { + _fields, err := log.handler.Handle(ctx, ce, fields) + if err != nil { + log.Error("Error occurred while handling log context", zap.Error(err)) + } else { + fields = append(fields, _fields...) + } + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. 
+func (log *Logger) Panic(msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.PanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// PanicContext logs a message at PanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. +func (log *Logger) PanicContext(ctx context.Context, msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.PanicLevel, msg); ce != nil { + _fields, err := log.handler.Handle(ctx, ce, fields) + if err != nil { + log.Error("Error occurred while handling log context", zap.Error(err)) + } else { + fields = append(fields, _fields...) + } + ce.Write(fields...) + } +} + +// Fatal logs a message at FatalLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func (log *Logger) Fatal(msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.FatalLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// FatalContext logs a message at FatalLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func (log *Logger) FatalContext(ctx context.Context, msg string, fields ...zapcore.Field) { + if ce := log.check(zapcore.FatalLevel, msg); ce != nil { + _fields, err := log.handler.Handle(ctx, ce, fields) + if err != nil { + log.Error("Error occurred while handling log context", zap.Error(err)) + } else { + fields = append(fields, _fields...) + } + } +} + +// Sync calls the underlying Core's Sync method, flushing any buffered log +// entries. Applications should take care to call Sync before exiting. 
+func (log *Logger) Sync() error { + return log.core.Sync() +} + +// Core returns the Logger's underlying zapcore.Core. +func (log *Logger) Core() zapcore.Core { + return log.core +} + +// Name returns the Logger's underlying name, +// or an empty string if the logger is unnamed. +func (log *Logger) Name() string { + return log.name +} + +func (log *Logger) clone() *Logger { + clone := *log + return &clone +} + +func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + // Logger.check must always be called directly by a method in the + // Logger interface (e.g., Check, Info, Fatal). + // This skips Logger.check and the Info/Fatal/Check/etc. method that + // called it. + const callerSkipOffset = 2 + + // Check the level first to reduce the cost of disabled log calls. + // Since Panic and higher may exit, we skip the optimization for those levels. + if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { + return nil + } + + // Create basic checked entry thru the core; this will be non-nil if the + // log message will actually be written somewhere. + ent := zapcore.Entry{ + LoggerName: log.name, + Time: log.clock.Now(), + Level: lvl, + Message: msg, + } + ce := log.core.Check(ent, nil) + willWrite := ce != nil + + // Set up any required terminal behavior. + switch ent.Level { + case zapcore.PanicLevel: + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) + case zapcore.FatalLevel: + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) + case zapcore.DPanicLevel: + if log.development { + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) + } + } + + // Only do further annotation if we're going to write this message; checked + // entries that exist only for terminal behavior don't benefit from + // annotation. + if !willWrite { + return ce + } + + // Thread the error output through to the CheckedEntry. 
+ ce.ErrorOutput = log.errorOutput + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. We'll share information between these two. + stackDepth := stacktrace.First + if addStack { + stackDepth = stacktrace.Full + } + stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { + _, _ = fmt.Fprintf( + log.errorOutput, + "%v Logger.check error: failed to get caller\n", + ent.Time.UTC(), + ) + _ = log.errorOutput.Sync() + } + return ce + } + + frame, more := stack.Next() + + if log.addCaller { + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + } + + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() + + stackFmt := stacktrace.NewFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. + stackFmt.FormatFrame(frame) + if more { + stackFmt.FormatStack(stack) + } + ce.Stack = buffer.String() + } + + return ce +} + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) // - // When debug-level logging is disabled, this is much faster than - // s.With(keysAndValues).Debug(msg) - Debugw(msg string, keysAndValues ...interface{}) - // Infow logs a message with some additional context. The variadic key-value - // pairs are treated as they are in With. - Infow(msg string, keysAndValues ...interface{}) - // Warnw logs a message with some additional context. 
The variadic key-value - // pairs are treated as they are in With. - Warnw(msg string, keysAndValues ...interface{}) - // Errorw logs a message with some additional context. The variadic key-value - // pairs are treated as they are in With. - Errorw(msg string, keysAndValues ...interface{}) - // Fatalw logs a message with some additional context, then calls os.Exit. The - // variadic key-value pairs are treated as they are in With. - Fatalw(msg string, keysAndValues ...interface{}) - // Sync logger sync - Sync() error + // The f.Name() will panic if we continue execution after the log.Fatal. + if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override } diff --git a/logger_test.go b/logger_test.go new file mode 100644 index 0000000..a19c078 --- /dev/null +++ b/logger_test.go @@ -0,0 +1,45 @@ +package logger + +import ( + "errors" + "log/slog" + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestNopLogger(t *testing.T) { + logger := NewNop() + + t.Run("basic levels", func(t *testing.T) { + logger.Debug("foo", zap.String("k", "v")) + logger.Info("bar", zap.Int("x", 42)) + logger.Warn("baz", zap.Strings("ks", []string{"a", "b"})) + logger.Error("qux", zap.Error(errors.New("great sadness"))) + }) + + t.Run("DPanic", func(t *testing.T) { + logger.With(zap.String("component", "whatever")).DPanic("stuff") + }) + + t.Run("Panic", func(t *testing.T) { + assert.Panics(t, func() { + logger.Panic("great sadness") + }, "Nop logger should still cause panics.") + }) +} + +func TestNewExample(t *testing.T) { + logger := slog.New( + slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{ + AddSource: true, + Level: slog.LevelInfo, + }), + ) + logger.Info("finished", + slog.Int("status", http.StatusOK), + ) +} diff --git a/logging.go b/logging.go deleted file mode 100644 index 459d4d4..0000000 --- a/logging.go +++ /dev/null @@ -1,481 +0,0 @@ -package logger - -import ( - "bytes" - "context" 
- "io" - "os" - "path" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -var _ Logger = (*Logging)(nil) - -// DefaultLogger is default logger. -var DefaultLogger Logger = New() - -type Logging struct { - opt Options - atomicLevel zap.AtomicLevel - lg *zap.SugaredLogger - - _rollingFiles []zapcore.WriteSyncer -} - -// WrappedWriteSyncer is a helper struct implementing zapcore.WriteSyncer to -// wrap a standard os.Stdout handle, giving control over the WriteSyncer's -// Sync() function. Sync() results in an error on Windows in combination with -// os.Stdout ("sync /dev/stdout: The handle is invalid."). WrappedWriteSyncer -// simply does nothing when Sync() is called by Zap. -type WrappedWriteSyncer struct { - file io.Writer -} - -func (mws WrappedWriteSyncer) Write(p []byte) (n int, err error) { - return mws.file.Write(p) -} -func (mws WrappedWriteSyncer) Sync() error { - return nil -} - -// NonColorable holds writer but removes escape sequence. -type NonColorable struct { - out zapcore.WriteSyncer -} - -// NewNonColorable returns new instance of Writer which removes escape sequence from Writer. -func NewNonColorable(w zapcore.WriteSyncer) io.Writer { - return &NonColorable{out: w} -} - -// Write writes data on console -func (w *NonColorable) Write(data []byte) (n int, err error) { - er := bytes.NewReader(data) - var plaintext bytes.Buffer -loop: - for { - c1, err := er.ReadByte() - if err != nil { - plaintext.WriteTo(w.out) - break loop - } - if c1 != 0x1b { - plaintext.WriteByte(c1) - continue - } - _, err = plaintext.WriteTo(w.out) - if err != nil { - break loop - } - c2, err := er.ReadByte() - if err != nil { - break loop - } - if c2 != 0x5b { - continue - } - - for { - c, err := er.ReadByte() - if err != nil { - break loop - } - if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { - break - } - } - } - - return len(data), nil -} - -// Sync flushes the buffer. 
-func (w *NonColorable) Sync() error { - return w.out.Sync() -} - -func New(opts ...Option) *Logging { - opt := newOptions(opts...) - l := &Logging{ - opt: opt, - atomicLevel: zap.NewAtomicLevelAt(opt.level.unmarshalZapLevel()), - } - if err := l.build(); err != nil { - panic(err) - } - return l -} - -func (l *Logging) LevelEnablerFunc(level zapcore.Level) LevelEnablerFunc { - return func(lvl zapcore.Level) bool { - if level == zapcore.FatalLevel { - return l.atomicLevel.Level() <= level && lvl >= level - } - return l.atomicLevel.Level() <= level && lvl == level - } -} - -func (l *Logging) build() error { - var ( - cores []zapcore.Core - ) - - switch l.opt.mode { - case FileMode: - var _cores []zapcore.Core - if l.opt.writer != nil { - _cores = l.buildCustomWriter() - } else if l.opt.filename != "" { - _cores = l.buildFile() - } else { - _cores = l.buildFiles() - } - if len(_cores) > 0 { - cores = append(cores, _cores...) - } - default: - _cores := l.buildConsole() - if len(_cores) > 0 { - cores = append(cores, _cores...) - } - } - - zapLog := zap.New(zapcore.NewTee(cores...), zap.AddCaller(), zap.AddCallerSkip(l.opt.callerSkip)).Sugar() - if len(l.opt.fields) > 0 { - zapLog = zapLog.With(CopyFields(l.opt.fields)...) - } - if l.opt.namespace != "" { - zapLog = zapLog.With(zap.Namespace(l.opt.namespace)) - } - - l.lg = zapLog - return nil -} - -// buildConsole build console. -func (l *Logging) buildConsole() []zapcore.Core { - var ( - sync zapcore.WriteSyncer - enc zapcore.Encoder - ) - - if l.opt.encoder.IsConsole() { - enc = zapcore.NewConsoleEncoder(l.opt.encoderConfig) - } else { - enc = zapcore.NewJSONEncoder(l.opt.encoderConfig) - } - - if l.opt.writer != nil { - sync = zapcore.AddSync(l.opt.writer) - } else { - sync = zapcore.AddSync(WrappedWriteSyncer{os.Stdout}) - } - return []zapcore.Core{zapcore.NewCore(enc, sync, l.atomicLevel)} -} - -// buildCustomWriter build custom writer. 
-func (l *Logging) buildCustomWriter() []zapcore.Core { - syncer := l.opt.writer - if syncer == nil { - syncer = zapcore.AddSync(WrappedWriteSyncer{os.Stdout}) - } - - var enc zapcore.Encoder - if l.opt.encoder.IsConsole() { - enc = zapcore.NewConsoleEncoder(l.opt.encoderConfig) - } else { - enc = zapcore.NewJSONEncoder(l.opt.encoderConfig) - } - - return []zapcore.Core{zapcore.NewCore(enc, zapcore.AddSync(syncer), l.atomicLevel)} -} - -// buildFile build rolling file. -func (l *Logging) buildFile() []zapcore.Core { - _ = l.Sync() - var enc zapcore.Encoder - if l.opt.encoder.IsConsole() { - enc = zapcore.NewConsoleEncoder(l.opt.encoderConfig) - } else { - enc = zapcore.NewJSONEncoder(l.opt.encoderConfig) - } - - syncerRolling := l.createOutput(path.Join(l.opt.path, l.opt.filename)) - l._rollingFiles = append(l._rollingFiles, []zapcore.WriteSyncer{syncerRolling}...) - return []zapcore.Core{zapcore.NewCore(enc, syncerRolling, l.atomicLevel)} -} - -// buildFiles build rolling files. -func (l *Logging) buildFiles() []zapcore.Core { - var ( - cores = make([]zapcore.Core, 0, 5) - syncerRollingDebug, syncerRollingInfo, syncerRollingWarn, - syncerRollingError, syncerRollingFatal zapcore.WriteSyncer - ) - - var enc zapcore.Encoder - if l.opt.encoder.IsConsole() { - enc = zapcore.NewConsoleEncoder(l.opt.encoderConfig) - } else { - enc = zapcore.NewJSONEncoder(l.opt.encoderConfig) - } - - if err := l.Sync(); err != nil { - return nil - } - - syncerRollingDebug = l.createOutput(path.Join(l.opt.path, debugFilename)) - - syncerRollingInfo = l.createOutput(path.Join(l.opt.path, infoFilename)) - - syncerRollingWarn = l.createOutput(path.Join(l.opt.path, warnFilename)) - - syncerRollingError = l.createOutput(path.Join(l.opt.path, errorFilename)) - - syncerRollingFatal = l.createOutput(path.Join(l.opt.path, fatalFilename)) - - cores = append(cores, - zapcore.NewCore(enc, syncerRollingDebug, l.LevelEnablerFunc(zap.DebugLevel)), - zapcore.NewCore(enc, syncerRollingInfo, 
l.LevelEnablerFunc(zap.InfoLevel)), - zapcore.NewCore(enc, syncerRollingWarn, l.LevelEnablerFunc(zap.WarnLevel)), - zapcore.NewCore(enc, syncerRollingError, l.LevelEnablerFunc(zap.ErrorLevel)), - zapcore.NewCore(enc, syncerRollingFatal, l.LevelEnablerFunc(zap.FatalLevel)), - ) - - l._rollingFiles = append(l._rollingFiles, []zapcore.WriteSyncer{syncerRollingDebug, syncerRollingInfo, syncerRollingWarn, syncerRollingError, syncerRollingFatal}...) - return cores -} - -func (l *Logging) createOutput(filename string) zapcore.WriteSyncer { - var rule = DefaultRotateRule(filename, backupFileDelimiter, l.opt.keepDays, l.opt.compress) - switch l.opt.rotation { - case sizeRotationRule: - rule = NewSizeLimitRotateRule(filename, backupFileDelimiter, l.opt.keepDays, l.opt.maxSize, l.opt.maxBackups, l.opt.compress) - case hourRotationRule: - rule = NewHourRotateRule(filename, backupFileDelimiter, l.opt.keepHours, l.opt.compress) - } - - log, err := NewRotateLogger(filename, rule, l.opt.compress) - if err != nil { - panic(err) - } - - return zapcore.AddSync(NewNonColorable(log)) -} - -func CopyFields(fields map[string]interface{}) []interface{} { - dst := make([]interface{}, 0, len(fields)*2) - for k, v := range fields { - dst = append(dst, k, v) - } - return dst -} - -func (l *Logging) WithContext(ctx context.Context) Logger { - spanId := SpanID(ctx) - traceId := TraceID(ctx) - fields := make([]interface{}, 0, 4) - if len(spanId) > 0 { - fields = append(fields, spanKey, spanId) - } - if len(traceId) > 0 { - fields = append(fields, traceKey, traceId) - } - - logger := &Logging{ - opt: l.opt, - atomicLevel: l.atomicLevel, - lg: l.lg.With(fields...).WithOptions(zap.AddCallerSkip(0)), - } - return logger -} - -func (l *Logging) WithFields(fields map[string]any) Logger { - return &Logging{ - opt: l.opt, - atomicLevel: l.atomicLevel, - lg: l.lg.With(CopyFields(fields)...).WithOptions(zap.AddCallerSkip(0)), - } -} - -func (l *Logging) WithCallDepth(callDepth int) Logger { - return 
&Logging{ - opt: l.opt, - atomicLevel: l.atomicLevel, - lg: l.lg.WithOptions(zap.AddCallerSkip(callDepth)), - } -} - -func (l *Logging) Options() Options { - return l.opt -} - -func (l *Logging) SetLevel(lv Level) { - l.opt.level = lv - l.atomicLevel.SetLevel(lv.unmarshalZapLevel()) -} - -func (l *Logging) Clone() *Logging { - _copy := *l - return &_copy -} - -func (l *Logging) Debug(args ...interface{}) { - l.lg.Debug(args...) -} - -func (l *Logging) Info(args ...interface{}) { - l.lg.Info(args...) -} - -func (l *Logging) Warn(args ...interface{}) { - l.lg.Warn(args...) -} - -func (l *Logging) Error(args ...interface{}) { - l.lg.Error(args...) -} - -func (l *Logging) Fatal(args ...interface{}) { - l.lg.Fatal(args...) -} - -func (l *Logging) Debugf(template string, args ...interface{}) { - l.lg.Debugf(template, args...) -} - -func (l *Logging) Infof(template string, args ...interface{}) { - l.lg.Infof(template, args...) -} - -func (l *Logging) Warnf(template string, args ...interface{}) { - l.lg.Warnf(template, args...) -} - -func (l *Logging) Errorf(template string, args ...interface{}) { - l.lg.Errorf(template, args...) -} - -func (l *Logging) Fatalf(template string, args ...interface{}) { - l.lg.Fatalf(template, args...) -} - -func (l *Logging) Debugw(msg string, keysAndValues ...interface{}) { - l.lg.Debugw(msg, keysAndValues...) -} - -func (l *Logging) Infow(msg string, keysAndValues ...interface{}) { - l.lg.Infow(msg, keysAndValues...) -} - -func (l *Logging) Warnw(msg string, keysAndValues ...interface{}) { - l.lg.Warnw(msg, keysAndValues...) -} - -func (l *Logging) Errorw(msg string, keysAndValues ...interface{}) { - l.lg.Errorw(msg, keysAndValues...) -} - -func (l *Logging) Fatalw(msg string, keysAndValues ...interface{}) { - l.lg.Fatalw(msg, keysAndValues...) 
-} - -func (l *Logging) Sync() (err error) { - if l.lg == nil { - return - } - - for _, w := range l._rollingFiles { - err = w.Sync() - } - - err = l.lg.Sync() - return -} - -// WithCallDepth returns a shallow copy of l with its caller skip -func WithCallDepth(callDepth int) Logger { - return DefaultLogger.WithCallDepth(callDepth) -} - -// WithContext returns a shallow copy of l with its context changed -// to ctx. The provided ctx must be non-nil. -func WithContext(ctx context.Context) Logger { - return DefaultLogger.WithContext(ctx) -} - -// WithFields is a helper to create a []interface{} of key-value pairs. -func WithFields(fields map[string]interface{}) Logger { - return DefaultLogger.WithFields(fields) -} - -// SetLevel set logger level -func SetLevel(lv Level) { - DefaultLogger.SetLevel(lv) -} - -func Debug(args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Debug(args...) -} - -func Info(args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Info(args...) -} - -func Warn(args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Warn(args...) -} - -func Error(args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Error(args...) -} - -func Fatal(args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Fatal(args...) -} - -func Debugf(template string, args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Debugf(template, args...) -} - -func Infof(template string, args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Infof(template, args...) -} - -func Warnf(template string, args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Warnf(template, args...) -} - -func Errorf(template string, args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Errorf(template, args...) -} - -func Fatalf(template string, args ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Fatalf(template, args...) 
-} - -func Debugw(msg string, keysAndValues ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Debugw(msg, keysAndValues...) -} - -func Infow(msg string, keysAndValues ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Infow(msg, keysAndValues...) -} - -func Warnw(msg string, keysAndValues ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Warnw(msg, keysAndValues...) -} - -func Errorw(msg string, keysAndValues ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Errorw(msg, keysAndValues...) -} - -func Fatalw(msg string, keysAndValues ...interface{}) { - DefaultLogger.WithCallDepth(callerSkipOffset).Fatalw(msg, keysAndValues...) -} - -func Sync() error { - return DefaultLogger.Sync() -} diff --git a/logging_test.go b/logging_test.go deleted file mode 100644 index 2551740..0000000 --- a/logging_test.go +++ /dev/null @@ -1,218 +0,0 @@ -package logger_test - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/nextmicro/logger" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" - "go.opentelemetry.io/otel/propagation" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" -) - -func TestMain(t *testing.M) { - exporter, err := stdouttrace.New() - if err != nil { - panic(err) - } - - options := make([]sdktrace.TracerProviderOption, 0) - options = append(options, sdktrace.WithBatcher(exporter)) - provider := sdktrace.NewTracerProvider(options...) 
- otel.SetTracerProvider(provider) - - otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator( - propagation.TraceContext{}, propagation.Baggage{})) - otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { - logger.Errorf("[otel] error: %v", err) - })) - - t.Run() -} - -func TestLogging_WithContext(t *testing.T) { - logging := logger.New() - ctx, span := otel.Tracer("gokit").Start(context.TODO(), "HTTP Client Get /api/get") - defer span.End() - - spanContext := trace.SpanContextFromContext(ctx) - t.Logf("trace_id: %s", spanContext.TraceID().String()) - t.Logf("span_id: %s", spanContext.SpanID().String()) - - logging.WithContext(ctx).WithFields(map[string]any{ - "11": 222, - }).Info("TestDefault_WithContext") -} - -func TestLogging_WithFields(t *testing.T) { - logging := logger.New() - logging.WithFields(map[string]interface{}{ - "age": 22, - "order": 100, - }).Info("TestDefault_WithFields") -} - -func TestLogging_Debug(t *testing.T) { - logging := logger.New() - logging.WithFields(map[string]interface{}{ - "age": 22, - "order": 100, - }).Debug("TestDefault_WithFields") - - logging.SetLevel(logger.DebugLevel) - - logging.WithFields(map[string]interface{}{ - "age": 22, - "order": 100, - }).Debug("TestDefault_WithFields") -} - -func TestLogger(t *testing.T) { - // Create a new logger for testing - log := logger.New() - - // Test logging at different levels - log.Debug("Debug message") - log.Info("Info message") - log.Warn("Warning message") - log.Error("Error message") - //log.Fatal("Fatal message") - - // Test formatting - log.Debugf("Debug message with format: %s", "formatted") - log.Infof("Info message with format: %s", "formatted") - log.Warnf("Warning message with format: %s", "formatted") - log.Errorf("Error message with format: %s", "formatted") - //log.Fatalf("Fatal message with format: %s", "formatted") - - // Test log with fields - fields := map[string]interface{}{ - "user": "john_doe", - "status": "failed", - } - 
log.WithFields(fields).Info("User login failed") - - // Test setting log level - log.SetLevel(logger.DebugLevel) - log.Debug("Debug message should be visible") - log.SetLevel(logger.InfoLevel) - log.Debug("Debug message should not be visible") - - // Test syncing - err := logger.Sync() - if err != nil { - t.Errorf("Error syncing logger: %v", err) - } - - // Add more test cases as needed -} - -func TestLoggerWithContext(t *testing.T) { - // Create a context with some values for testing - ctx := context.WithValue(context.Background(), "spanId", "123") - ctx = context.WithValue(ctx, "traceId", "456") - - // Create a logger with context - log := logger.DefaultLogger.WithContext(ctx) - - // Test logging with context-based fields - log.Info("Logging with context") - - // Add more test cases as needed -} - -func TestLoggerWithFields(t *testing.T) { - // Create a logger with some initial fields - fields := map[string]interface{}{ - "app": "my_app", - "version": "1.0", - } - logger := logger.DefaultLogger.WithFields(fields) - - // Test logging with additional fields - additionalFields := map[string]interface{}{ - "user": "alice", - } - logger.WithFields(additionalFields).Info("Additional fields") - - // Add more test cases as needed -} - -func TestFilename(t *testing.T) { - logger.DefaultLogger = logger.New( - logger.WithLevel(logger.InfoLevel), - logger.WithFilename("stat.log"), - logger.WithRotation("hour"), - logger.WithNamespace("metrics"), - logger.WithMode(logger.FileMode), - logger.WithMaxSize(1), - logger.WithMaxBackups(3), - logger.WithKeepHours(1), - logger.WithCompress(false), - ) - - for i := 0; i < 10000; i++ { - logger.WithFields(map[string]interface{}{ - "a": 123, - "v": 1, - }).Info("test msg") - } - - logger.Sync() -} - -func TestLogs(t *testing.T) { - logger.DefaultLogger = logger.New( - logger.WithLevel(logger.InfoLevel), - logger.WithMode(logger.FileMode), - logger.WithMaxSize(1), - logger.WithMaxBackups(1), - logger.WithCompress(false), - 
logger.WithRotation("size"), - ) - - defer logger.Sync() - - for i := 0; i < 10000; i++ { - logger.Debug("test msg") - logger.Info("test msg") - logger.Warn("test msg") - logger.Error("test msg") - } -} - -type CustomOutput struct { -} - -func (c *CustomOutput) Write(p []byte) (n int, err error) { - msg := strings.Replace(string(p), "\n", "", 1) - fmt.Println(msg) - return len(p), nil -} - -func (c *CustomOutput) Sync() error { - fmt.Println("sync") - return nil -} - -// 自定义输出源 -func TestCustomOutput(t *testing.T) { - logger.DefaultLogger = logger.New( - logger.WithMode("custom"), - logger.WithWriter(&CustomOutput{}), - ) - - defer logger.Sync() - - for i := 0; i < 1000; i++ { - logger.Info("test msg") - } -} - -func TestInfo(t *testing.T) { - logger.Info("test msg") -} diff --git a/logtest/logtest.go b/logtest/logtest.go deleted file mode 100644 index c0945cf..0000000 --- a/logtest/logtest.go +++ /dev/null @@ -1,66 +0,0 @@ -package logtest - -import ( - "bytes" - "encoding/json" - "io" - "testing" - - "github.com/nextmicro/logger" -) - -type Buffer struct { - buf *bytes.Buffer - t *testing.T -} - -func Discard(t *testing.T) { - logger.DefaultLogger = logger.New(logger.WithWriter(io.Discard)) -} - -func NewCollector(t *testing.T) *Buffer { - var buf bytes.Buffer - logger.DefaultLogger = logger.New(logger.WithWriter(&buf)) - - t.Cleanup(func() { - logger.Sync() - }) - - return &Buffer{ - buf: &buf, - t: t, - } -} - -func (b *Buffer) Bytes() []byte { - return b.buf.Bytes() -} - -func (b *Buffer) Content() string { - var m map[string]interface{} - if err := json.Unmarshal(b.buf.Bytes(), &m); err != nil { - return "" - } - - content, ok := m["content"] - if !ok { - return "" - } - - switch val := content.(type) { - case string: - return val - default: - // err is impossible to be not nil, unmarshaled from b.buf.Bytes() - bs, _ := json.Marshal(content) - return string(bs) - } -} - -func (b *Buffer) Reset() { - b.buf.Reset() -} - -func (b *Buffer) String() string { - 
return b.buf.String() -} diff --git a/options.go b/options.go index 9f0fbff..f9336a6 100644 --- a/options.go +++ b/options.go @@ -1,231 +1,181 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + package logger import ( - "io" + "fmt" "go.uber.org/zap/zapcore" ) -const ( - spanKey = "span_id" - traceKey = "trace_id" - - callerSkipOffset = 1 - - FileMode = "file" - ConsoleMode = "console" -) - -const ( - debugFilename = "debug.log" - infoFilename = "info.log" - warnFilename = "warn.log" - errorFilename = "error.log" - fatalFilename = "fatal.log" -) - -type Option func(o *Options) - -type Options struct { - // The logging level the Logger should log at. default is `InfoLevel` - level Level - // callerSkip is the number of stack frames to ascend when logging caller info. - callerSkip int - // namespace is the namespace of logger. - namespace string - // fields is the fields of logger. 
- fields map[string]any - // encoder is the encoder of logger. - encoder Encoder - // encoderConfig is the encoder config of logger. - encoderConfig zapcore.EncoderConfig - - // mode is the logging mode. default is `consoleMode` - mode string - // path represents the log file path, default is `logs`. - path string - // filename is the log filename. default is `""` - filename string - // maxSize represents how much space the writing log file takes up. 0 means no limit. The unit is `MB`. - // Only take effect when RotationRuleType is `size` - maxSize int - // keepDays represents how many days the log files will be kept. Default to keep all files. - // Only take effect when Mode is `file` or `volume`, both work when Rotation is `daily` or `size`. - keepDays int - // keepHours represents how many hours the log files will be kept. Default to keep all files. - // Only take effect when Mode is `file` or `volume`, both work when Rotation is `daily` or `size`. - keepHours int - // maxBackups represents how many backup log files will be kept. 0 means all files will be kept forever. - // Only take effect when RotationRuleType is `size`. - // Even though `MaxBackups` sets 0, log files will still be removed - // if the `KeepDays` limitation is reached. - maxBackups int - // compress is the compression type for old logs. disabled by default. - compress bool - // rotation represents the type of log rotation rule. Default is `daily`. - // daily: daily rotation. - // size: size limited rotation. - rotation string - // writer is the writer of logger. 
- writer io.Writer -} - -func newOptions(opts ...Option) Options { - opt := Options{ - level: InfoLevel, - mode: ConsoleMode, - path: "./logs", - callerSkip: callerSkipOffset, - encoderConfig: zapcore.EncoderConfig{ - TimeKey: "ts", - MessageKey: "msg", - LevelKey: "level", - CallerKey: "caller", - StacktraceKey: "stack", - LineEnding: zapcore.DefaultLineEnding, - NameKey: "Logger", - EncodeCaller: zapcore.ShortCallerEncoder, - EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeTime: zapcore.ISO8601TimeEncoder, // 日期格式改为"ISO8601",例如:"2020-12-16T19:12:48.771+0800" - EncodeDuration: zapcore.StringDurationEncoder, - EncodeName: zapcore.FullNameEncoder, - }, - fields: make(map[string]any), - encoder: JsonEncoder, - } - - for _, o := range opts { - o(&opt) - } - - return opt -} - -type Encoder string - -func (e Encoder) String() string { - return string(e) -} - -// IsJson Whether json encoder. -func (e Encoder) IsJson() bool { - return e.String() == JsonEncoder.String() -} - -// IsConsole Whether console encoder. -func (e Encoder) IsConsole() bool { - return e.String() == ConsoleEncoder.String() -} - -const ( - JsonEncoder Encoder = "json" - ConsoleEncoder Encoder = "console" -) - -// WithLevel Setter function to set the logging level. -func WithLevel(level Level) Option { - return func(o *Options) { - o.level = level - } -} - -// WithMode Setter function to set the logging mode. -func WithMode(mode string) Option { - return func(o *Options) { - o.mode = mode - } -} - -// WithPath Setter function to set the log path. -func WithPath(path string) Option { - return func(o *Options) { - o.path = path - } -} - -// WithFilename Setter function to set the log filename. -func WithFilename(filename string) Option { - return func(o *Options) { - o.filename = filename - } -} - -// WithMaxSize Setter function to set the maximum log size. 
-func WithMaxSize(maxSize int) Option { - return func(o *Options) { - o.maxSize = maxSize - } -} - -// WithMaxBackups Setter function to set the maximum number of log backups. -func WithMaxBackups(maxBackups int) Option { - return func(o *Options) { - o.maxBackups = maxBackups - } -} - -// WithCompress Setter function to set the compress option. -func WithCompress(compress bool) Option { - return func(o *Options) { - o.compress = compress - } -} - -// WithCallerSkip Setter function to set the caller skip value. -func WithCallerSkip(callerSkip int) Option { - return func(o *Options) { - o.callerSkip = callerSkip - } -} - -// WithNamespace Setter function to set the namespace. -func WithNamespace(namespace string) Option { - return func(o *Options) { - o.namespace = namespace - } -} - -// Fields Setter function to set the logger fields. -func Fields(fields map[string]any) Option { - return func(o *Options) { - o.fields = fields - } +// An Option configures a Logger. +type Option interface { + apply(*Logger) } -// WithEncoder Setter function to set the encoder. -func WithEncoder(encoder Encoder) Option { - return func(o *Options) { - o.encoder = encoder - } -} - -// WithEncoderConfig Setter function to set the encoder config. -func WithEncoderConfig(encoderConfig zapcore.EncoderConfig) Option { - return func(o *Options) { - o.encoderConfig = encoderConfig - } -} - -func WithKeepHours(keepHours int) Option { - return func(o *Options) { - o.keepHours = keepHours - } -} - -func WithKeepDays(keepDays int) Option { - return func(o *Options) { - o.keepDays = keepDays - } -} - -func WithRotation(rotation string) Option { - return func(o *Options) { - o.rotation = rotation - } -} +// optionFunc wraps a func so it satisfies the Option interface. 
+type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} -func WithWriter(w io.Writer) Option { - return func(o *Options) { - o.writer = w - } +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +func WithHandler(h Handler) Option { + return optionFunc(func(log *Logger) { + log.handler = h + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. +func Fields(fs ...zapcore.Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the Logger. Note +// that this option only affects internal errors; for sample code that sends +// error-level logs to a different location from info- and debug-level logs, +// see the package-level AdvancedConfiguration example. +// +// The supplied WriteSyncer must be safe for concurrent use. The Open and +// zapcore.Lock functions are the simplest ways to protect files with a mutex. +func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which makes DPanic-level +// logs panic instead of simply logging an error. 
+func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller. See also WithCaller. +func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller, or not, depending on the +// value of enabled. This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { + return optionFunc(func(log *Logger) { + log.addCaller = enabled + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). When building wrappers around the +// Logger and SugaredLogger, supplying this Option prevents zap from always +// reporting the wrapper code as the caller. +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} + +// IncreaseLevel increase the level of the logger. It has no effect if +// the passed in level tries to decrease the level of the logger. +func IncreaseLevel(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) + if err != nil { + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + } else { + log.core = core + } + }) +} + +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. 
+// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + +// WithFatalHook sets a CheckWriteHook to run on fatal logs. +// Zap will call this hook after writing a log statement with a Fatal level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a fatal log message, but it will not exit the +// program. +// +// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit)) +// +// It is important that the provided CheckWriteHook stops the control flow at +// the current statement to meet expectations of callers of the logger. +// We recommend calling os.Exit or runtime.Goexit inside custom hooks at +// minimum. +func WithFatalHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onFatal = hook + }) +} + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. 
+func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) } diff --git a/pool.go b/pool.go deleted file mode 100644 index fc84c39..0000000 --- a/pool.go +++ /dev/null @@ -1,41 +0,0 @@ -package logger - -import ( - "bytes" - "sync" - "sync/atomic" -) - -var bpool = newBufferPool(500) - -type bufferPool struct { - p sync.Pool - count int64 - size int -} - -func newBufferPool(size int) *bufferPool { - p := &bufferPool{} - p.p.New = func() interface{} { return bytes.NewBuffer(nil) } - p.size = size - return p -} - -func putBuffer(b *bytes.Buffer) { - bpool.p.Put(b) - atomic.AddInt64(&bpool.count, -1) -} - -func getBuffer() *bytes.Buffer { - for { - c := atomic.LoadInt64(&bpool.count) - if c > int64(bpool.size) { - return nil - } - if atomic.CompareAndSwapInt64(&bpool.count, c, c+1) { - b := bpool.p.Get().(*bytes.Buffer) - b.Reset() - return b - } - } -} diff --git a/qodana.yaml b/qodana.yaml deleted file mode 100644 index 215d808..0000000 --- a/qodana.yaml +++ /dev/null @@ -1,29 +0,0 @@ -#-------------------------------------------------------------------------------# -# Qodana analysis is configured by qodana.yaml file # -# https://www.jetbrains.com/help/qodana/qodana-yaml.html # -#-------------------------------------------------------------------------------# -version: "1.0" - -#Specify inspection profile for code analysis -profile: - name: qodana.starter - -#Enable inspections -#include: -# - name: - -#Disable inspections -#exclude: -# - name: -# paths: -# - - -#Execute shell command before Qodana execution (Applied in CI/CD pipeline) -#bootstrap: sh ./prepare-qodana.sh - -#Install IDE plugins before Qodana execution (Applied in CI/CD pipeline) -#plugins: -# - id: #(plugin id can be found at https://plugins.jetbrains.com) - -#Specify Qodana linter for analysis (Applied in CI/CD pipeline) -linter: jetbrains/qodana-go:latest diff --git a/rotatelogger.go b/rotatelogger.go deleted file mode 100644 index 
88ab66e..0000000 --- a/rotatelogger.go +++ /dev/null @@ -1,402 +0,0 @@ -package logger - -import ( - "bytes" - "compress/gzip" - "errors" - "fmt" - "log" - "os" - "path" - "runtime/debug" - "sync" - "time" -) - -var ( - // Placeholder is a placeholder object that can be used globally. - Placeholder PlaceholderType - - // ErrClosedRollingFile is returned when the rolling file is closed. - ErrClosedRollingFile = errors.New("rolling file is closed") - - ErrBuffer = errors.New("buffer exceeds the limit") -) - -type ( - // AnyType can be used to hold any type. - AnyType = any - // PlaceholderType represents a placeholder type. - PlaceholderType = struct{} -) - -const ( - defaultDirMode = 0o755 - defaultFileMode = 0o600 - gzipExt = ".gz" - backupFileDelimiter = "-" - sizeRotationRule = "size" - hourRotationRule = "hour" - dayRotationRule = "day" - megaBytes = 1 << 20 - logPageNumber = 2 - logPageCacheByteSize = 4096 // 4KB -) - -type ( - // A RotateLogger is a Logger that can rotate log files with given rules. - RotateLogger struct { - filename string - backup string - fp *os.File - - syncFlush chan struct{} - current *bytes.Buffer - fullBuffer chan *bytes.Buffer - - closed bool - done chan struct{} - rule RotateRule - compress bool - // can't use threading.RoutineGroup because of cycle import - waitGroup sync.WaitGroup - closeOnce sync.Once - currentSize int64 - - mu sync.Mutex - } -) - -// NewRotateLogger returns a RotateLogger with given filename and rule, etc. -func NewRotateLogger(filename string, rule RotateRule, compress bool) (*RotateLogger, error) { - l := &RotateLogger{ - filename: filename, - rule: rule, - compress: compress, - done: make(chan struct{}), - syncFlush: make(chan struct{}), - fullBuffer: make(chan *bytes.Buffer, logPageNumber+1), - current: getBuffer(), - } - if err := l.initialize(); err != nil { - return nil, err - } - - l.startWorker() - return l, nil -} - -// flush flushes the buffer to the file. 
-func (l *RotateLogger) flush() { - readyLen := len(l.fullBuffer) - for i := 0; i < readyLen; i++ { - buff := <-l.fullBuffer - l.writeBuffer(buff) - putBuffer(buff) - } - if l.current != nil { - l.writeBuffer(l.current) - putBuffer(l.current) - } - - l.current = nil - if l.fp != nil { - l.fp.Sync() - } -} - -func (l *RotateLogger) startWorker() { - l.waitGroup.Add(1) - - go func() { - defer l.waitGroup.Done() - - defer func() { - l.flush() - if l.fp != nil { - l.fp.Close() - l.fp = nil - } - }() - - t := time.NewTicker(time.Millisecond * 500) - defer t.Stop() - for { - select { - case <-l.syncFlush: - l.mu.Lock() - l.flush() - l.mu.Unlock() - l.syncFlush <- struct{}{} - case buff := <-l.fullBuffer: - l.writeBuffer(buff) - putBuffer(buff) - case <-t.C: - l.mu.Lock() - if len(l.fullBuffer) != 0 { - l.mu.Unlock() - continue - } - // 清空buffer - buff := l.current - if buff == nil { - l.mu.Unlock() - continue - } - l.current = nil - l.mu.Unlock() - - l.writeBuffer(buff) - putBuffer(buff) - case <-l.done: - return - } - } - }() -} - -func (l *RotateLogger) Write(b []byte) (n int, err error) { - l.mu.Lock() - if l.closed { - l.mu.Unlock() - return 0, ErrClosedRollingFile - } - - // check if logger is closed - select { - case <-l.done: - l.mu.Unlock() - return 0, fmt.Errorf("logger is closed") - default: - } - - // write to buffer - if l.current == nil { - l.current = getBuffer() - if l.current == nil { - l.mu.Unlock() - return 0, ErrBuffer - } - } - - // write to buffer - n, err = l.current.Write(b) - if l.current.Len() > logPageCacheByteSize { - buf := l.current - l.current = nil - l.mu.Unlock() - l.fullBuffer <- buf - } else { - l.mu.Unlock() - } - return -} - -func (l *RotateLogger) getBackupFilename() string { - if len(l.backup) == 0 { - return l.rule.BackupFileName() - } - - return l.backup -} - -func (l *RotateLogger) initialize() error { - l.backup = l.rule.BackupFileName() - - if fileInfo, err := os.Stat(l.filename); err != nil { - basePath := path.Dir(l.filename) 
- if _, err = os.Stat(basePath); err != nil { - if err = os.MkdirAll(basePath, defaultDirMode); err != nil { - return err - } - } - - if l.fp, err = os.Create(l.filename); err != nil { - return err - } - } else { - if l.fp, err = os.OpenFile(l.filename, os.O_APPEND|os.O_WRONLY, defaultFileMode); err != nil { - return err - } - - l.currentSize = fileInfo.Size() - } - - return nil -} - -func (l *RotateLogger) maybeCompressFile(file string) { - if !l.compress { - return - } - - defer func() { - if r := recover(); r != nil { - log.Printf(fmt.Sprintf("%s\n%s", r, string(debug.Stack()))) - } - }() - - if _, err := os.Stat(file); err != nil { - // file doesn't exist or another error, ignore compression - return - } - - compressLogFile(file) -} - -func (l *RotateLogger) maybeDeleteOutdatedFiles() { - files := l.rule.OutdatedFiles() - for _, file := range files { - if err := os.Remove(file); err != nil { - log.Printf("failed to remove outdated file: %s", file) - } - } -} - -func (l *RotateLogger) postRotate(file string) { - go func() { - // we cannot use threading.GoSafe here, because of import cycle. 
- l.maybeCompressFile(file) - l.maybeDeleteOutdatedFiles() - }() -} - -// rotate 日志轮转 -func (l *RotateLogger) rotate() error { - // close the current file - if err := l.close(); err != nil { - return err - } - - _, err := os.Stat(l.filename) - if err == nil && len(l.backup) > 0 { - backupFilename := l.getBackupFilename() - err = os.Rename(l.filename, backupFilename) - if err != nil { - return err - } - - l.postRotate(backupFilename) - } - - l.backup = l.rule.BackupFileName() - l.fp, err = os.Create(l.filename) - return err -} - -func (l *RotateLogger) writeBuffer(buff *bytes.Buffer) (int64, error) { - if l.rule.ShallRotate(l.currentSize + int64(buff.Len())) { - if err := l.rotate(); err != nil { - log.Println(err) - } else { - l.rule.MarkRotated() - l.currentSize = 0 - } - } - if l.fp == nil { - return 0, nil - } - - size, err := buff.WriteTo(l.fp) - if err != nil { - return size, err - } - l.currentSize += size - return size, nil -} - -// close file close the file -func (l *RotateLogger) close() (err error) { - if l.fp == nil { - return nil - } - - var errs []error - if err = l.fp.Sync(); err != nil { - errs = append(errs, err) - } - err = l.fp.Close() - if err != nil { - errs = append(errs, err) - } - - l.fp = nil - return errors.Join(errs...) -} - -// Close closes l. 
-func (l *RotateLogger) Close() (err error) { - l.mu.Lock() - if l.closed { - l.mu.Unlock() - return nil - } - l.mu.Unlock() - - l.closeOnce.Do(func() { - l.closed = true - close(l.done) - l.waitGroup.Wait() - err = l.close() - }) - - return err -} - -func (l *RotateLogger) Sync() error { - l.mu.Lock() - if l.closed { - l.mu.Unlock() - return ErrClosedRollingFile - } - l.mu.Unlock() - - l.syncFlush <- struct{}{} - <-l.syncFlush - return nil -} - -func compressLogFile(file string) { - start := time.Now() - log.Printf("compressing log file: %s", file) - if err := gzipFile(file, fileSys); err != nil { - log.Printf("compress error: %s", err) - } else { - log.Printf("compressed log file: %s, took %s", file, time.Since(start)) - } -} - -func gzipFile(file string, fsys FileSystem) (err error) { - in, err := fsys.Open(file) - if err != nil { - return err - } - defer func() { - if e := fsys.Close(in); e != nil { - log.Printf("failed to close file: %s, error: %v", file, e) - } - if err == nil { - // only remove the original file when compression is successful - err = fsys.Remove(file) - } - }() - - out, err := fsys.Create(fmt.Sprintf("%s%s", file, gzipExt)) - if err != nil { - return err - } - defer func() { - e := fsys.Close(out) - if err == nil { - err = e - } - }() - - w := gzip.NewWriter(out) - if _, err = fsys.Copy(w, in); err != nil { - // failed to copy, no need to close w - return err - } - - return fsys.Close(w) -} diff --git a/rotatelogger_test.go b/rotatelogger_test.go deleted file mode 100644 index 233d421..0000000 --- a/rotatelogger_test.go +++ /dev/null @@ -1,660 +0,0 @@ -package logger - -import ( - "crypto/md5" - "errors" - "fmt" - "io" - "os" - "path" - "path/filepath" - "sync/atomic" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -// Md5 returns the md5 bytes of data. -func Md5(data []byte) []byte { - digest := md5.New() - digest.Write(data) - return digest.Sum(nil) -} - -// Md5Hex returns the md5 hex string of data. 
-func Md5Hex(data []byte) string { - return fmt.Sprintf("%x", Md5(data)) -} - -// TempFileWithText creates the temporary file with the given content, -// and returns the opened *os.File instance. -// The file is kept as open, the caller should close the file handle, -// and remove the file by name. -func TempFileWithText(text string) (*os.File, error) { - tmpFile, err := os.CreateTemp(os.TempDir(), Md5Hex([]byte(text))) - if err != nil { - return nil, err - } - - if err := os.WriteFile(tmpFile.Name(), []byte(text), os.ModeTemporary); err != nil { - return nil, err - } - - return tmpFile, nil -} - -// TempFilenameWithText creates the file with the given content, -// and returns the filename (full path). -// The caller should remove the file after use. -func TempFilenameWithText(text string) (string, error) { - tmpFile, err := TempFileWithText(text) - if err != nil { - return "", err - } - - filename := tmpFile.Name() - if err = tmpFile.Close(); err != nil { - return "", err - } - - return filename, nil -} - -func TestDailyRotateRuleMarkRotated(t *testing.T) { - t.Run("daily rule", func(t *testing.T) { - var rule DailyRotateRule - rule.MarkRotated() - assert.Equal(t, getNowDate(), rule.rotatedTime) - }) - - t.Run("daily rule", func(t *testing.T) { - rule := DefaultRotateRule("test", "-", 1, false) - _, ok := rule.(*DailyRotateRule) - assert.True(t, ok) - }) -} - -func TestDailyRotateRuleOutdatedFiles(t *testing.T) { - t.Run("bad files", func(t *testing.T) { - rule := DailyRotateRule{ - filename: "[a-z", - } - assert.Empty(t, rule.OutdatedFiles()) - rule.days = 1 - assert.Empty(t, rule.OutdatedFiles()) - rule.gzip = true - assert.Empty(t, rule.OutdatedFiles()) - }) - - t.Run("temp files", func(t *testing.T) { - boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat) - f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary) - assert.NoError(t, err) - _ = f1.Close() - f2, err := os.CreateTemp(os.TempDir(), 
"go-zero-test-"+boundary) - assert.NoError(t, err) - _ = f2.Close() - t.Cleanup(func() { - _ = os.Remove(f1.Name()) - _ = os.Remove(f2.Name()) - }) - rule := DailyRotateRule{ - filename: path.Join(os.TempDir(), "go-zero-test-"), - days: 1, - } - assert.NotEmpty(t, rule.OutdatedFiles()) - }) -} - -func TestDailyRotateRuleShallRotate(t *testing.T) { - var rule DailyRotateRule - rule.rotatedTime = time.Now().Add(time.Hour * 24).Format(dateFormat) - assert.True(t, rule.ShallRotate(0)) -} - -func TestSizeLimitRotateRuleMarkRotated(t *testing.T) { - t.Run("size limit rule", func(t *testing.T) { - var rule SizeLimitRotateRule - rule.MarkRotated() - assert.Equal(t, getNowDateInRFC3339Format(), rule.rotatedTime) - }) - - t.Run("size limit rule", func(t *testing.T) { - rule := NewSizeLimitRotateRule("foo", "-", 1, 1, 1, false) - rule.MarkRotated() - assert.Equal(t, getNowDateInRFC3339Format(), rule.(*SizeLimitRotateRule).rotatedTime) - }) -} - -func TestSizeLimitRotateRuleOutdatedFiles(t *testing.T) { - t.Run("no files", func(t *testing.T) { - var rule SizeLimitRotateRule - assert.Empty(t, rule.OutdatedFiles()) - rule.days = 1 - assert.Empty(t, rule.OutdatedFiles()) - rule.gzip = true - assert.Empty(t, rule.OutdatedFiles()) - rule.maxBackups = 0 - assert.Empty(t, rule.OutdatedFiles()) - }) - - t.Run("bad files", func(t *testing.T) { - rule := SizeLimitRotateRule{ - DailyRotateRule: DailyRotateRule{ - filename: "[a-z", - }, - } - assert.Empty(t, rule.OutdatedFiles()) - rule.days = 1 - assert.Empty(t, rule.OutdatedFiles()) - rule.gzip = true - assert.Empty(t, rule.OutdatedFiles()) - }) - - t.Run("temp files", func(t *testing.T) { - boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat) - f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary) - assert.NoError(t, err) - f2, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary) - assert.NoError(t, err) - boundary1 := time.Now().Add(time.Hour * time.Duration(hoursPerDay) * 
2).Format(dateFormat) - f3, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary1) - assert.NoError(t, err) - t.Cleanup(func() { - _ = f1.Close() - _ = os.Remove(f1.Name()) - _ = f2.Close() - _ = os.Remove(f2.Name()) - _ = f3.Close() - _ = os.Remove(f3.Name()) - }) - rule := SizeLimitRotateRule{ - DailyRotateRule: DailyRotateRule{ - filename: path.Join(os.TempDir(), "go-zero-test-"), - days: 1, - }, - maxBackups: 3, - } - assert.NotEmpty(t, rule.OutdatedFiles()) - }) - - t.Run("no backups", func(t *testing.T) { - boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat) - f1, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary) - assert.NoError(t, err) - f2, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary) - assert.NoError(t, err) - boundary1 := time.Now().Add(time.Hour * time.Duration(hoursPerDay) * 2).Format(dateFormat) - f3, err := os.CreateTemp(os.TempDir(), "go-zero-test-"+boundary1) - assert.NoError(t, err) - t.Cleanup(func() { - _ = f1.Close() - _ = os.Remove(f1.Name()) - _ = f2.Close() - _ = os.Remove(f2.Name()) - _ = f3.Close() - _ = os.Remove(f3.Name()) - }) - rule := SizeLimitRotateRule{ - DailyRotateRule: DailyRotateRule{ - filename: path.Join(os.TempDir(), "go-zero-test-"), - days: 1, - }, - } - assert.NotEmpty(t, rule.OutdatedFiles()) - - logger := new(RotateLogger) - logger.rule = &rule - logger.maybeDeleteOutdatedFiles() - assert.Empty(t, rule.OutdatedFiles()) - }) -} - -func TestSizeLimitRotateRuleShallRotate(t *testing.T) { - var rule SizeLimitRotateRule - rule.rotatedTime = time.Now().Add(time.Hour * 24).Format(fileTimeFormat) - rule.maxSize = 0 - assert.False(t, rule.ShallRotate(0)) - rule.maxSize = 100 - assert.False(t, rule.ShallRotate(0)) - assert.True(t, rule.ShallRotate(101*megaBytes)) -} - -func TestRotateLoggerClose(t *testing.T) { - t.Run("close", func(t *testing.T) { - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - if len(filename) > 0 { - defer 
os.Remove(filename) - } - logger, err := NewRotateLogger(filename, new(DailyRotateRule), false) - assert.Nil(t, err) - _, err = logger.Write([]byte("foo")) - assert.Nil(t, err) - assert.Nil(t, logger.Close()) - }) - - t.Run("close without losing logs", func(t *testing.T) { - text := "foo" - filename, err := TempFilenameWithText(text) - assert.Nil(t, err) - if len(filename) > 0 { - defer os.Remove(filename) - } - logger, err := NewRotateLogger(filename, new(DailyRotateRule), false) - assert.Nil(t, err) - msg := []byte("foo") - n := 100 - for i := 0; i < n; i++ { - _, err = logger.Write(msg) - assert.Nil(t, err) - } - assert.Nil(t, logger.Close()) - bs, err := os.ReadFile(filename) - assert.Nil(t, err) - assert.Equal(t, len(msg)*n+len(text), len(bs)) - }) -} - -func TestRotateLoggerGetBackupFilename(t *testing.T) { - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - if len(filename) > 0 { - defer os.Remove(filename) - } - logger, err := NewRotateLogger(filename, new(DailyRotateRule), false) - assert.Nil(t, err) - assert.True(t, len(logger.getBackupFilename()) > 0) - logger.backup = "" - assert.True(t, len(logger.getBackupFilename()) > 0) -} - -func TestRotateLoggerMayCompressFile(t *testing.T) { - old := os.Stdout - os.Stdout = os.NewFile(0, os.DevNull) - defer func() { - os.Stdout = old - }() - - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - if len(filename) > 0 { - defer os.Remove(filename) - } - logger, err := NewRotateLogger(filename, new(DailyRotateRule), false) - assert.Nil(t, err) - logger.maybeCompressFile(filename) - _, err = os.Stat(filename) - assert.Nil(t, err) -} - -func TestRotateLoggerMayCompressFileTrue(t *testing.T) { - old := os.Stdout - os.Stdout = os.NewFile(0, os.DevNull) - defer func() { - os.Stdout = old - }() - - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - logger, err := NewRotateLogger(filename, new(DailyRotateRule), true) - assert.Nil(t, err) - if len(filename) > 0 { - defer 
os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz") - } - logger.maybeCompressFile(filename) - _, err = os.Stat(filename) - assert.NotNil(t, err) -} - -func TestRotateLoggerRotate(t *testing.T) { - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - logger, err := NewRotateLogger(filename, new(DailyRotateRule), true) - assert.Nil(t, err) - if len(filename) > 0 { - defer func() { - os.Remove(logger.getBackupFilename()) - os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz") - }() - } - err = logger.rotate() - switch v := err.(type) { - case *os.LinkError: - // avoid rename error on docker container - assert.Equal(t, syscall.EXDEV, v.Err) - case *os.PathError: - // ignore remove error for tests, - // files are cleaned in GitHub actions. - assert.Equal(t, "remove", v.Op) - default: - assert.Nil(t, err) - } -} - -func TestRotateLoggerWrite(t *testing.T) { - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - rule := new(DailyRotateRule) - logger, err := NewRotateLogger(filename, rule, true) - assert.Nil(t, err) - if len(filename) > 0 { - defer func() { - os.Remove(logger.getBackupFilename()) - os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz") - }() - } - // the following write calls cannot be changed to Write, because of DATA RACE. 
- logger.write([]byte(`foo`)) - rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(dateFormat) - logger.write([]byte(`bar`)) - logger.Close() - logger.write([]byte(`baz`)) -} - -func TestRotateLoggerWithSizeLimitRotateRuleClose(t *testing.T) { - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - if len(filename) > 0 { - defer os.Remove(filename) - } - logger, err := NewRotateLogger(filename, new(SizeLimitRotateRule), false) - assert.Nil(t, err) - _ = logger.Close() -} - -func TestRotateLoggerGetBackupWithSizeLimitRotateRuleFilename(t *testing.T) { - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - if len(filename) > 0 { - defer os.Remove(filename) - } - logger, err := NewRotateLogger(filename, new(SizeLimitRotateRule), false) - assert.Nil(t, err) - assert.True(t, len(logger.getBackupFilename()) > 0) - logger.backup = "" - assert.True(t, len(logger.getBackupFilename()) > 0) -} - -func TestRotateLoggerWithSizeLimitRotateRuleMayCompressFile(t *testing.T) { - old := os.Stdout - os.Stdout = os.NewFile(0, os.DevNull) - defer func() { - os.Stdout = old - }() - - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - if len(filename) > 0 { - defer os.Remove(filename) - } - logger, err := NewRotateLogger(filename, new(SizeLimitRotateRule), false) - assert.Nil(t, err) - logger.maybeCompressFile(filename) - _, err = os.Stat(filename) - assert.Nil(t, err) -} - -func TestRotateLoggerWithSizeLimitRotateRuleMayCompressFileTrue(t *testing.T) { - old := os.Stdout - os.Stdout = os.NewFile(0, os.DevNull) - defer func() { - os.Stdout = old - }() - - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - logger, err := NewRotateLogger(filename, new(SizeLimitRotateRule), true) - assert.Nil(t, err) - if len(filename) > 0 { - defer os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz") - } - logger.maybeCompressFile(filename) - _, err = os.Stat(filename) - assert.NotNil(t, err) -} - -func 
TestRotateLoggerWithSizeLimitRotateRuleMayCompressFileFailed(t *testing.T) { - old := os.Stdout - os.Stdout = os.NewFile(0, os.DevNull) - defer func() { - os.Stdout = old - }() - - filename := "./logs/info.log" - logger, err := NewRotateLogger(filename, new(SizeLimitRotateRule), true) - defer os.Remove(filename) - if assert.NoError(t, err) { - assert.NotPanics(t, func() { - logger.maybeCompressFile(filename) - }) - } -} - -func TestRotateLoggerWithSizeLimitRotateRuleRotate(t *testing.T) { - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - logger, err := NewRotateLogger(filename, new(SizeLimitRotateRule), true) - assert.Nil(t, err) - if len(filename) > 0 { - defer func() { - os.Remove(logger.getBackupFilename()) - os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz") - }() - } - err = logger.rotate() - switch v := err.(type) { - case *os.LinkError: - // avoid rename error on docker container - assert.Equal(t, syscall.EXDEV, v.Err) - case *os.PathError: - // ignore remove error for tests, - // files are cleaned in GitHub actions. - assert.Equal(t, "remove", v.Op) - default: - assert.Nil(t, err) - } -} - -func TestRotateLoggerWithSizeLimitRotateRuleWrite(t *testing.T) { - filename, err := TempFilenameWithText("foo") - assert.Nil(t, err) - rule := new(SizeLimitRotateRule) - logger, err := NewRotateLogger(filename, rule, true) - assert.Nil(t, err) - if len(filename) > 0 { - defer func() { - os.Remove(logger.getBackupFilename()) - os.Remove(filepath.Base(logger.getBackupFilename()) + ".gz") - }() - } - // the following write calls cannot be changed to Write, because of DATA RACE. 
- logger.write([]byte(`foo`)) - rule.rotatedTime = time.Now().Add(-time.Hour * 24).Format(dateFormat) - logger.write([]byte(`bar`)) - logger.Close() - logger.write([]byte(`baz`)) -} - -func TestGzipFile(t *testing.T) { - err := errors.New("any error") - - t.Run("gzip file open failed", func(t *testing.T) { - fsys := &fakeFileSystem{ - openFn: func(name string) (*os.File, error) { - return nil, err - }, - } - assert.ErrorIs(t, err, gzipFile("any", fsys)) - assert.False(t, fsys.Removed()) - }) - - t.Run("gzip file create failed", func(t *testing.T) { - fsys := &fakeFileSystem{ - createFn: func(name string) (*os.File, error) { - return nil, err - }, - } - assert.ErrorIs(t, err, gzipFile("any", fsys)) - assert.False(t, fsys.Removed()) - }) - - t.Run("gzip file copy failed", func(t *testing.T) { - fsys := &fakeFileSystem{ - copyFn: func(writer io.Writer, reader io.Reader) (int64, error) { - return 0, err - }, - } - assert.ErrorIs(t, err, gzipFile("any", fsys)) - assert.False(t, fsys.Removed()) - }) - - t.Run("gzip file last close failed", func(t *testing.T) { - var called int32 - fsys := &fakeFileSystem{ - closeFn: func(closer io.Closer) error { - if atomic.AddInt32(&called, 1) > 2 { - return err - } - return nil - }, - } - assert.NoError(t, gzipFile("any", fsys)) - assert.True(t, fsys.Removed()) - }) - - t.Run("gzip file remove failed", func(t *testing.T) { - fsys := &fakeFileSystem{ - removeFn: func(name string) error { - return err - }, - } - assert.Error(t, err, gzipFile("any", fsys)) - assert.True(t, fsys.Removed()) - }) - - t.Run("gzip file everything ok", func(t *testing.T) { - fsys := &fakeFileSystem{} - assert.NoError(t, gzipFile("any", fsys)) - assert.True(t, fsys.Removed()) - }) -} - -func TestRotateLogger_WithExistingFile(t *testing.T) { - const body = "foo" - filename, err := TempFilenameWithText(body) - assert.Nil(t, err) - if len(filename) > 0 { - defer os.Remove(filename) - } - - rule := NewSizeLimitRotateRule(filename, "-", 1, 100, 3, false) - logger, 
err := NewRotateLogger(filename, rule, false) - assert.Nil(t, err) - assert.Equal(t, int64(len(body)), logger.currentSize) - assert.Nil(t, logger.Close()) -} - -func BenchmarkRotateLogger(b *testing.B) { - filename := "./test.log" - filename2 := "./test2.log" - dailyRotateRuleLogger, err1 := NewRotateLogger( - filename, - DefaultRotateRule( - filename, - backupFileDelimiter, - 1, - true, - ), - true, - ) - if err1 != nil { - b.Logf("Failed to new daily rotate rule logger: %v", err1) - b.FailNow() - } - sizeLimitRotateRuleLogger, err2 := NewRotateLogger( - filename2, - NewSizeLimitRotateRule( - filename, - backupFileDelimiter, - 1, - 100, - 10, - true, - ), - true, - ) - if err2 != nil { - b.Logf("Failed to new size limit rotate rule logger: %v", err1) - b.FailNow() - } - defer func() { - dailyRotateRuleLogger.Close() - sizeLimitRotateRuleLogger.Close() - os.Remove(filename) - os.Remove(filename2) - }() - - b.Run("daily rotate rule", func(b *testing.B) { - for i := 0; i < b.N; i++ { - dailyRotateRuleLogger.write([]byte("testing\ntesting\n")) - } - }) - b.Run("size limit rotate rule", func(b *testing.B) { - for i := 0; i < b.N; i++ { - sizeLimitRotateRuleLogger.write([]byte("testing\ntesting\n")) - } - }) -} - -type fakeFileSystem struct { - removed int32 - closeFn func(closer io.Closer) error - copyFn func(writer io.Writer, reader io.Reader) (int64, error) - createFn func(name string) (*os.File, error) - openFn func(name string) (*os.File, error) - removeFn func(name string) error -} - -func (f *fakeFileSystem) Close(closer io.Closer) error { - if f.closeFn != nil { - return f.closeFn(closer) - } - return nil -} - -func (f *fakeFileSystem) Copy(writer io.Writer, reader io.Reader) (int64, error) { - if f.copyFn != nil { - return f.copyFn(writer, reader) - } - return 0, nil -} - -func (f *fakeFileSystem) Create(name string) (*os.File, error) { - if f.createFn != nil { - return f.createFn(name) - } - return nil, nil -} - -func (f *fakeFileSystem) Open(name string) 
(*os.File, error) { - if f.openFn != nil { - return f.openFn(name) - } - return nil, nil -} - -func (f *fakeFileSystem) Remove(name string) error { - atomic.AddInt32(&f.removed, 1) - - if f.removeFn != nil { - return f.removeFn(name) - } - return nil -} - -func (f *fakeFileSystem) Removed() bool { - return atomic.LoadInt32(&f.removed) > 0 -} diff --git a/rotaterule.go b/rotaterule.go deleted file mode 100644 index 3e43c75..0000000 --- a/rotaterule.go +++ /dev/null @@ -1,265 +0,0 @@ -package logger - -import ( - "fmt" - "log" - "path/filepath" - "sort" - "strings" - "time" -) - -// A RotateRule interface is used to define the log rotating rules. -type RotateRule interface { - BackupFileName() string - MarkRotated() - OutdatedFiles() []string - ShallRotate(size int64) bool -} - -type ( - // A DailyRotateRule is a rule to daily rotate the log files. - DailyRotateRule struct { - rotatedTime string - filename string - delimiter string - days int - gzip bool - } - - // HourRotateRule a rotation rule that make the log file rotated base on hour - HourRotateRule struct { - rotatedTime string - filename string - delimiter string - hours int - gzip bool - } - - // SizeLimitRotateRule a rotation rule that make the log file rotated base on size - SizeLimitRotateRule struct { - DailyRotateRule - maxSize int64 - maxBackups int - } -) - -// NewHourRotateRule new a hour rotate rule -func NewHourRotateRule(filename, delimiter string, hours int, gzip bool) *HourRotateRule { - return &HourRotateRule{ - rotatedTime: getNowHour(), - filename: filename, - delimiter: delimiter, - hours: hours, - gzip: gzip, - } -} - -// BackupFileName returns the backup filename on rotating. -func (r *HourRotateRule) BackupFileName() string { - return fmt.Sprintf("%s%s%s", r.filename, r.delimiter, getNowHour()) -} - -// MarkRotated marks the rotated time of r to be the current time. 
-func (r *HourRotateRule) MarkRotated() { - r.rotatedTime = getNowHour() -} - -// OutdatedFiles returns the files that exceeded the keeping hours. -func (r *HourRotateRule) OutdatedFiles() []string { - if r.hours <= 0 { - return nil - } - - var pattern string - if r.gzip { - pattern = fmt.Sprintf("%s%s*%s", r.filename, r.delimiter, ".gz") - } else { - pattern = fmt.Sprintf("%s%s*", r.filename, r.delimiter) - } - - files, err := filepath.Glob(pattern) - if err != nil { - fmt.Printf("failed to find outdated log files, error: %s\n", err) - return nil - } - - var buf strings.Builder - boundary := time.Now().Add(-time.Hour * time.Duration(r.hours)).Format(hourFormat) - buf.WriteString(r.filename) - buf.WriteString(r.delimiter) - buf.WriteString(boundary) - if r.gzip { - buf.WriteString(gzipExt) - } - boundaryFile := buf.String() - - var outdates []string - for _, file := range files { - if file < boundaryFile { - outdates = append(outdates, file) - } - } - - return outdates -} - -// ShallRotate checks if the file should be rotated. -func (r *HourRotateRule) ShallRotate(_ int64) bool { - return len(r.rotatedTime) > 0 && getNowHour() != r.rotatedTime -} - -// DefaultRotateRule is a default log rotating rule, currently DailyRotateRule. -func DefaultRotateRule(filename, delimiter string, days int, gzip bool) RotateRule { - return &DailyRotateRule{ - rotatedTime: getNowDate(), - filename: filename, - delimiter: delimiter, - days: days, - gzip: gzip, - } -} - -// BackupFileName returns the backup filename on rotating. -func (r *DailyRotateRule) BackupFileName() string { - return fmt.Sprintf("%s%s%s", r.filename, r.delimiter, getNowDate()) -} - -// MarkRotated marks the rotated time of r to be the current time. -func (r *DailyRotateRule) MarkRotated() { - r.rotatedTime = getNowDate() -} - -// OutdatedFiles returns the files that exceeded the keeping days. 
-func (r *DailyRotateRule) OutdatedFiles() []string { - if r.days <= 0 { - return nil - } - - var pattern string - if r.gzip { - pattern = fmt.Sprintf("%s%s*%s", r.filename, r.delimiter, gzipExt) - } else { - pattern = fmt.Sprintf("%s%s*", r.filename, r.delimiter) - } - - files, err := filepath.Glob(pattern) - if err != nil { - Errorf("failed to delete outdated log files, error: %s", err) - return nil - } - - var buf strings.Builder - boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(dateFormat) - buf.WriteString(r.filename) - buf.WriteString(r.delimiter) - buf.WriteString(boundary) - if r.gzip { - buf.WriteString(gzipExt) - } - boundaryFile := buf.String() - - var outdates []string - for _, file := range files { - if file < boundaryFile { - outdates = append(outdates, file) - } - } - - return outdates -} - -// ShallRotate checks if the file should be rotated. -func (r *DailyRotateRule) ShallRotate(_ int64) bool { - return len(r.rotatedTime) > 0 && getNowDate() != r.rotatedTime -} - -// NewSizeLimitRotateRule returns the rotation rule with size limit -func NewSizeLimitRotateRule(filename, delimiter string, days, maxSize, maxBackups int, gzip bool) RotateRule { - return &SizeLimitRotateRule{ - DailyRotateRule: DailyRotateRule{ - rotatedTime: getNowDateInRFC3339Format(), - filename: filename, - delimiter: delimiter, - days: days, - gzip: gzip, - }, - maxSize: int64(maxSize) * megaBytes, - maxBackups: maxBackups, - } -} - -func (r *SizeLimitRotateRule) BackupFileName() string { - dir := filepath.Dir(r.filename) - prefix, ext := r.parseFilename() - timestamp := getNowDateInRFC3339Format() - return filepath.Join(dir, fmt.Sprintf("%s%s%s%s", prefix, r.delimiter, timestamp, ext)) -} - -func (r *SizeLimitRotateRule) MarkRotated() { - r.rotatedTime = getNowDateInRFC3339Format() -} - -func (r *SizeLimitRotateRule) OutdatedFiles() []string { - dir := filepath.Dir(r.filename) - prefix, ext := r.parseFilename() - - var pattern string - if r.gzip 
{ - pattern = fmt.Sprintf("%s%s%s%s*%s%s", dir, string(filepath.Separator), - prefix, r.delimiter, ext, gzipExt) - } else { - pattern = fmt.Sprintf("%s%s%s%s*%s", dir, string(filepath.Separator), - prefix, r.delimiter, ext) - } - - files, err := filepath.Glob(pattern) - if err != nil { - log.Printf("failed to delete outdated log files, error: %s\n", err) - return nil - } - - sort.Strings(files) - - outdated := make(map[string]PlaceholderType) - - // test if too many backups - if r.maxBackups > 0 && len(files) > r.maxBackups { - for _, f := range files[:len(files)-r.maxBackups] { - outdated[f] = Placeholder - } - files = files[len(files)-r.maxBackups:] - } - - // test if any too old backups - if r.days > 0 { - boundary := time.Now().Add(-time.Hour * time.Duration(hoursPerDay*r.days)).Format(fileTimeFormat) - boundaryFile := filepath.Join(dir, fmt.Sprintf("%s%s%s%s", prefix, r.delimiter, boundary, ext)) - if r.gzip { - boundaryFile += gzipExt - } - for _, f := range files { - if f >= boundaryFile { - break - } - outdated[f] = Placeholder - } - } - - var result []string - for k := range outdated { - result = append(result, k) - } - return result -} - -func (r *SizeLimitRotateRule) ShallRotate(size int64) bool { - return r.maxSize > 0 && r.maxSize < size -} - -func (r *SizeLimitRotateRule) parseFilename() (prefix, ext string) { - logName := filepath.Base(r.filename) - ext = filepath.Ext(r.filename) - prefix = logName[:len(logName)-len(ext)] - return -} diff --git a/sugar.go b/sugar.go new file mode 100644 index 0000000..f8915d7 --- /dev/null +++ b/sugar.go @@ -0,0 +1,671 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package logger + +import ( + "context" + "fmt" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. 
+// For each log level, it exposes four methods: +// +// - methods named after the log level for log.Print-style logging +// - methods ending in "w" for loosely-typed structured logging +// - methods ending in "f" for log.Printf-style logging +// - methods ending in "ln" for log.Println-style logging +// +// For example, the methods for InfoLevel are: +// +// Info(...any) Print-style logging +// Infow(...any) Structured logging (read as "info with") +// Infof(string, ...any) Printf-style logging +// Infoln(...any) Println-style logging +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// WithOptions clones the current SugaredLogger, applies the supplied Options, +// and returns the result. It's safe to use concurrently. +func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger { + base := s.base.clone() + for _, opt := range opts { + opt.apply(base) + } + return &SugaredLogger{base: base} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. 
+// +// For example, +// +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// +// is the equivalent of +// +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. +func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. 
+func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(context.Background(), lvl, "", args, nil) +} + +// LogContext logs the provided arguments at provided level. +func (s *SugaredLogger) LogContext(ctx context.Context, lvl zapcore.Level, args ...interface{}) { + s.log(ctx, lvl, "", args, nil) +} + +// Debug logs the provided arguments at [DebugLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(context.Background(), zapcore.DebugLevel, "", args, nil) +} + +// DebugContext logs the provided arguments at [DebugLevel]. +func (s *SugaredLogger) DebugContext(ctx context.Context, args ...interface{}) { + s.log(ctx, zapcore.DebugLevel, "", args, nil) +} + +// Info logs the provided arguments at [InfoLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(context.Background(), zapcore.InfoLevel, "", args, nil) +} + +// InfoContext logs the provided arguments at [InfoLevel]. +func (s *SugaredLogger) InfoContext(ctx context.Context, args ...interface{}) { + s.log(ctx, zapcore.InfoLevel, "", args, nil) +} + +// Warn logs the provided arguments at [WarnLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(context.Background(), zapcore.WarnLevel, "", args, nil) +} + +// WarnContext logs the provided arguments at [WarnLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) WarnContext(ctx context.Context, args ...interface{}) { + s.log(ctx, zapcore.WarnLevel, "", args, nil) +} + +// Error logs the provided arguments at [ErrorLevel]. 
+// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(context.Background(), zapcore.ErrorLevel, "", args, nil) +} + +// ErrorContext logs the provided arguments at [ErrorLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) ErrorContext(ctx context.Context, args ...interface{}) { + s.log(ctx, zapcore.ErrorLevel, "", args, nil) +} + +// DPanic logs the provided arguments at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(context.Background(), zapcore.DPanicLevel, "", args, nil) +} + +// DPanicContext logs the provided arguments at [DPanicLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) DPanicContext(ctx context.Context, args ...interface{}) { + s.log(ctx, zapcore.DPanicLevel, "", args, nil) +} + +// Panic constructs a message with the provided arguments and panics. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(context.Background(), zapcore.PanicLevel, "", args, nil) +} + +// PanicContext constructs a message with the provided arguments and panics. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) PanicContext(ctx context.Context, args ...interface{}) { + s.log(ctx, zapcore.PanicLevel, "", args, nil) +} + +// Fatal constructs a message with the provided arguments and calls os.Exit. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(context.Background(), zapcore.FatalLevel, "", args, nil) +} + +// FatalContext constructs a message with the provided arguments and calls os.Exit. +// Spaces are added between arguments when neither is a string. 
+func (s *SugaredLogger) FatalContext(ctx context.Context, args ...interface{}) { + s.log(ctx, zapcore.FatalLevel, "", args, nil) +} + +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(context.Background(), lvl, template, args, nil) +} + +// LogContextf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) LogContextf(ctx context.Context, lvl zapcore.Level, template string, args ...interface{}) { + s.log(ctx, lvl, template, args, nil) +} + +// Debugf formats the message according to the format specifier +// and logs it at [DebugLevel]. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(context.Background(), zapcore.DebugLevel, template, args, nil) +} + +// DebugContextf formats the message according to the format specifier +// and logs it at [DebugLevel]. +func (s *SugaredLogger) DebugContextf(ctx context.Context, template string, args ...interface{}) { + s.log(ctx, zapcore.DebugLevel, template, args, nil) +} + +// Infof formats the message according to the format specifier +// and logs it at [InfoLevel]. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(context.Background(), zapcore.InfoLevel, template, args, nil) +} + +// InfoContextf formats the message according to the format specifier +// and logs it at [InfoLevel]. +func (s *SugaredLogger) InfoContextf(ctx context.Context, template string, args ...interface{}) { + s.log(ctx, zapcore.InfoLevel, template, args, nil) +} + +// Warnf formats the message according to the format specifier +// and logs it at [WarnLevel]. 
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(context.Background(), zapcore.WarnLevel, template, args, nil) +} + +// WarnContextf formats the message according to the format specifier +// and logs it at [WarnLevel]. +func (s *SugaredLogger) WarnContextf(ctx context.Context, template string, args ...interface{}) { + s.log(ctx, zapcore.WarnLevel, template, args, nil) +} + +// Errorf formats the message according to the format specifier +// and logs it at [ErrorLevel]. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(context.Background(), zapcore.ErrorLevel, template, args, nil) +} + +// ErrorContextf formats the message according to the format specifier +// and logs it at [ErrorLevel]. +func (s *SugaredLogger) ErrorContextf(ctx context.Context, template string, args ...interface{}) { + s.log(ctx, zapcore.ErrorLevel, template, args, nil) +} + +// DPanicf formats the message according to the format specifier +// and logs it at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(context.Background(), zapcore.DPanicLevel, template, args, nil) +} + +// DPanicContextf formats the message according to the format specifier +// and logs it at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +func (s *SugaredLogger) DPanicContextf(ctx context.Context, template string, args ...interface{}) { + s.log(ctx, zapcore.DPanicLevel, template, args, nil) +} + +// Panicf formats the message according to the format specifier +// and panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(context.Background(), zapcore.PanicLevel, template, args, nil) +} + +// PanicContextf formats the message according to the format specifier +// and panics. 
+func (s *SugaredLogger) PanicContextf(ctx context.Context, template string, args ...interface{}) { + s.log(ctx, zapcore.PanicLevel, template, args, nil) +} + +// Fatalf formats the message according to the format specifier +// and calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(context.Background(), zapcore.FatalLevel, template, args, nil) +} + +// FatalfContext formats the message according to the format specifier +// and calls os.Exit. +func (s *SugaredLogger) FatalfContext(ctx context.Context, template string, args ...interface{}) { + s.log(ctx, zapcore.FatalLevel, template, args, nil) +} + +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(context.Background(), lvl, msg, nil, keysAndValues) +} + +// LogwContext logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) LogwContext(ctx context.Context, lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(ctx, lvl, msg, nil, keysAndValues) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(context.Background(), zapcore.DebugLevel, msg, nil, keysAndValues) +} + +// DebugwContext logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. 
+// +// When debug-level logging is disabled, this is much faster than +// +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) DebugwContext(ctx context.Context, msg string, keysAndValues ...interface{}) { + s.log(ctx, zapcore.DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(context.Background(), zapcore.InfoLevel, msg, nil, keysAndValues) +} + +// InfowContext logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) InfowContext(ctx context.Context, msg string, keysAndValues ...interface{}) { + s.log(ctx, zapcore.InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(context.Background(), zapcore.WarnLevel, msg, nil, keysAndValues) +} + +// WarnwContext logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) WarnwContext(ctx context.Context, msg string, keysAndValues ...interface{}) { + s.log(ctx, zapcore.WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(context.Background(), zapcore.ErrorLevel, msg, nil, keysAndValues) +} + +// ErrorwContext logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. 
+func (s *SugaredLogger) ErrorwContext(ctx context.Context, msg string, keysAndValues ...interface{}) { + s.log(ctx, zapcore.ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(context.Background(), zapcore.DPanicLevel, msg, nil, keysAndValues) +} + +// DPanicwContext logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicwContext(ctx context.Context, msg string, keysAndValues ...interface{}) { + s.log(ctx, zapcore.DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(context.Background(), zapcore.PanicLevel, msg, nil, keysAndValues) +} + +// PanicwContext logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) PanicwContext(ctx context.Context, msg string, keysAndValues ...interface{}) { + s.log(ctx, zapcore.PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(context.Background(), zapcore.FatalLevel, msg, nil, keysAndValues) +} + +// FatalwContext logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. 
+func (s *SugaredLogger) FatalwContext(ctx context.Context, msg string, keysAndValues ...interface{}) { + s.log(ctx, zapcore.FatalLevel, msg, nil, keysAndValues) +} + +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(context.Background(), lvl, args, nil) +} + +func (s *SugaredLogger) LoglnContext(ctx context.Context, lvl zapcore.Level, args ...interface{}) { + s.logln(ctx, lvl, args, nil) +} + +// Debugln logs a message at [DebugLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Debugln(args ...interface{}) { + s.logln(context.Background(), zapcore.DebugLevel, args, nil) +} + +func (s *SugaredLogger) DebuglnContext(ctx context.Context, args ...interface{}) { + s.logln(ctx, zapcore.DebugLevel, args, nil) +} + +// Infoln logs a message at [InfoLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Infoln(args ...interface{}) { + s.logln(context.Background(), zapcore.InfoLevel, args, nil) +} + +func (s *SugaredLogger) InfolnContext(ctx context.Context, args ...interface{}) { + s.logln(ctx, zapcore.InfoLevel, args, nil) +} + +// Warnln logs a message at [WarnLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Warnln(args ...interface{}) { + s.logln(context.Background(), zapcore.WarnLevel, args, nil) +} + +func (s *SugaredLogger) WarnlnContext(ctx context.Context, args ...interface{}) { + s.logln(ctx, zapcore.WarnLevel, args, nil) +} + +// Errorln logs a message at [ErrorLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Errorln(args ...interface{}) { + s.logln(context.Background(), zapcore.ErrorLevel, args, nil) +} + +func (s *SugaredLogger) ErrorlnContext(ctx context.Context, args ...interface{}) { + s.logln(ctx, zapcore.ErrorLevel, args, nil) +} + +// DPanicln logs a message at [DPanicLevel]. +// In development, the logger then panics. 
(See [DPanicLevel] for details.)
+// Spaces are always added between arguments.
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+	s.logln(context.Background(), zapcore.DPanicLevel, args, nil)
+}
+
+func (s *SugaredLogger) DPaniclnContext(ctx context.Context, args ...interface{}) {
+	s.logln(ctx, zapcore.DPanicLevel, args, nil)
+}
+
+// Panicln logs a message at [PanicLevel] and panics.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+	s.logln(context.Background(), zapcore.PanicLevel, args, nil)
+}
+
+func (s *SugaredLogger) PaniclnContext(ctx context.Context, args ...interface{}) {
+	s.logln(ctx, zapcore.PanicLevel, args, nil)
+}
+
+// Fatalln logs a message at [FatalLevel] and calls os.Exit.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+	s.logln(context.Background(), zapcore.FatalLevel, args, nil)
+}
+
+func (s *SugaredLogger) FatallnContext(ctx context.Context, args ...interface{}) {
+	s.logln(ctx, zapcore.FatalLevel, args, nil)
+}
+
+// Sync flushes any buffered log entries.
+func (s *SugaredLogger) Sync() error {
+	return s.base.Sync()
+}
+
+// log message with Sprint, Sprintf, or neither.
+func (s *SugaredLogger) log(ctx context.Context, lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
+	// If logging at this level is completely disabled, skip the overhead of
+	// string formatting.
+	if lvl < zapcore.DPanicLevel && !s.base.Core().Enabled(lvl) {
+		return
+	}
+
+	msg := getMessage(template, fmtArgs)
+	if ce := s.base.Check(lvl, msg); ce != nil {
+		fields := s.sweetenFields(context)
+		_fields, err := s.base.handler.Handle(ctx, ce, fields)
+		if err != nil {
+			// Report via the base logger, NOT s.Error: the sugared path would
+			// re-enter this method and call the failing handler again,
+			// recursing without bound if the handler errors deterministically.
+			s.base.Error("Error occurred while handling log context", zap.Error(err))
+		} else {
+			fields = append(fields, _fields...)
+		}
+		ce.Write(fields...)
+	}
+}
+
+// logln message with Sprintln
+func (s *SugaredLogger) logln(ctx context.Context, lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+	if lvl < zapcore.DPanicLevel && !s.base.Core().Enabled(lvl) {
+		return
+	}
+
+	msg := getMessageln(fmtArgs)
+	if ce := s.base.Check(lvl, msg); ce != nil {
+		fields := s.sweetenFields(context)
+		_fields, err := s.base.handler.Handle(ctx, ce, fields)
+		if err != nil {
+			// Report via the base logger, NOT s.Error, to avoid re-entering
+			// the sugared path (and the failing handler) recursively.
+			s.base.Error("Error occurred while handling log context", zap.Error(err))
+		} else {
+			fields = append(fields, _fields...)
+		}
+		ce.Write(fields...)
+	}
+}
+
+// getMessage format with Sprint, Sprintf, or neither.
+func getMessage(template string, fmtArgs []interface{}) string {
+	if len(fmtArgs) == 0 {
+		return template
+	}
+
+	if template != "" {
+		return fmt.Sprintf(template, fmtArgs...)
+	}
+
+	if len(fmtArgs) == 1 {
+		if str, ok := fmtArgs[0].(string); ok {
+			return str
+		}
+	}
+	return fmt.Sprint(fmtArgs...)
+}
+
+// getMessageln format with Sprintln.
+func getMessageln(fmtArgs []interface{}) string {
+	msg := fmt.Sprintln(fmtArgs...)
+	return msg[:len(msg)-1]
+}
+
+func (s *SugaredLogger) sweetenFields(args []interface{}) []zapcore.Field {
+	if len(args) == 0 {
+		return nil
+	}
+
+	var (
+		// Allocate enough space for the worst case; if users pass only structured
+		// fields, we shouldn't penalize them with extra allocations.
+		fields    = make([]zapcore.Field, 0, len(args))
+		invalid   invalidPairs
+		seenError bool
+	)
+
+	for i := 0; i < len(args); {
+		// This is a strongly-typed field. Consume it and move on.
+		if f, ok := args[i].(zapcore.Field); ok {
+			fields = append(fields, f)
+			i++
+			continue
+		}
+
+		// If it is an error, consume it and move on.
+		if err, ok := args[i].(error); ok {
+			if !seenError {
+				seenError = true
+				fields = append(fields, zap.Error(err))
+			} else {
+				s.base.Error(_multipleErrMsg, zap.Error(err))
+			}
+			i++
+			continue
+		}
+
+		// Make sure this element isn't a dangling key.
+ if i == len(args)-1 { + s.base.Error(_oddNumberErrMsg, zap.Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, zap.Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.Error(_nonStringKeyErrMsg, zap.Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + zap.Any("key", p.key).AddTo(enc) + zap.Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/value.go b/value.go deleted file mode 100644 index b849ae4..0000000 --- a/value.go +++ /dev/null @@ -1,23 +0,0 @@ -package logger - -import ( - "context" - - "go.opentelemetry.io/otel/trace" -) - -// TraceID returns a traceid valuer. -func TraceID(ctx context.Context) string { - if span := trace.SpanContextFromContext(ctx); span.HasTraceID() { - return span.TraceID().String() - } - return "" -} - -// SpanID returns a spanid valuer. 
-func SpanID(ctx context.Context) string {
-	if span := trace.SpanContextFromContext(ctx); span.HasSpanID() {
-		return span.SpanID().String()
-	}
-	return ""
-}

From d180d9247e5fce7c1b1e4940fdcc0d409320cd6a Mon Sep 17 00:00:00 2001
From: JellyTony
Date: Thu, 21 Nov 2024 02:06:34 +0800
Subject: [PATCH 2/2] fix bug

---
 handler.go     | 20 ++++++++++++++++++++
 logger.go      |  5 ++++-
 logger_test.go | 18 ++++++++++++++++++
 3 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/handler.go b/handler.go
index 0bf5561..6d71e87 100644
--- a/handler.go
+++ b/handler.go
@@ -14,3 +14,23 @@ type Handler interface {
 	// replicate the logic of Check.
 	Handle(context.Context, *zapcore.CheckedEntry, []zapcore.Field) ([]zapcore.Field, error)
 }
+
+// CommonHandler wraps a Handler and implements Handler.
+type CommonHandler struct {
+	handler Handler
+}
+
+// NewCommonHandler returns a new CommonHandler wrapping h.
+func NewCommonHandler(h Handler) Handler {
+	return &CommonHandler{handler: h}
+}
+
+// Handler returns l's Handler.
+func (h *CommonHandler) Handler() Handler { return h.handler }
+
+func (h *CommonHandler) Handle(ctx context.Context, entry *zapcore.CheckedEntry, fields []zapcore.Field) ([]zapcore.Field, error) {
+	if h.Handler() == nil {
+		return fields, nil
+	}
+	return h.Handler().Handle(ctx, entry, fields)
+}
diff --git a/logger.go b/logger.go
index d076d43..2ee772a 100644
--- a/logger.go
+++ b/logger.go
@@ -79,7 +79,10 @@ func New(core zapcore.Core, options ...Option) *Logger {
 		addStack: zapcore.FatalLevel + 1,
 		clock:    zapcore.DefaultClock,
 	}
-	return log.WithOptions(options...)
+	log = log.WithOptions(options...)
+	log.handler = NewCommonHandler(log.handler)
+
+	return log
 }
 
 // NewNop returns a no-op Logger.
It never writes out logs or internal errors,
diff --git a/logger_test.go b/logger_test.go
index a19c078..c2ffe6b 100644
--- a/logger_test.go
+++ b/logger_test.go
@@ -1,6 +1,7 @@
 package logger
 
 import (
+	"context"
 	"errors"
 	"log/slog"
 	"net/http"
@@ -9,6 +10,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
 )
 
 func TestNopLogger(t *testing.T) {
@@ -43,3 +45,21 @@
 		slog.Int("status", http.StatusOK),
 	)
 }
+
+// ctxKey is an unexported context-key type; using a plain string key
+// trips go vet / staticcheck SA1029 and risks collisions between packages.
+type ctxKey string
+
+type CustomHandler struct{}
+
+func (h *CustomHandler) Handle(ctx context.Context, entry *zapcore.CheckedEntry, fields []zapcore.Field) ([]zapcore.Field, error) {
+	v, ok := ctx.Value(ctxKey("tid")).(string)
+	if ok {
+		fields = append(fields, zap.String("tid", v))
+	}
+	return fields, nil
+}
+
+func TestHandler(t *testing.T) {
+	logger := NewExample(WithHandler(&CustomHandler{}))
+	ctx := context.WithValue(context.Background(), ctxKey("tid"), "123456")
+	logger.InfoContext(ctx, "hello world")
+}