Add log configuration #63

Open · wants to merge 7 commits into base: master
2 changes: 1 addition & 1 deletion .github/workflows/actions.yml
@@ -14,7 +14,7 @@ jobs:
- name: golangci-lint
uses: reviewdog/action-golangci-lint@v1
with:
golangci_lint_flags: "--enable-all --timeout=10m --exclude-use-default=false --tests=false --disable=gochecknoinits,gochecknoglobals,exhaustive,nakedret,wrapcheck"
golangci_lint_flags: "--enable-all --timeout=10m --exclude-use-default=false --tests=false --disable=gochecknoinits,gochecknoglobals,exhaustive,nakedret,wrapcheck -D G302"

test:
name: test
210 changes: 208 additions & 2 deletions log/logger.go
@@ -18,12 +18,16 @@
package log

import (
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync"
"time"
@@ -76,7 +80,7 @@ type Logger struct {
}

type LoggerInfo struct {
LogRoller Roller
LogRoller *Roller
FileName string
CreateTime time.Time
}
@@ -205,6 +209,7 @@ func (l *Logger) start() error {
l.create = time.Now()
}
l.writer = file
l.mill()
Collaborator:

Running mill here looks like it could take some time, and while it runs this logger effectively cannot do its normal work (the l.handler call further down has not executed yet). If mill blocks for too long, logs may be lost, which is a risk to consider.

Could mill be made asynchronous, with a per-log-path lock to avoid concurrent conflicts, so that start can still write to the new log file promptly? Worth discussing; a sketch of that idea follows this comment.
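A minimal sketch of the asynchronous variant suggested above, assuming a hypothetical package-level millLocks map keyed by the log output path (not part of this PR):

// millAsync runs millRunOnce in the background, serialized per log path,
// so start() can continue writing to the new log file immediately.
var millLocks sync.Map // map[string]*sync.Mutex, keyed by l.output

func (l *Logger) millAsync() {
	mu, _ := millLocks.LoadOrStore(l.output, &sync.Mutex{})
	go func(mu *sync.Mutex) {
		mu.Lock()
		defer mu.Unlock()
		if err := l.millRunOnce(); err != nil {
			fmt.Fprintf(os.Stderr, "logger %s mill error: %v\n", l.output, err)
		}
	}(mu.(*sync.Mutex))
}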

l.once.Do(l.startRotate) // start rotate, only once
}
}
@@ -411,7 +416,7 @@ func doRotateFunc(l *Logger, interval time.Duration) {
case <-timer.C:
now := time.Now()
info := LoggerInfo{FileName: l.output, CreateTime: l.create}
info.LogRoller = *l.roller
info.LogRoller = l.roller
l.roller.Handler(&info)
l.create = now
go l.Reopen()
@@ -471,3 +476,204 @@ func parseSyslogAddress(location string) *syslogAddress {

return nil
}

const (
compressSuffix = ".gz"
)

// millRunOnce performs one pass of compression and removal of stale
// log files: rotated files are compressed if compression is enabled in
// the roller configuration, and old files are removed so that at most
// l.roller.MaxBackups remain and none is older than l.roller.MaxAge.
func (l *Logger) millRunOnce() error {
files, err := l.oldLogFiles()
if err != nil {
return err
}

compress, remove := l.screeningCompressFile(files)

for _, f := range remove {
_ = os.Remove(filepath.Join(l.dir(), f.FileName))
}
var wg sync.WaitGroup
for _, f := range compress {
fileName := f.FileName
fnCompress, ferr := l.findCompressFile(fileName)
if ferr != nil {
fmt.Fprintf(os.Stderr, "logger %s findCompressFile error: %v", l.output, ferr)
return ferr
}
wg.Add(1)
// compress each file in its own goroutine, using a per-goroutine error
// to avoid racing on the shared err variable
go func(fnCompress, fileName string) {
defer wg.Done()
if cerr := l.compressLogFile(fileName, fnCompress); cerr != nil {
fmt.Fprintf(os.Stderr, "logger %s compressLogFile error: %v", l.output, cerr)
}
}(fnCompress, fileName)
}
wg.Wait()
return nil
}
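For context, a rough sketch of how a caller would opt in to this cleanup, based on the GetOrCreateLogger/Roller usage in the tests further down; the path and the exact field semantics (e.g. MaxAge units) are illustrative assumptions:

// Rotate by time, keep at most 3 backups, drop anything older than
// MaxAge, and gzip rotated files.
logger, err := GetOrCreateLogger("/tmp/example/app.log", &Roller{
	MaxTime:    1,
	MaxAge:     7,
	MaxBackups: 3,
	Compress:   true,
})
if err != nil {
	// handle the error
}
defer logger.Close()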

func (l *Logger) screeningCompressFile(files []LoggerInfo) (compress, remove []LoggerInfo) {
resFiles, removeByMaxAge := l.screeningCompressFileByMaxAge(files)
resFiles, remove = l.screeningCompressFileByMaxBackups(resFiles, removeByMaxAge)

if l.roller.Compress {
for i := range resFiles {
if !strings.HasSuffix(resFiles[i].FileName, compressSuffix) {
compress = append(compress, resFiles[i])
}
}
}
return
}

func (l *Logger) screeningCompressFileByMaxAge(files []LoggerInfo) (resFiles, remove []LoggerInfo) {
if l.roller.MaxAge > 0 {
diff := time.Duration(int64(maxRotateHour*time.Hour) * int64(l.roller.MaxAge))
cutoff := time.Now().Add(-1 * diff)

for i := range files {
if files[i].CreateTime.Before(cutoff) {
remove = append(remove, files[i])
} else {
resFiles = append(resFiles, files[i])
}
}
} else {
resFiles = files
}
return
}

func (l *Logger) screeningCompressFileByMaxBackups(files, remove []LoggerInfo) (resFiles, resRemove []LoggerInfo) {
if l.roller.MaxBackups > 0 && l.roller.MaxBackups < len(files) {
preserved := make(map[string]bool)

for i := range files {
// Only count the uncompressed log file or the
// compressed log file, not both.
fn := files[i].FileName

preserved[strings.TrimSuffix(fn, compressSuffix)] = true

if len(preserved) > l.roller.MaxBackups {
remove = append(remove, files[i])
} else {
resFiles = append(resFiles, files[i])
}
}
} else {
resFiles = files
}
resRemove = remove
return
}

// findCompressFile returns a compressed file name, derived from the given
// file name, that does not exist yet (appending a numeric suffix if needed).
func (l *Logger) findCompressFile(fileName string) (string, error) {
var (
num = 1
statName = fileName
err error
)

for i := 0; i <= l.roller.MaxBackups; i++ {
if _, err = os.Stat(filepath.Join(l.dir(), statName+compressSuffix)); os.IsNotExist(err) {
return statName + compressSuffix, nil
}
statName = fileName + "." + strconv.Itoa(num)
num++
}
return fileName, err
}

func (l *Logger) mill() {
if l.roller.MaxBackups != defaultRotateKeep || l.roller.MaxAge != defaultRotateAge || l.roller.Compress {
Collaborator:

This condition does not look right: if the configuration happens to use the same values as the defaults, mill will not run, which seems wrong. To keep the previous behavior of doing nothing by default, wouldn't adding a dedicated switch be more appropriate? A sketch of that idea follows this comment.
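A minimal sketch of the dedicated switch proposed above, assuming a hypothetical CleanupEnabled field added to Roller (not part of this PR):

// mill triggers compression/removal only when the caller explicitly
// opts in, instead of comparing the configuration against the defaults.
func (l *Logger) mill() {
	if l.roller.CleanupEnabled { // CleanupEnabled: hypothetical new Roller field
		_ = l.millRunOnce()
	}
}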

_ = l.millRunOnce()
}
}

// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted by ModTime
func (l *Logger) oldLogFiles() ([]LoggerInfo, error) {
files, err := ioutil.ReadDir(l.dir())
if err != nil {
return nil, err
}
logFiles := []LoggerInfo{}

for _, f := range files {
if f.IsDir() {
continue
}
if !strings.HasPrefix(f.Name(), filepath.Base(l.output)+".") {
continue
}
logFiles = append(logFiles, LoggerInfo{l.roller, f.Name(), f.ModTime()})
}
sort.Sort(byFormatTime(logFiles))

return logFiles, nil
}

// dir returns the directory for the current filename.
func (l *Logger) dir() string {
return filepath.Dir(l.output)
}

// compressLogFile compresses the given log file, removing the
// uncompressed log file if successful.
func (l *Logger) compressLogFile(srcFile, dstFile string) error {
f, err := os.Open(filepath.Join(l.dir(), filepath.Clean(srcFile)))
if err != nil {
return err
}

defer func() {
_ = f.Close()
}()

gzf, err := os.OpenFile(filepath.Join(l.dir(), filepath.Clean(dstFile)), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
return err
}

defer func() {
_ = gzf.Close()
if err != nil {
_ = os.Remove(filepath.Join(l.dir(), filepath.Clean(dstFile)))
}
}()

gz := gzip.NewWriter(gzf)

if _, err = io.Copy(gz, f); err != nil {
return err
}

if err = gz.Close(); err != nil {
return err
}

return os.Remove(filepath.Join(l.dir(), filepath.Clean(srcFile)))
}

// byFormatTime sorts LoggerInfo entries by CreateTime, newest first.
type byFormatTime []LoggerInfo

func (b byFormatTime) Less(i, j int) bool {
return b[i].CreateTime.After(b[j].CreateTime)
}

func (b byFormatTime) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}

func (b byFormatTime) Len() int {
return len(b)
}
58 changes: 58 additions & 0 deletions log/logger_test.go
@@ -400,3 +400,61 @@ WAIT:
t.Logf("received %d reopens", reopens)
close(l.stopRotate)
}

func TestLogRollerTimeAndCompress(t *testing.T) {
Collaborator:

The tests should cover the following scenarios (a sketch of such a scenario table follows this comment):

1. Time-based rotation configured with Compress and MaxBackups (e.g. 1): there should be at least three files. The first is written normally; while the second is being written the first is rotated; while the third is being written the second is rotated and the first is compressed.
2. The same setup without Compress: the first file should be removed instead.
3. Size-based rotation configured, otherwise the same as 1) and 2).
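A minimal table-driven skeleton for these scenarios; runScenario is a hypothetical helper, and the size-based case assumes a size field such as MaxSize exists on Roller:

func TestLogRollerScenarios(t *testing.T) {
	// Sketch only: runScenario would write enough entries to trigger the
	// described rotations, then assert which backups exist, were
	// compressed, or were removed.
	cases := []struct {
		name   string
		roller Roller
	}{
		// 1) time-based rotation, Compress on, MaxBackups=1:
		//    the oldest backup should end up compressed.
		{"time+compress", Roller{MaxTime: 1, MaxBackups: 1, Compress: true}},
		// 2) same, without Compress: the oldest backup should be removed.
		{"time+keep", Roller{MaxTime: 1, MaxBackups: 1}},
		// 3) size-based rotation, otherwise as 1) and 2) -- add here once
		//    a size-based Roller configuration is wired up.
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			runScenario(t, tc.roller) // hypothetical helper
		})
	}
}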

logName := "/tmp/mosn_bench/defaultCompress.log"
rollerName := logName + "." + time.Now().Format("2006-01-02_15")
os.Remove(logName)
os.Remove(rollerName)
// replace rotate interval for test
doRotate = testRotateByKeep
defer func() {
doRotate = doRotateFunc // restore the default rotate function
}()
// MaxBackups is set to 1 here (the package default is 10)
logger, err := GetOrCreateLogger(logName, &Roller{MaxTime: 1, Handler: rollerHandler, Compress: true, MaxBackups: 1})
if err != nil {
t.Fatal(err)
}
// 1111 will be rotated to rollerName
logger.Print(newLogBufferString("1111111"), false)
time.Sleep(2 * time.Second)
// 2222 will be written to logName
logger.Print(newLogBufferString("2222222"), false)
time.Sleep(1 * time.Second)
logger.Close() // stop the rotate

if !exists(rollerName + compressSuffix) {
t.Fatalf("compress is failed")
}
}

func testRotateByKeep(l *Logger, interval time.Duration) {
doRotateFunc(l, 1*time.Second)
}

func TestLogRollerTimeAndKeep(t *testing.T) {
logName := "/tmp/mosn_bench/defaultKeep.log"
rollerName := logName + "." + time.Now().Format("2006-01-02_15")
os.Remove(logName)
os.Remove(rollerName)
// replace rotate interval for test
doRotate = testRotateByKeep
defer func() {
doRotate = doRotateFunc // restore the default rotate function
}()
logger, err := GetOrCreateLogger(logName, &Roller{MaxTime: 2, Handler: rollerHandler, MaxBackups: 1})
if err != nil {
t.Fatal(err)
}
logger.Print(newLogBufferString("1111111"), false)
time.Sleep(2 * time.Second)
logger.Print(newLogBufferString("2222222"), false)
time.Sleep(2 * time.Second)
logger.Print(newLogBufferString("3333333"), false)
time.Sleep(1 * time.Second)
logger.Close() // stop the rotate
if exists(rollerName) {
t.Fatalf(" %s is exists", rollerName)
}
}
2 changes: 1 addition & 1 deletion log/roller_test.go
@@ -135,7 +135,7 @@ func TestRollerHandler(t *testing.T) {
return ioutil.WriteFile(name, data, 0644)
}
linfo := &LoggerInfo{
LogRoller: Roller{
LogRoller: &Roller{
MaxTime: defaultRotateTime,
MaxBackups: 10,
},