Commit

first log
yzj authored and yzj committed Jun 29, 2022
1 parent 949046a commit 6ab30ea
Showing 1 changed file with 271 additions and 1 deletion.
272 changes: 271 additions & 1 deletion log/logger.go
@@ -18,12 +18,15 @@
package log

import (
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
@@ -73,6 +76,9 @@ type Logger struct {
reopenChan chan struct{}
closeChan chan struct{}
writeBufferChan chan LogBuffer
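// Fields added for size-based rotation: size tracks the number of bytes
// written to the current log file, mu serializes Write calls, and millCh is
// declared for a possible background mill goroutine (not yet used anywhere in this change).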
size int64
millCh chan struct{}
mu sync.Mutex
}

type LoggerInfo struct {
@@ -181,13 +187,36 @@ func (l *Logger) start() error {
}
l.writer = writer
} else { // write to file
if err := os.MkdirAll(filepath.Dir(l.output), 0755); err != nil {
return err
}
file, err := os.OpenFile(l.output, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
return err
}

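// Record the existing file's size so size-based rotation also accounts for
// content written before this logger was started.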
if fileInfo, err := os.Stat(l.output); err == nil {
l.size = fileInfo.Size()
}

//if l.roller.MaxBackups != 0 {
// fileInfoList, err := ioutil.ReadDir(filepath.Dir(l.output))
// if err != nil {
// return err
// }
// logFilesNum := 0
// for i := range fileInfoList {
// if strings.HasPrefix(fileInfoList[i].Name(), strings.TrimSuffix(path.Base(l.output), path.Ext(path.Base(l.output)))) {
// logFilesNum++
// }
// }
// if l.roller.MaxBackups <= logFilesNum {
// l.writer = file
// break
// }
//}

if l.roller.MaxTime == 0 {
file.Close()
l.roller.Filename = l.output
@@ -247,6 +276,7 @@ func (l *Logger) handler() {
}
}
case buf := <-l.writeBufferChan:

l.Write(buf.Bytes())
PutLogBuffer(buf)
}
@@ -276,6 +306,7 @@ func (l *Logger) reopen() error {
if err := closer.Close(); err != nil {
fmt.Fprintf(os.Stderr, "logger %s close error when restart, error: %v", l.output, err)
}
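// After closing the old file, prune and/or compress stale backups before
// the logger is started again.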
l.mill()
return l.start()
}
return ErrReopenUnsupported
@@ -427,7 +458,37 @@ func doRotateFunc(l *Logger, interval time.Duration) {
}

func (l *Logger) Write(p []byte) (n int, err error) {
return l.writer.Write(p)
l.mu.Lock()
defer l.mu.Unlock()
writeLen := int64(len(p))
if writeLen > l.max() {
return 0, fmt.Errorf(
"write length %d exceeds maximum file size %d", writeLen, l.roller.MaxSize,
)
}

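// Lazily start the logger on first write if it has not been started yet.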
if l.writer == nil {
if err = l.start(); err != nil {
return 0, err
}
}

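// If this write would push the file past the size limit, invoke the roller
// handler for the current file and reopen a fresh one before writing.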
if l.size+writeLen > l.max() {
rollerHandler(&LoggerInfo{
LogRoller: *l.roller,
FileName: l.output,
CreateTime: time.Now(),
})
if err := l.reopen(); err != nil {
return 0, err
}
}

n, err = l.writer.Write(p)
l.size += int64(n)

return n, err
}

func (l *Logger) Close() error {
@@ -471,3 +532,212 @@ func parseSyslogAddress(location string) *syslogAddress {

return nil
}

//-------------------------------------------
const (
compressSuffix = ".gz"
megabyte = 1024 * 1024
)

// max returns the maximum size in bytes of log files before rolling.
func (l *Logger) max() int64 {
if l.roller.MaxSize == 0 {
return int64(defaultRotateSize * megabyte)
}
return int64(l.roller.MaxSize) * int64(megabyte)
}

// millRunOnce performs compression and removal of stale log files.
// Log files are compressed if enabled via configuration and old log
// files are removed, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *Logger) millRunOnce() error {
if l.roller.MaxBackups == 0 && l.roller.MaxAge == 0 && !l.roller.Compress {
return nil
}

files, err := l.oldLogFiles()
if err != nil {
return err
}

var compress, remove []LoggerInfo

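// Keep at most MaxBackups backups; anything beyond that count is queued for removal.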
if l.roller.MaxBackups > 0 && l.roller.MaxBackups < len(files) {
preserved := make(map[string]bool)
var remaining []LoggerInfo
for _, f := range files {
// Only count the uncompressed log file or the
// compressed log file, not both.
fn := f.FileName
if strings.HasSuffix(fn, compressSuffix) {
fn = fn[:len(fn)-len(compressSuffix)]
}
preserved[fn] = true

if len(preserved) > l.roller.MaxBackups {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}
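// Queue for removal any backups older than MaxAge days, judged by file ModTime.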
if l.roller.MaxAge > 0 {
diff := time.Duration(int64(24*time.Hour) * int64(l.roller.MaxAge))
cutoff := time.Now().Add(-1 * diff)

var remaining []LoggerInfo
for _, f := range files {
if f.CreateTime.Before(cutoff) {
remove = append(remove, f)
} else {
remaining = append(remaining, f)
}
}
files = remaining
}

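// Queue the remaining uncompressed backups for gzip compression.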
if l.roller.Compress {
for _, f := range files {
if !strings.HasSuffix(f.FileName, compressSuffix) {
compress = append(compress, f)
}
}
}

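// Perform the removals and compressions, reporting the first error encountered.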
for _, f := range remove {
errRemove := os.Remove(filepath.Join(l.dir(), f.FileName))
if err == nil && errRemove != nil {
err = errRemove
}
}
for _, f := range compress {
fn := filepath.Join(l.dir(), f.FileName)
fnCompress := compressSuffixFile(fn + compressSuffix)
errCompress := compressLogFile(fn, fnCompress)
if err == nil && errCompress != nil {
err = errCompress
}
}

return err
}

// compressSuffixFile returns a destination name that does not collide with an
// existing file, appending a numeric suffix until an unused name is found.
func compressSuffixFile(fileName string) string {
name := fileName
for num := 0; ; num++ {
if _, err := os.Stat(name); os.IsNotExist(err) {
return name
}
name = fileName + "." + strconv.Itoa(num)
}
}

// mill performs post-rotation compression and removal of stale log files.
// It currently runs synchronously rather than in a background goroutine.
func (l *Logger) mill() {
if err := l.millRunOnce(); err != nil {
fmt.Fprintf(os.Stderr, "logger %s mill error: %v\n", l.output, err)
}
}

// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted by ModTime
func (l *Logger) oldLogFiles() ([]LoggerInfo, error) {
files, err := ioutil.ReadDir(l.dir())
if err != nil {
return nil, fmt.Errorf("can't read log file directory: %s", err)
}
logFiles := []LoggerInfo{}

for _, f := range files {
if f.IsDir() {
continue
}
if !strings.HasPrefix(f.Name(), filepath.Base(l.output)+".") {
continue
}
// Use ModTime in place of the original creation time.
logFiles = append(logFiles, LoggerInfo{*l.roller, f.Name(), f.ModTime()})
}

return logFiles, nil
}

// timeFromName checks that the filename has the expected prefix and extension.
// Parsing the rotation time out of the name is not implemented yet, so it
// returns time.Now() for any filename that matches.
func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
if !strings.HasPrefix(filename, prefix) {
return time.Time{}, errors.New("mismatched prefix")
}
if !strings.HasSuffix(filename, ext) {
return time.Time{}, errors.New("mismatched extension")
}
return time.Now(), nil
}

// prefixAndExt returns the backup-name prefix derived from the Logger's
// filename; the extension part is currently left empty.
func (l *Logger) prefixAndExt() (prefix, ext string) {
filename := filepath.Base(l.output)

prefix = filename + "."

return prefix, ext
}

// dir returns the directory for the current filename.
func (l *Logger) dir() string {
return filepath.Dir(l.output)
}

// compressLogFile compresses the given log file, removing the
// uncompressed log file if successful.
func compressLogFile(src, dst string) (err error) {
f, err := os.Open(src)
if err != nil {
return fmt.Errorf("failed to open log file: %v", err)
}
defer f.Close()

fi, err := os.Stat(src)
if err != nil {
return fmt.Errorf("failed to stat log file: %v", err)
}
// If this file already exists, we presume it was created by
// a previous attempt to compress the log file.
gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
if err != nil {
return fmt.Errorf("failed to open compressed log file: %v", err)
}
defer gzf.Close()

gz := gzip.NewWriter(gzf)

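// If compression fails below, remove the partially written destination file.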
defer func() {
if err != nil {
os.Remove(dst)
err = fmt.Errorf("failed to compress log file: %v", err)
}
}()

if _, err := io.Copy(gz, f); err != nil {
return err
}
if err := gz.Close(); err != nil {
return err
}
if err := gzf.Close(); err != nil {
return err
}

if err := f.Close(); err != nil {
return err
}
if err := os.Remove(src); err != nil {
return err
}

return nil
}
