mirror of
https://github.com/ipfs/kubo.git
synced 2026-03-10 18:57:57 +08:00
Merge pull request #326 from jbenet/feat/machine-readable-logging
feature: machine-readable logging
This commit is contained in:
commit
4af3e0ff97
12
Godeps/Godeps.json
generated
12
Godeps/Godeps.json
generated
@ -1,6 +1,6 @@
|
||||
{
|
||||
"ImportPath": "github.com/jbenet/go-ipfs",
|
||||
"GoVersion": "go1.3",
|
||||
"GoVersion": "go1.3.3",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
@ -131,6 +131,11 @@
|
||||
"ImportPath": "github.com/kr/binarydist",
|
||||
"Rev": "9955b0ab8708602d411341e55fffd7e0700f86bd"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/maybebtc/logrus",
|
||||
"Comment": "v0.6.0-7-g51cc99e",
|
||||
"Rev": "51cc99e4b07ec5516233a8370c28b5eedd096b6b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/go-homedir",
|
||||
"Rev": "7d2d8c8a4e078ce3c58736ab521a40b37a504c52"
|
||||
@ -142,6 +147,11 @@
|
||||
{
|
||||
"ImportPath": "github.com/tuxychandru/pubsub",
|
||||
"Rev": "02de8aa2db3d570c5ab1be5ba67b456fd0fb7c4e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/natefinch/lumberjack.v2",
|
||||
"Comment": "v1.0-12-gd28785c",
|
||||
"Rev": "d28785c2f27cd682d872df46ccd8232843629f54"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
1
Godeps/_workspace/src/github.com/maybebtc/logrus/.gitignore
generated
vendored
Normal file
1
Godeps/_workspace/src/github.com/maybebtc/logrus/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
logrus
|
||||
9
Godeps/_workspace/src/github.com/maybebtc/logrus/.travis.yml
generated
vendored
Normal file
9
Godeps/_workspace/src/github.com/maybebtc/logrus/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.2
|
||||
- 1.3
|
||||
- tip
|
||||
install:
|
||||
- go get github.com/stretchr/testify
|
||||
- go get github.com/stvp/go-udp-testing
|
||||
- go get github.com/tobi/airbrake-go
|
||||
21
Godeps/_workspace/src/github.com/maybebtc/logrus/LICENSE
generated
vendored
Normal file
21
Godeps/_workspace/src/github.com/maybebtc/logrus/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Simon Eskildsen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
342
Godeps/_workspace/src/github.com/maybebtc/logrus/README.md
generated
vendored
Normal file
342
Godeps/_workspace/src/github.com/maybebtc/logrus/README.md
generated
vendored
Normal file
@ -0,0 +1,342 @@
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/Sirupsen/logrus)
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
||||
yet stable (pre 1.0), the core API is unlikely change much but please version
|
||||
control your Logrus to make sure you aren't fetching latest `master` on every
|
||||
build.**
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
||||

|
||||
|
||||
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```json
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||
|
||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[l2met](http://r.32k.io/l2met-introduction) format:
|
||||
|
||||
```text
|
||||
time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
|
||||
time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
|
||||
time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
|
||||
time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
|
||||
time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
|
||||
```
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
```
|
||||
|
||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus/hooks/airbrake"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||
log.AddHook(&logrus_airbrake.AirbrakeHook{})
|
||||
|
||||
// Output to stderr instead of stdout, could also be a file.
|
||||
log.SetOutput(os.Stderr)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
}
|
||||
```
|
||||
|
||||
For more advanced usage such as logging to multiple locations from the same
|
||||
application, you can also create an instance of the `logrus` Logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
|
||||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stderr
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
}
|
||||
```
|
||||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging though logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
||||
```go
|
||||
log.WithFields(log.Fields{
|
||||
"event": event,
|
||||
"topic": topic,
|
||||
"key": key,
|
||||
}).Fatal("Failed to send event")
|
||||
```
|
||||
|
||||
We've found this API forces you to think about logging in a way that produces
|
||||
much more useful logging messages. We've been in countless situations where just
|
||||
a single added field to a log statement that was already there would've saved us
|
||||
hours. The `WithFields` call is optional.
|
||||
|
||||
In general, with Logrus using any of the `printf`-family functions should be
|
||||
seen as a hint you should add a field, however, you can still use the
|
||||
`printf`-family functions with Logrus.
|
||||
|
||||
#### Hooks
|
||||
|
||||
You can add hooks for logging levels. For example to send errors to an exception
|
||||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||
multiple places simultaneously, e.g. syslog.
|
||||
|
||||
```go
|
||||
// Not the real implementation of the Airbrake hook. Just a simple sample.
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.AddHook(new(AirbrakeHook))
|
||||
}
|
||||
|
||||
type AirbrakeHook struct{}
|
||||
|
||||
// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
|
||||
// the fields for the entry. See the Fields section of the README.
|
||||
func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
|
||||
err := airbrake.Notify(entry.Data["error"].(error))
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"source": "airbrake",
|
||||
"endpoint": airbrake.Endpoint,
|
||||
}).Info("Failed to send error to Airbrake")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// `Levels()` returns a slice of `Levels` the hook is fired for.
|
||||
func (hook *AirbrakeHook) Levels() []log.Level {
|
||||
return []log.Level{
|
||||
log.ErrorLevel,
|
||||
log.FatalLevel,
|
||||
log.PanicLevel,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus/hooks/airbrake"
|
||||
"github.com/Sirupsen/logrus/hooks/syslog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.AddHook(new(logrus_airbrake.AirbrakeHook))
|
||||
log.AddHook(logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, ""))
|
||||
}
|
||||
```
|
||||
|
||||
* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
|
||||
Send errors to an exception tracking service compatible with the Airbrake API.
|
||||
Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
|
||||
|
||||
* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
|
||||
Send errors to the Papertrail hosted logging service via UDP.
|
||||
|
||||
* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
|
||||
Send errors to remote syslog server.
|
||||
Uses standard library `log/syslog` behind the scenes.
|
||||
|
||||
* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
|
||||
Send errors to a channel in hipchat.
|
||||
|
||||
#### Level logging
|
||||
|
||||
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||
|
||||
```go
|
||||
log.Debug("Useful debugging information.")
|
||||
log.Info("Something noteworthy happened!")
|
||||
log.Warn("You should probably take a look at this.")
|
||||
log.Error("Something failed but I'm not quitting.")
|
||||
// Calls os.Exit(1) after logging
|
||||
log.Fatal("Bye.")
|
||||
// Calls panic() after logging
|
||||
log.Panic("I'm bailing.")
|
||||
```
|
||||
|
||||
You can set the logging level on a `Logger`, then it will only log entries with
|
||||
that severity or anything above it:
|
||||
|
||||
```go
|
||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||
log.SetLevel(log.InfoLevel)
|
||||
```
|
||||
|
||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||
environment if your application has that.
|
||||
|
||||
#### Entries
|
||||
|
||||
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||
automatically added to all logging events:
|
||||
|
||||
1. `time`. The timestamp when the entry was created.
|
||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||
the `AddFields` call. E.g. `Failed to send event.`
|
||||
3. `level`. The logging level. E.g. `info`.
|
||||
|
||||
#### Environments
|
||||
|
||||
Logrus has no notion of environment.
|
||||
|
||||
If you wish for hooks and formatters to only be used in specific environments,
|
||||
you should handle that yourself. For example, if your application has a global
|
||||
variable `Environment`, which is a string representation of the environment you
|
||||
could do:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
init() {
|
||||
// do something here to set environment depending on an environment variable
|
||||
// or command-line flag
|
||||
if Environment == "production" {
|
||||
log.SetFormatter(logrus.JSONFormatter)
|
||||
} else {
|
||||
// The TextFormatter is default, you don't actually have to do this.
|
||||
log.SetFormatter(logrus.TextFormatter)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This configuration is how `logrus` was intended to be used, but JSON in
|
||||
production is mostly only useful if you do log aggregation with tools like
|
||||
Splunk or Logstash.
|
||||
|
||||
#### Formatters
|
||||
|
||||
The built-in logging formatters are:
|
||||
|
||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||
without colors.
|
||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||
field to `true`. To force no colored output even if there is a TTY set the
|
||||
`DisableColors` field to `true`
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||
default ones (see Entries section above):
|
||||
|
||||
```go
|
||||
type MyJSONFormatter struct {
|
||||
}
|
||||
|
||||
log.SetFormatter(new(MyJSONFormatter))
|
||||
|
||||
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
// Note this doesn't include Time, Level and Message which are available on
|
||||
// the Entry. Consult `godoc` on information about those fields or read the
|
||||
// source of the official loggers.
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Rotation
|
||||
|
||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||
external program (like `logrotated(8)`) that can compress and delete old log
|
||||
entries. It should not be a feature of the application-level logger.
|
||||
|
||||
|
||||
[godoc]: https://godoc.org/github.com/Sirupsen/logrus
|
||||
253
Godeps/_workspace/src/github.com/maybebtc/logrus/entry.go
generated
vendored
Normal file
253
Godeps/_workspace/src/github.com/maybebtc/logrus/entry.go
generated
vendored
Normal file
@ -0,0 +1,253 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||
// passed around as much as you wish to avoid field duplication.
|
||||
type Entry struct {
|
||||
Logger *Logger
|
||||
|
||||
// Contains all the fields set by the user.
|
||||
Data Fields
|
||||
|
||||
// Time at which the log entry was created
|
||||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
Level Level
|
||||
|
||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
Message string
|
||||
}
|
||||
|
||||
func NewEntry(logger *Logger) *Entry {
|
||||
return &Entry{
|
||||
Logger: logger,
|
||||
// Default is three fields, give a little extra room
|
||||
Data: make(Fields, 5),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a reader for the entry, which is a proxy to the formatter.
|
||||
func (entry *Entry) Reader() (*bytes.Buffer, error) {
|
||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||
return bytes.NewBuffer(serialized), err
|
||||
}
|
||||
|
||||
// Returns the string representation from the reader and ultimately the
|
||||
// formatter.
|
||||
func (entry *Entry) String() (string, error) {
|
||||
reader, err := entry.Reader()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return reader.String(), err
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
}
|
||||
|
||||
// Add a map of fields to the Entry.
|
||||
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||
data := Fields{}
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
for k, v := range fields {
|
||||
data[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data}
|
||||
}
|
||||
|
||||
func (entry *Entry) log(level Level, msg string) {
|
||||
|
||||
entry.Logger.mu.Lock()
|
||||
if entry.Logger.WriteFields {
|
||||
entry.Time = time.Now()
|
||||
entry.Level = level
|
||||
entry.Message = msg
|
||||
}
|
||||
entry.Logger.mu.Unlock()
|
||||
|
||||
if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
|
||||
reader, err := entry.Reader()
|
||||
if err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
|
||||
entry.Logger.mu.Lock()
|
||||
defer entry.Logger.mu.Unlock()
|
||||
|
||||
_, err = io.Copy(entry.Logger.Out, reader)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||
}
|
||||
|
||||
// To avoid Entry#log() returning a value that only would make sense for
|
||||
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||
// directly here.
|
||||
if level <= PanicLevel {
|
||||
panic(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
entry.Info(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
entry.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
entry.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.Level >= DebugLevel {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.Level >= InfoLevel {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
entry.Infoln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.Level >= WarnLevel {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
entry.Warnln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.Level >= ErrorLevel {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.Level >= FatalLevel {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.Level >= PanicLevel {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
// fmt.Sprintln where spaces are always added between operands, regardless of
|
||||
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||
// string allocation, we do the simplest thing.
|
||||
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||
msg := fmt.Sprintln(args...)
|
||||
return msg[:len(msg)-1]
|
||||
}
|
||||
55
Godeps/_workspace/src/github.com/maybebtc/logrus/entry_test.go
generated
vendored
Normal file
55
Godeps/_workspace/src/github.com/maybebtc/logrus/entry_test.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEntryPanicln(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom time")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
logger.WriteFields = true
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicln("kaboom")
|
||||
}
|
||||
|
||||
func TestEntryPanicf(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom again")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom true", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
logger.WriteFields = true
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
|
||||
}
|
||||
40
Godeps/_workspace/src/github.com/maybebtc/logrus/examples/basic/basic.go
generated
vendored
Normal file
40
Godeps/_workspace/src/github.com/maybebtc/logrus/examples/basic/basic.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.New()
|
||||
|
||||
func init() {
|
||||
log.Formatter = new(logrus.JSONFormatter)
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
}
|
||||
|
||||
func main() {
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"err": err,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
}
|
||||
}()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "orca",
|
||||
"size": 9009,
|
||||
}).Panic("It's over 9000!")
|
||||
}
|
||||
35
Godeps/_workspace/src/github.com/maybebtc/logrus/examples/hook/hook.go
generated
vendored
Normal file
35
Godeps/_workspace/src/github.com/maybebtc/logrus/examples/hook/hook.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/Sirupsen/logrus/hooks/airbrake"
|
||||
"github.com/tobi/airbrake-go"
|
||||
)
|
||||
|
||||
var log = logrus.New()
|
||||
|
||||
func init() {
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
log.Hooks.Add(new(logrus_airbrake.AirbrakeHook))
|
||||
}
|
||||
|
||||
func main() {
|
||||
airbrake.Endpoint = "https://exceptions.whatever.com/notifier_api/v2/notices.xml"
|
||||
airbrake.ApiKey = "whatever"
|
||||
airbrake.Environment = "production"
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
}
|
||||
185
Godeps/_workspace/src/github.com/maybebtc/logrus/exported.go
generated
vendored
Normal file
185
Godeps/_workspace/src/github.com/maybebtc/logrus/exported.go
generated
vendored
Normal file
@ -0,0 +1,185 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// std is the name of the standard logger in stdlib `log`
|
||||
std = New()
|
||||
)
|
||||
|
||||
// SetOutput sets the standard logger output.
|
||||
func SetOutput(out io.Writer) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Out = out
|
||||
}
|
||||
|
||||
// SetFormatter sets the standard logger formatter.
|
||||
func SetFormatter(formatter Formatter) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Formatter = formatter
|
||||
}
|
||||
|
||||
// SetLevel sets the standard logger level.
|
||||
func SetLevel(level Level) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Level = level
|
||||
}
|
||||
|
||||
// AddHook adds a hook to the standard logger hooks.
|
||||
func AddHook(hook Hook) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Hooks.Add(hook)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithField(key string, value interface{}) *Entry {
|
||||
return std.WithField(key, value)
|
||||
}
|
||||
|
||||
// WithFields creates an entry from the standard logger and adds multiple
|
||||
// fields to it. This is simply a helper for `WithField`, invoking it
|
||||
// once for each field.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithFields(fields Fields) *Entry {
|
||||
return std.WithFields(fields)
|
||||
}
|
||||
|
||||
// WriteFields gives the Logger permission to add time, level, msg info to
|
||||
// Entries
|
||||
func WriteFields(b bool) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.WriteFields = b
|
||||
}
|
||||
|
||||
// Debug logs a message at level Debug on the standard logger.
|
||||
func Debug(args ...interface{}) {
|
||||
std.Debug(args...)
|
||||
}
|
||||
|
||||
// Print logs a message at level Info on the standard logger.
|
||||
func Print(args ...interface{}) {
|
||||
std.Print(args...)
|
||||
}
|
||||
|
||||
// Info logs a message at level Info on the standard logger.
|
||||
func Info(args ...interface{}) {
|
||||
std.Info(args...)
|
||||
}
|
||||
|
||||
// Warn logs a message at level Warn on the standard logger.
|
||||
func Warn(args ...interface{}) {
|
||||
std.Warn(args...)
|
||||
}
|
||||
|
||||
// Warning logs a message at level Warn on the standard logger.
|
||||
func Warning(args ...interface{}) {
|
||||
std.Warning(args...)
|
||||
}
|
||||
|
||||
// Error logs a message at level Error on the standard logger.
|
||||
func Error(args ...interface{}) {
|
||||
std.Error(args...)
|
||||
}
|
||||
|
||||
// Panic logs a message at level Panic on the standard logger.
|
||||
func Panic(args ...interface{}) {
|
||||
std.Panic(args...)
|
||||
}
|
||||
|
||||
// Fatal logs a message at level Fatal on the standard logger.
|
||||
func Fatal(args ...interface{}) {
|
||||
std.Fatal(args...)
|
||||
}
|
||||
|
||||
// Debugf logs a message at level Debug on the standard logger.
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
std.Debugf(format, args...)
|
||||
}
|
||||
|
||||
// Printf logs a message at level Info on the standard logger.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
std.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Infof logs a message at level Info on the standard logger.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
std.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warnf logs a message at level Warn on the standard logger.
|
||||
func Warnf(format string, args ...interface{}) {
|
||||
std.Warnf(format, args...)
|
||||
}
|
||||
|
||||
// Warningf logs a message at level Warn on the standard logger.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
std.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Errorf logs a message at level Error on the standard logger.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
std.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Panicf logs a message at level Panic on the standard logger.
|
||||
func Panicf(format string, args ...interface{}) {
|
||||
std.Panicf(format, args...)
|
||||
}
|
||||
|
||||
// Fatalf logs a message at level Fatal on the standard logger.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
std.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// Debugln logs a message at level Debug on the standard logger.
|
||||
func Debugln(args ...interface{}) {
|
||||
std.Debugln(args...)
|
||||
}
|
||||
|
||||
// Println logs a message at level Info on the standard logger.
|
||||
func Println(args ...interface{}) {
|
||||
std.Println(args...)
|
||||
}
|
||||
|
||||
// Infoln logs a message at level Info on the standard logger.
|
||||
func Infoln(args ...interface{}) {
|
||||
std.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warnln logs a message at level Warn on the standard logger.
|
||||
func Warnln(args ...interface{}) {
|
||||
std.Warnln(args...)
|
||||
}
|
||||
|
||||
// Warningln logs a message at level Warn on the standard logger.
|
||||
func Warningln(args ...interface{}) {
|
||||
std.Warningln(args...)
|
||||
}
|
||||
|
||||
// Errorln logs a message at level Error on the standard logger.
|
||||
func Errorln(args ...interface{}) {
|
||||
std.Errorln(args...)
|
||||
}
|
||||
|
||||
// Panicln logs a message at level Panic on the standard logger.
|
||||
func Panicln(args ...interface{}) {
|
||||
std.Panicln(args...)
|
||||
}
|
||||
|
||||
// Fatalln logs a message at level Fatal on the standard logger.
|
||||
func Fatalln(args ...interface{}) {
|
||||
std.Fatalln(args...)
|
||||
}
|
||||
44
Godeps/_workspace/src/github.com/maybebtc/logrus/formatter.go
generated
vendored
Normal file
44
Godeps/_workspace/src/github.com/maybebtc/logrus/formatter.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package logrus
|
||||
|
||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
// `Entry`. It exposes all the fields, including the default ones:
|
||||
//
|
||||
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
||||
// * `entry.Data["time"]`. The timestamp.
|
||||
// * `entry.Data["level"]. The level the entry was logged at.
|
||||
//
|
||||
// Any additional fields added with `WithField` or `WithFields` are also in
|
||||
// `entry.Data`. Format is expected to return an array of bytes which are then
|
||||
// logged to `logger.Out`.
|
||||
type Formatter interface {
|
||||
Format(*Entry) ([]byte, error)
|
||||
}
|
||||
|
||||
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
||||
// dumping it. If this code wasn't there doing:
|
||||
//
|
||||
// logrus.WithField("level", 1).Info("hello")
|
||||
//
|
||||
// Would just silently drop the user provided level. Instead with this code
|
||||
// it'll logged as:
|
||||
//
|
||||
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||
//
|
||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||
// avoid code duplication between the two default formatters.
|
||||
func prefixFieldClashes(entry *Entry) {
|
||||
_, ok := entry.Data["time"]
|
||||
if ok {
|
||||
entry.Data["fields.time"] = entry.Data["time"]
|
||||
}
|
||||
|
||||
_, ok = entry.Data["msg"]
|
||||
if ok {
|
||||
entry.Data["fields.msg"] = entry.Data["msg"]
|
||||
}
|
||||
|
||||
_, ok = entry.Data["level"]
|
||||
if ok {
|
||||
entry.Data["fields.level"] = entry.Data["level"]
|
||||
}
|
||||
}
|
||||
88
Godeps/_workspace/src/github.com/maybebtc/logrus/formatter_bench_test.go
generated
vendored
Normal file
88
Godeps/_workspace/src/github.com/maybebtc/logrus/formatter_bench_test.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// smallFields is a small size data set for benchmarking
|
||||
var smallFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
}
|
||||
|
||||
// largeFields is a large size data set for benchmarking
|
||||
var largeFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
"five": "six",
|
||||
"seven": "eight",
|
||||
"nine": "ten",
|
||||
"eleven": "twelve",
|
||||
"thirteen": "fourteen",
|
||||
"fifteen": "sixteen",
|
||||
"seventeen": "eighteen",
|
||||
"nineteen": "twenty",
|
||||
"a": "b",
|
||||
"c": "d",
|
||||
"e": "f",
|
||||
"g": "h",
|
||||
"i": "j",
|
||||
"k": "l",
|
||||
"m": "n",
|
||||
"o": "p",
|
||||
"q": "r",
|
||||
"s": "t",
|
||||
"u": "v",
|
||||
"w": "x",
|
||||
"y": "z",
|
||||
"this": "will",
|
||||
"make": "thirty",
|
||||
"entries": "yeah",
|
||||
}
|
||||
|
||||
func BenchmarkSmallTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, largeFields)
|
||||
}
|
||||
|
||||
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
|
||||
entry := &Entry{
|
||||
Time: time.Time{},
|
||||
Level: InfoLevel,
|
||||
Message: "message",
|
||||
Data: fields,
|
||||
}
|
||||
var d []byte
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
d, err = formatter.Format(entry)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(d)))
|
||||
}
|
||||
}
|
||||
122
Godeps/_workspace/src/github.com/maybebtc/logrus/hook_test.go
generated
vendored
Normal file
122
Godeps/_workspace/src/github.com/maybebtc/logrus/hook_test.go
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *TestHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *TestHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookFires(t *testing.T) {
|
||||
hook := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ModifyHook struct {
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Fire(entry *Entry) error {
|
||||
entry.Data["wow"] = "whale"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookCanModifyEntry(t *testing.T) {
|
||||
hook := new(ModifyHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCanFireMultipleHooks(t *testing.T) {
|
||||
hook1 := new(ModifyHook)
|
||||
hook2 := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook1)
|
||||
log.Hooks.Add(hook2)
|
||||
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
assert.Equal(t, hook2.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ErrorHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Levels() []Level {
|
||||
return []Level{
|
||||
ErrorLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
})
|
||||
}
|
||||
|
||||
func TestErrorHookShouldFireOnError(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Error("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
34
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks.go
generated
vendored
Normal file
34
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
package logrus
|
||||
|
||||
// A hook to be fired when logging on the logging levels returned from
|
||||
// `Levels()` on your implementation of the interface. Note that this is not
|
||||
// fired in a goroutine or a channel with workers, you should handle such
|
||||
// functionality yourself if your call is non-blocking and you don't wish for
|
||||
// the logging calls for levels returned from `Levels()` to block.
|
||||
type Hook interface {
|
||||
Levels() []Level
|
||||
Fire(*Entry) error
|
||||
}
|
||||
|
||||
// Internal type for storing the hooks on a logger instance.
|
||||
type levelHooks map[Level][]Hook
|
||||
|
||||
// Add a hook to an instance of logger. This is called with
|
||||
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
||||
func (hooks levelHooks) Add(hook Hook) {
|
||||
for _, level := range hook.Levels() {
|
||||
hooks[level] = append(hooks[level], hook)
|
||||
}
|
||||
}
|
||||
|
||||
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
||||
// appropriate hooks for a log entry.
|
||||
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
|
||||
for _, hook := range hooks[level] {
|
||||
if err := hook.Fire(entry); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
54
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/airbrake/airbrake.go
generated
vendored
Normal file
54
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/airbrake/airbrake.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package logrus_airbrake
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/tobi/airbrake-go"
|
||||
)
|
||||
|
||||
// AirbrakeHook to send exceptions to an exception-tracking service compatible
|
||||
// with the Airbrake API. You must set:
|
||||
// * airbrake.Endpoint
|
||||
// * airbrake.ApiKey
|
||||
// * airbrake.Environment (only sends exceptions when set to "production")
|
||||
//
|
||||
// Before using this hook, to send an error. Entries that trigger an Error,
|
||||
// Fatal or Panic should now include an "error" field to send to Airbrake.
|
||||
type AirbrakeHook struct{}
|
||||
|
||||
func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
|
||||
if entry.Data["error"] == nil {
|
||||
entry.Logger.WithFields(logrus.Fields{
|
||||
"source": "airbrake",
|
||||
"endpoint": airbrake.Endpoint,
|
||||
}).Warn("Exceptions sent to Airbrake must have an 'error' key with the error")
|
||||
return nil
|
||||
}
|
||||
|
||||
err, ok := entry.Data["error"].(error)
|
||||
if !ok {
|
||||
entry.Logger.WithFields(logrus.Fields{
|
||||
"source": "airbrake",
|
||||
"endpoint": airbrake.Endpoint,
|
||||
}).Warn("Exceptions sent to Airbrake must have an `error` key of type `error`")
|
||||
return nil
|
||||
}
|
||||
|
||||
airErr := airbrake.Notify(err)
|
||||
if airErr != nil {
|
||||
entry.Logger.WithFields(logrus.Fields{
|
||||
"source": "airbrake",
|
||||
"endpoint": airbrake.Endpoint,
|
||||
"error": airErr,
|
||||
}).Warn("Failed to send error to Airbrake")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *AirbrakeHook) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.ErrorLevel,
|
||||
logrus.FatalLevel,
|
||||
logrus.PanicLevel,
|
||||
}
|
||||
}
|
||||
28
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/papertrail/README.md
generated
vendored
Normal file
28
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/papertrail/README.md
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
# Papertrail Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />

[Papertrail](https://papertrailapp.com) provides hosted log management. Once stored in Papertrail, you can [group](http://help.papertrailapp.com/kb/how-it-works/groups/) your logs on various dimensions, [search](http://help.papertrailapp.com/kb/how-it-works/search-syntax) them, and trigger [alerts](http://help.papertrailapp.com/kb/how-it-works/alerts).

In most deployments, you'll want to send logs to Papertrail via their [remote_syslog](http://help.papertrailapp.com/kb/configuration/configuring-centralized-logging-from-text-log-files-in-unix/) daemon, which requires no application-specific configuration. This hook is intended for relatively low-volume logging, likely in managed cloud hosting deployments where installing `remote_syslog` is not possible.

## Usage

You can find your Papertrail UDP port on your [Papertrail account page](https://papertrailapp.com/account/destinations). Substitute it below for `YOUR_PAPERTRAIL_UDP_PORT`.

For `YOUR_APP_NAME`, substitute a short string that will readily identify your application or service in the logs.

```go
import (
  "log/syslog"
  "github.com/Sirupsen/logrus"
  "github.com/Sirupsen/logrus/hooks/papertrail"
)

func main() {
  log := logrus.New()
  hook, err := logrus_papertrail.NewPapertrailHook("logs.papertrailapp.com", YOUR_PAPERTRAIL_UDP_PORT, YOUR_APP_NAME)

  if err == nil {
    log.Hooks.Add(hook)
  }
}
```
|
||||
54
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/papertrail/papertrail.go
generated
vendored
Normal file
54
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/papertrail/papertrail.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package logrus_papertrail
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
format = "Jan 2 15:04:05"
|
||||
)
|
||||
|
||||
// PapertrailHook to send logs to a logging service compatible with the Papertrail API.
|
||||
type PapertrailHook struct {
|
||||
Host string
|
||||
Port int
|
||||
AppName string
|
||||
UDPConn net.Conn
|
||||
}
|
||||
|
||||
// NewPapertrailHook creates a hook to be added to an instance of logger.
|
||||
func NewPapertrailHook(host string, port int, appName string) (*PapertrailHook, error) {
|
||||
conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", host, port))
|
||||
return &PapertrailHook{host, port, appName, conn}, err
|
||||
}
|
||||
|
||||
// Fire is called when a log event is fired.
|
||||
func (hook *PapertrailHook) Fire(entry *logrus.Entry) error {
|
||||
date := time.Now().Format(format)
|
||||
payload := fmt.Sprintf("<22> %s %s: [%s] %s", date, hook.AppName, entry.Data["level"], entry.Message)
|
||||
|
||||
bytesWritten, err := hook.UDPConn.Write([]byte(payload))
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Unable to send log line to Papertrail via UDP. Wrote %d bytes before error: %v", bytesWritten, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Levels returns the available logging levels.
|
||||
func (hook *PapertrailHook) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.PanicLevel,
|
||||
logrus.FatalLevel,
|
||||
logrus.ErrorLevel,
|
||||
logrus.WarnLevel,
|
||||
logrus.InfoLevel,
|
||||
logrus.DebugLevel,
|
||||
}
|
||||
}
|
||||
26
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/papertrail/papertrail_test.go
generated
vendored
Normal file
26
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/papertrail/papertrail_test.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
package logrus_papertrail
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/stvp/go-udp-testing"
|
||||
)
|
||||
|
||||
func TestWritingToUDP(t *testing.T) {
|
||||
port := 16661
|
||||
udp.SetAddr(fmt.Sprintf(":%d", port))
|
||||
|
||||
hook, err := NewPapertrailHook("localhost", port, "test")
|
||||
if err != nil {
|
||||
t.Errorf("Unable to connect to local UDP server.")
|
||||
}
|
||||
|
||||
log := logrus.New()
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
udp.ShouldReceive(t, "foo", func() {
|
||||
log.Info("foo")
|
||||
})
|
||||
}
|
||||
20
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/syslog/README.md
generated
vendored
Normal file
20
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/syslog/README.md
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>

## Usage

```go
import (
  "log/syslog"
  "github.com/Sirupsen/logrus"
  "github.com/Sirupsen/logrus/hooks/syslog"
)

func main() {
  log := logrus.New()
  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")

  if err == nil {
    log.Hooks.Add(hook)
  }
}
```
|
||||
59
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/syslog/syslog.go
generated
vendored
Normal file
59
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/syslog/syslog.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
package logrus_syslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/Sirupsen/logrus"
|
||||
"log/syslog"
|
||||
"os"
|
||||
)
|
||||
|
||||
// SyslogHook to send logs via syslog.
|
||||
type SyslogHook struct {
|
||||
Writer *syslog.Writer
|
||||
SyslogNetwork string
|
||||
SyslogRaddr string
|
||||
}
|
||||
|
||||
// Creates a hook to be added to an instance of logger. This is called with
|
||||
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
|
||||
// `if err == nil { log.Hooks.Add(hook) }`
|
||||
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
|
||||
w, err := syslog.Dial(network, raddr, priority, tag)
|
||||
return &SyslogHook{w, network, raddr}, err
|
||||
}
|
||||
|
||||
func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
|
||||
line, err := entry.String()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
switch entry.Data["level"] {
|
||||
case "panic":
|
||||
return hook.Writer.Crit(line)
|
||||
case "fatal":
|
||||
return hook.Writer.Crit(line)
|
||||
case "error":
|
||||
return hook.Writer.Err(line)
|
||||
case "warn":
|
||||
return hook.Writer.Warning(line)
|
||||
case "info":
|
||||
return hook.Writer.Info(line)
|
||||
case "debug":
|
||||
return hook.Writer.Debug(line)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (hook *SyslogHook) Levels() []logrus.Level {
|
||||
return []logrus.Level{
|
||||
logrus.PanicLevel,
|
||||
logrus.FatalLevel,
|
||||
logrus.ErrorLevel,
|
||||
logrus.WarnLevel,
|
||||
logrus.InfoLevel,
|
||||
logrus.DebugLevel,
|
||||
}
|
||||
}
|
||||
26
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/syslog/syslog_test.go
generated
vendored
Normal file
26
Godeps/_workspace/src/github.com/maybebtc/logrus/hooks/syslog/syslog_test.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
package logrus_syslog
|
||||
|
||||
import (
|
||||
"github.com/Sirupsen/logrus"
|
||||
"log/syslog"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLocalhostAddAndPrint(t *testing.T) {
|
||||
log := logrus.New()
|
||||
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Unable to connect to local syslog.")
|
||||
}
|
||||
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
for _, level := range hook.Levels() {
|
||||
if len(log.Hooks[level]) != 1 {
|
||||
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Congratulations!")
|
||||
}
|
||||
22
Godeps/_workspace/src/github.com/maybebtc/logrus/json_formatter.go
generated
vendored
Normal file
22
Godeps/_workspace/src/github.com/maybebtc/logrus/json_formatter.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type JSONFormatter struct{}
|
||||
|
||||
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
prefixFieldClashes(entry)
|
||||
entry.Data["time"] = entry.Time.Format(time.RFC3339)
|
||||
entry.Data["msg"] = entry.Message
|
||||
entry.Data["level"] = entry.Level.String()
|
||||
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
166
Godeps/_workspace/src/github.com/maybebtc/logrus/logger.go
generated
vendored
Normal file
166
Godeps/_workspace/src/github.com/maybebtc/logrus/logger.go
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
|
||||
// file, or leave it default which is `os.Stdout`. You can also set this to
|
||||
// something more adventorous, such as logging to Kafka.
|
||||
Out io.Writer
|
||||
// Hooks for the logger instance. These allow firing events based on logging
|
||||
// levels and log entries. For example, to send errors to an error tracking
|
||||
// service, log to StatsD or dump the core on fatal errors.
|
||||
Hooks levelHooks
|
||||
// All log entries pass through the formatter before logged to Out. The
|
||||
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
||||
// TextFormatter is the default. In development (when a TTY is attached) it
|
||||
// logs with colors, but to a file it wouldn't. You can easily implement your
|
||||
// own that implements the `Formatter` interface, see the `README` or included
|
||||
// formatters for examples.
|
||||
Formatter Formatter
|
||||
// The logging level the logger should log at. This is typically (and defaults
|
||||
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
||||
// logged. `logrus.Debug` is useful in
|
||||
Level Level
|
||||
|
||||
// WriteFields permits the logger to add time, msg, level data to
|
||||
// user-provided Fields
|
||||
WriteFields bool
|
||||
|
||||
// Used to sync writing to the log.
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
||||
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||
// instantiate your own:
|
||||
//
|
||||
// var log = &Logger{
|
||||
// Out: os.Stderr,
|
||||
// Formatter: new(JSONFormatter),
|
||||
// Hooks: make(levelHooks),
|
||||
// Level: logrus.Debug,
|
||||
// }
|
||||
//
|
||||
// It's recommended to make this a global instance called `log`.
|
||||
func New() *Logger {
|
||||
return &Logger{
|
||||
Out: os.Stdout,
|
||||
Formatter: new(TextFormatter),
|
||||
Hooks: make(levelHooks),
|
||||
Level: InfoLevel,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds a field to the log entry, note that you it doesn't log until you call
|
||||
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
||||
// Ff you want multiple fields, use `WithFields`.
|
||||
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
||||
return NewEntry(logger).WithField(key, value)
|
||||
}
|
||||
|
||||
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
||||
// each `Field`.
|
||||
func (logger *Logger) WithFields(fields Fields) *Entry {
|
||||
return NewEntry(logger).WithFields(fields)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Debugf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||
NewEntry(logger).Infof(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Printf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Errorf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Panicf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debug(args ...interface{}) {
|
||||
NewEntry(logger).Debug(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Info(args ...interface{}) {
|
||||
NewEntry(logger).Info(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Print(args ...interface{}) {
|
||||
NewEntry(logger).Info(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warn(args ...interface{}) {
|
||||
NewEntry(logger).Warn(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warning(args ...interface{}) {
|
||||
NewEntry(logger).Warn(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Error(args ...interface{}) {
|
||||
NewEntry(logger).Error(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatal(args ...interface{}) {
|
||||
NewEntry(logger).Fatal(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panic(args ...interface{}) {
|
||||
NewEntry(logger).Panic(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugln(args ...interface{}) {
|
||||
NewEntry(logger).Debugln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Infoln(args ...interface{}) {
|
||||
NewEntry(logger).Infoln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Println(args ...interface{}) {
|
||||
NewEntry(logger).Println(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnln(args ...interface{}) {
|
||||
NewEntry(logger).Warnln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningln(args ...interface{}) {
|
||||
NewEntry(logger).Warnln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorln(args ...interface{}) {
|
||||
NewEntry(logger).Errorln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||
NewEntry(logger).Fatalln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicln(args ...interface{}) {
|
||||
NewEntry(logger).Panicln(args...)
|
||||
}
|
||||
94
Godeps/_workspace/src/github.com/maybebtc/logrus/logrus.go
generated
vendored
Normal file
94
Godeps/_workspace/src/github.com/maybebtc/logrus/logrus.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Fields type, used to pass structured data to `WithFields`.
type Fields map[string]interface{}

// Level is the severity of a log entry.
type Level uint8

// String converts the Level to its lowercase name, e.g. PanicLevel becomes
// "panic". Unrecognized values render as "unknown".
func (level Level) String() string {
	switch level {
	case PanicLevel:
		return "panic"
	case FatalLevel:
		return "fatal"
	case ErrorLevel:
		return "error"
	case WarnLevel:
		return "warning"
	case InfoLevel:
		return "info"
	case DebugLevel:
		return "debug"
	default:
		return "unknown"
	}
}

// ParseLevel takes a string level name and returns the corresponding Logrus
// log level constant, or an error for unknown names.
func ParseLevel(lvl string) (Level, error) {
	switch lvl {
	case "debug":
		return DebugLevel, nil
	case "info":
		return InfoLevel, nil
	case "warn", "warning":
		return WarnLevel, nil
	case "error":
		return ErrorLevel, nil
	case "fatal":
		return FatalLevel, nil
	case "panic":
		return PanicLevel, nil
	default:
		return Level(0), fmt.Errorf("not a valid logrus Level: %q", lvl)
	}
}

// These are the different logging levels. You can set the logging level to
// log on your instance of logger, obtained with `logrus.New()`.
const (
	// PanicLevel level, highest level of severity. Logs and then calls panic
	// with the message passed to Debug, Info, ...
	PanicLevel Level = iota
	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even
	// if the logging level is set to Panic.
	FatalLevel
	// ErrorLevel level. Used for errors that should definitely be noted;
	// commonly used by hooks to send errors to an error tracking service.
	ErrorLevel
	// WarnLevel level. Non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel level. General operational entries about what's going on
	// inside the application.
	InfoLevel
	// DebugLevel level. Usually only enabled when debugging. Very verbose.
	DebugLevel
)

// Compile-time proof that a *log.Logger satisfies StdLogger.
var _ StdLogger = &log.Logger{}

// StdLogger is what your logrus-enabled library should take: it is
// satisfied by both a stdlib *log.Logger and a logrus Logger. There's no
// standard interface, this is the closest we get, unfortunately.
type StdLogger interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})

	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})

	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}
|
||||
248
Godeps/_workspace/src/github.com/maybebtc/logrus/logrus_test.go
generated
vendored
Normal file
248
Godeps/_workspace/src/github.com/maybebtc/logrus/logrus_test.go
generated
vendored
Normal file
@ -0,0 +1,248 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// LogAndAssertJSON runs log against a fresh Logger whose output is captured
// in an in-memory buffer and JSON-formatted (with WriteFields enabled),
// decodes the single emitted entry, and hands the fields to assertions.
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
	var buffer bytes.Buffer
	var fields Fields

	logger := New()
	logger.Out = &buffer
	logger.Formatter = new(JSONFormatter)
	logger.WriteFields = true

	log(logger)

	err := json.Unmarshal(buffer.Bytes(), &fields)
	assert.Nil(t, err)

	assertions(fields)
}

// LogAndAssertText runs log against a fresh Logger with colorless text
// output, parses the emitted key=value pairs into a string map, and hands
// the map to assertions. Tokens without '=' are skipped; values are assumed
// to be strconv-quoted (as the text formatter emits for strings).
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
	var buffer bytes.Buffer

	logger := New()
	logger.Out = &buffer
	logger.Formatter = &TextFormatter{
		DisableColors: true,
	}

	log(logger)

	fields := make(map[string]string)
	for _, kv := range strings.Split(buffer.String(), " ") {
		if !strings.Contains(kv, "=") {
			continue
		}
		kvArr := strings.Split(kv, "=")
		key := strings.TrimSpace(kvArr[0])
		val, err := strconv.Unquote(kvArr[1])
		assert.NoError(t, err)
		fields[key] = val
	}
	assertions(fields)
}
|
||||
|
||||
// TestPrint verifies that Print logs the message at info level.
func TestPrint(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Print("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test")
		assert.Equal(t, fields["level"], "info")
	})
}

// TestInfo verifies the message and level emitted by Info.
func TestInfo(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test")
		assert.Equal(t, fields["level"], "info")
	})
}

// TestWarn verifies that Warn reports the "warning" level name.
func TestWarn(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Warn("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test")
		assert.Equal(t, fields["level"], "warning")
	})
}

// Infoln follows fmt.Sprintln semantics: a space between every operand.
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Infoln("test", "test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test test")
	})
}

func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Infoln("test", 10)
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test 10")
	})
}

func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Infoln(10, 10)
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "10 10")
	})
}
||||
|
||||
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
// Info follows fmt.Sprint semantics: no space when either neighbor is a string.
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Info("test", 10)
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test10")
	})
}

func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.Info("test", "test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "testtest")
	})
}

// TestWithFieldsShouldAllowAssignments checks that WithField on a derived
// entry does not leak the new field back into the parent entry's field set.
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
	var buffer bytes.Buffer
	var fields Fields

	logger := New()
	logger.Out = &buffer
	logger.Formatter = new(JSONFormatter)

	localLog := logger.WithFields(Fields{
		"key1": "value1",
	})

	// First write: derived entry carries both key1 and key2.
	localLog.WithField("key2", "value2").Info("test")
	err := json.Unmarshal(buffer.Bytes(), &fields)
	assert.Nil(t, err)

	assert.Equal(t, "value2", fields["key2"])
	assert.Equal(t, "value1", fields["key1"])

	// Second write from the original entry: key2 must not have stuck.
	buffer = bytes.Buffer{}
	fields = Fields{}
	localLog.Info("test")
	err = json.Unmarshal(buffer.Bytes(), &fields)
	assert.Nil(t, err)

	_, ok := fields["key2"]
	assert.Equal(t, false, ok)
	assert.Equal(t, "value1", fields["key1"])
}

// A user field named "msg" must not clobber the entry's own message.
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.WithField("msg", "hello").Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test")
	})
}

// Clashing user fields are instead emitted under a "fields." prefix.
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.WithField("msg", "hello").Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["msg"], "test")
		assert.Equal(t, fields["fields.msg"], "hello")
	})
}

func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.WithField("time", "hello").Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["fields.time"], "hello")
	})
}

func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
	LogAndAssertJSON(t, func(log *Logger) {
		log.WithField("level", 1).Info("test")
	}, func(fields Fields) {
		assert.Equal(t, fields["level"], "info")
		assert.Equal(t, fields["fields.level"], 1)
	})
}

// Prefixing must apply only to clashing user fields — logging twice through
// the same entry must not prefix the built-in time/level/msg keys.
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
	LogAndAssertText(t, func(log *Logger) {
		ll := log.WithField("herp", "derp")
		ll.Info("hello")
		ll.Info("bye")
	}, func(fields map[string]string) {
		for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
			if _, ok := fields[fieldName]; ok {
				t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
			}
		}
	})
}

// Level.String must produce the canonical lowercase level names.
func TestConvertLevelToString(t *testing.T) {
	assert.Equal(t, "debug", DebugLevel.String())
	assert.Equal(t, "info", InfoLevel.String())
	assert.Equal(t, "warning", WarnLevel.String())
	assert.Equal(t, "error", ErrorLevel.String())
	assert.Equal(t, "fatal", FatalLevel.String())
	assert.Equal(t, "panic", PanicLevel.String())
}

// ParseLevel round-trips every level name (accepting both "warn" and
// "warning") and rejects unknown names with a descriptive error.
func TestParseLevel(t *testing.T) {
	l, err := ParseLevel("panic")
	assert.Nil(t, err)
	assert.Equal(t, PanicLevel, l)

	l, err = ParseLevel("fatal")
	assert.Nil(t, err)
	assert.Equal(t, FatalLevel, l)

	l, err = ParseLevel("error")
	assert.Nil(t, err)
	assert.Equal(t, ErrorLevel, l)

	l, err = ParseLevel("warn")
	assert.Nil(t, err)
	assert.Equal(t, WarnLevel, l)

	l, err = ParseLevel("warning")
	assert.Nil(t, err)
	assert.Equal(t, WarnLevel, l)

	l, err = ParseLevel("info")
	assert.Nil(t, err)
	assert.Equal(t, InfoLevel, l)

	l, err = ParseLevel("debug")
	assert.Nil(t, err)
	assert.Equal(t, DebugLevel, l)

	// Invalid input: only the error message is checked here.
	l, err = ParseLevel("invalid")
	assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
}
|
||||
16
Godeps/_workspace/src/github.com/maybebtc/logrus/polite_json_formatter.go
generated
vendored
Normal file
16
Godeps/_workspace/src/github.com/maybebtc/logrus/polite_json_formatter.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type PoliteJSONFormatter struct{}
|
||||
|
||||
func (f *PoliteJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
12
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_darwin.go
generated
vendored
Normal file
12
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_darwin.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

// ioctlReadTermios is the ioctl request used to read terminal attributes on
// Darwin (TIOCGETA, rather than Linux's TCGETS).
const ioctlReadTermios = syscall.TIOCGETA

// Termios aliases the syscall termios struct; IsTerminal uses it as the
// result buffer for the TTY-detection ioctl.
type Termios syscall.Termios
|
||||
20
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_freebsd.go
generated
vendored
Normal file
20
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_freebsd.go
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
/*
Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
*/
package logrus

import (
	"syscall"
)

// ioctlReadTermios is the ioctl request used to read terminal attributes on
// FreeBSD.
const ioctlReadTermios = syscall.TIOCGETA

// Termios is a hand-declared stand-in for FreeBSD's struct termios (absent
// from Go 1.2's syscall package — see note above).
// NOTE(review): field widths and order must match the kernel's layout;
// verify against FreeBSD <termios.h> before changing.
type Termios struct {
	Iflag  uint32
	Oflag  uint32
	Cflag  uint32
	Lflag  uint32
	Cc     [20]uint8
	Ispeed uint32
	Ospeed uint32
}
|
||||
12
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_linux.go
generated
vendored
Normal file
12
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_linux.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

// ioctlReadTermios is the ioctl request used to read terminal attributes on
// Linux (TCGETS, rather than BSD's TIOCGETA).
const ioctlReadTermios = syscall.TCGETS

// Termios aliases the syscall termios struct; IsTerminal uses it as the
// result buffer for the TTY-detection ioctl.
type Termios syscall.Termios
|
||||
21
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_notwindows.go
generated
vendored
Normal file
21
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_notwindows.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux,!appengine darwin freebsd

package logrus

// IsTerminal reports whether stdout is attached to a terminal.
// (It takes no arguments — the checked descriptor is always syscall.Stdout.)

import (
	"syscall"
	"unsafe"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
	fd := syscall.Stdout
	var termios Termios
	// The terminal-attribute ioctl (ioctlReadTermios) succeeds only on a TTY;
	// the attributes read into termios are discarded — only errno matters.
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}
|
||||
27
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_windows.go
generated
vendored
Normal file
27
Godeps/_workspace/src/github.com/maybebtc/logrus/terminal_windows.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows

package logrus

import (
	"syscall"
	"unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")

var (
	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)

// IsTerminal returns true if the given file descriptor is a terminal.
// On Windows this asks kernel32's GetConsoleMode whether stdout is a console
// handle; the call succeeds (nonzero return, zero errno) only for consoles.
func IsTerminal() bool {
	fd := syscall.Stdout
	var st uint32
	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
	return r != 0 && e == 0
}
|
||||
95
Godeps/_workspace/src/github.com/maybebtc/logrus/text_formatter.go
generated
vendored
Normal file
95
Godeps/_workspace/src/github.com/maybebtc/logrus/text_formatter.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ANSI SGR color codes used for the colored terminal output.
const (
	nocolor = 0
	red     = 31
	green   = 32
	yellow  = 33
	blue    = 34
)

var (
	// baseTimestamp anchors the relative timestamps printed by miniTS.
	baseTimestamp time.Time
	// isTerminal caches, at package init, whether stdout is a TTY.
	isTerminal bool
)

func init() {
	baseTimestamp = time.Now()
	isTerminal = IsTerminal()
}

// miniTS returns whole seconds elapsed since package init; used as the
// compact [%04d] timestamp in colored output.
func miniTS() int {
	return int(time.Since(baseTimestamp) / time.Second)
}
|
||||
|
||||
// TextFormatter renders entries as human-readable text: colored compact
// output on a TTY, or plain key="value" pairs otherwise.
type TextFormatter struct {
	// Set to true to bypass checking for a TTY before outputting colors.
	ForceColors bool
	// DisableColors forces the plain key/value output even on a TTY
	// (it takes precedence over ForceColors).
	DisableColors bool
}
|
||||
|
||||
// Format renders one entry. On a TTY (or with ForceColors set, and colors not
// disabled) it produces colored human-oriented output; otherwise it emits
// machine-parseable key="value" pairs — time, level and msg first, then the
// entry's data keys in sorted order — terminated by a newline.
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {

	// Snapshot and sort the data keys for deterministic output.
	var keys []string
	for k := range entry.Data {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	b := &bytes.Buffer{}

	// Rename user fields that clash with the reserved time/level/msg keys.
	// NOTE(review): keys was snapshotted above this call, so any key it
	// renames (e.g. "msg" -> "fields.msg") is still looked up below under its
	// old name — confirm prefixFieldClashes keeps the old key readable, or
	// collect keys after this call.
	prefixFieldClashes(entry)

	isColored := (f.ForceColors || isTerminal) && !f.DisableColors

	if isColored {
		printColored(b, entry, keys)
	} else {
		f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
		f.appendKeyValue(b, "level", entry.Level.String())
		f.appendKeyValue(b, "msg", entry.Message)
		for _, key := range keys {
			f.appendKeyValue(b, key, entry.Data[key])
		}
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}
|
||||
|
||||
func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
|
||||
var levelColor int
|
||||
switch entry.Level {
|
||||
case WarnLevel:
|
||||
levelColor = yellow
|
||||
case ErrorLevel, FatalLevel, PanicLevel:
|
||||
levelColor = red
|
||||
default:
|
||||
levelColor = blue
|
||||
}
|
||||
|
||||
levelText := strings.ToUpper(entry.Level.String())[0:4]
|
||||
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
|
||||
for _, k := range keys {
|
||||
v := entry.Data[k]
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
|
||||
switch value.(type) {
|
||||
case string, error:
|
||||
fmt.Fprintf(b, "%v=%q ", key, value)
|
||||
default:
|
||||
fmt.Fprintf(b, "%v=%v ", key, value)
|
||||
}
|
||||
}
|
||||
23
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/.gitignore
generated
vendored
Normal file
23
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/.gitignore
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
21
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/LICENSE
generated
vendored
Normal file
21
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Nate Finch
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
166
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/README.md
generated
vendored
Normal file
166
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/README.md
generated
vendored
Normal file
@ -0,0 +1,166 @@
|
||||
# lumberjack [](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [](https://drone.io/github.com/natefinch/lumberjack/latest) [](https://ci.appveyor.com/project/natefinch/lumberjack)
|
||||
|
||||
### Lumberjack is a Go package for writing logs to rolling files.
|
||||
|
||||
Package lumberjack provides a rolling logger.
|
||||
|
||||
Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
|
||||
thusly:
|
||||
|
||||
import "gopkg.in/natefinch/lumberjack.v2"
|
||||
|
||||
The package name remains simply lumberjack, and the code resides at
|
||||
https://github.com/natefinch/lumberjack under the v2.0 branch.
|
||||
|
||||
Lumberjack is intended to be one part of a logging infrastructure.
|
||||
It is not an all-in-one solution, but instead is a pluggable
|
||||
component at the bottom of the logging stack that simply controls the files
|
||||
to which logs are written.
|
||||
|
||||
Lumberjack plays well with any logging package that can write to an
|
||||
io.Writer, including the standard library's log package.
|
||||
|
||||
Lumberjack assumes that only one process is writing to the output files.
|
||||
Using the same lumberjack configuration from multiple processes on the same
|
||||
machine will result in improper behavior.
|
||||
|
||||
|
||||
**Example**
|
||||
|
||||
To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
|
||||
|
||||
Code:
|
||||
|
||||
```go
|
||||
log.SetOutput(&lumberjack.Logger{
|
||||
Filename: "/var/log/myapp/foo.log",
|
||||
MaxSize: 500, // megabytes
|
||||
MaxBackups: 3,
|
||||
MaxAge: 28, //days
|
||||
})
|
||||
```
|
||||
|
||||
|
||||
|
||||
## type Logger
|
||||
``` go
|
||||
type Logger struct {
|
||||
// Filename is the file to write logs to. Backup log files will be retained
|
||||
// in the same directory. It uses <processname>-lumberjack.log in
|
||||
// os.TempDir() if empty.
|
||||
Filename string `json:"filename" yaml:"filename"`
|
||||
|
||||
// MaxSize is the maximum size in megabytes of the log file before it gets
|
||||
// rotated. It defaults to 100 megabytes.
|
||||
MaxSize int `json:"maxsize" yaml:"maxsize"`
|
||||
|
||||
// MaxAge is the maximum number of days to retain old log files based on the
|
||||
// timestamp encoded in their filename. Note that a day is defined as 24
|
||||
// hours and may not exactly correspond to calendar days due to daylight
|
||||
// savings, leap seconds, etc. The default is not to remove old log files
|
||||
// based on age.
|
||||
MaxAge int `json:"maxage" yaml:"maxage"`
|
||||
|
||||
// MaxBackups is the maximum number of old log files to retain. The default
|
||||
// is to retain all old log files (though MaxAge may still cause them to get
|
||||
// deleted.)
|
||||
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
|
||||
|
||||
// LocalTime determines if the time used for formatting the timestamps in
|
||||
// backup files is the computer's local time. The default is to use UTC
|
||||
// time.
|
||||
LocalTime bool `json:"localtime" yaml:"localtime"`
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
Logger is an io.WriteCloser that writes to the specified filename.
|
||||
|
||||
Logger opens or creates the logfile on first Write. If the file exists and
|
||||
is less than MaxSize megabytes, lumberjack will open and append to that file.
|
||||
If the file exists and its size is >= MaxSize megabytes, the file is renamed
|
||||
by putting the current time in a timestamp in the name immediately before the
|
||||
file's extension (or the end of the filename if there's no extension). A new
|
||||
log file is then created using the original filename.
|
||||
|
||||
Whenever a write would cause the current log file to exceed MaxSize megabytes,
|
||||
the current file is closed, renamed, and a new log file created with the
|
||||
original name. Thus, the filename you give Logger is always the "current" log
|
||||
file.
|
||||
|
||||
### Cleaning Up Old Log Files
|
||||
Whenever a new logfile gets created, old log files may be deleted. The most
|
||||
recent files according to the encoded timestamp will be retained, up to a
|
||||
number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
|
||||
with an encoded timestamp older than MaxAge days are deleted, regardless of
|
||||
MaxBackups. Note that the time encoded in the timestamp is the rotation
|
||||
time, which may differ from the last time that file was written to.
|
||||
|
||||
If MaxBackups and MaxAge are both 0, no old log files will be deleted.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### func (\*Logger) Close
|
||||
``` go
|
||||
func (l *Logger) Close() error
|
||||
```
|
||||
Close implements io.Closer, and closes the current logfile.
|
||||
|
||||
|
||||
|
||||
### func (\*Logger) Rotate
|
||||
``` go
|
||||
func (l *Logger) Rotate() error
|
||||
```
|
||||
Rotate causes Logger to close the existing log file and immediately create a
|
||||
new one. This is a helper function for applications that want to initiate
|
||||
rotations outside of the normal rotation rules, such as in response to
|
||||
SIGHUP. After rotating, this initiates a cleanup of old log files according
|
||||
to the normal rules.
|
||||
|
||||
**Example**
|
||||
|
||||
Example of how to rotate in response to SIGHUP.
|
||||
|
||||
Code:
|
||||
|
||||
```go
|
||||
l := &lumberjack.Logger{}
|
||||
log.SetOutput(l)
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, syscall.SIGHUP)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
<-c
|
||||
l.Rotate()
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
### func (\*Logger) Write
|
||||
``` go
|
||||
func (l *Logger) Write(p []byte) (n int, err error)
|
||||
```
|
||||
Write implements io.Writer. If a write would cause the log file to be larger
|
||||
than MaxSize, the file is closed, renamed to include a timestamp of the
|
||||
current time, and a new log file is created using the original log file name.
|
||||
If the length of the write is greater than MaxSize, an error is returned.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- - -
|
||||
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
|
||||
11
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown.go
generated
vendored
Normal file
11
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
// +build !linux

package lumberjack

import (
	"os"
)

// chown is a deliberate no-op on non-Linux platforms; ownership of rotated
// log files is only carried over where the Linux implementation
// (chown_linux.go) is compiled in.
func chown(_ string, _ os.FileInfo) error {
	return nil
}
|
||||
19
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
generated
vendored
Normal file
19
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
package lumberjack

import (
	"os"
	"syscall"
)

// os_Chown is a var so we can mock it out during tests.
var os_Chown = os.Chown

// chown creates the file at name (truncating any existing content) with
// info's mode, then transfers info's uid/gid onto it.
// NOTE(review): O_TRUNC wipes an existing file at name — callers appear to
// run this against a freshly rotated path, where truncation is harmless;
// confirm before reusing elsewhere.
func chown(name string, info os.FileInfo) error {
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
	if err != nil {
		return err
	}
	f.Close()
	// On Linux, FileInfo.Sys() is documented to be *syscall.Stat_t, so this
	// assertion cannot fail for a real stat result.
	stat := info.Sys().(*syscall.Stat_t)
	return os_Chown(name, int(stat.Uid), int(stat.Gid))
}
|
||||
18
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/example_test.go
generated
vendored
Normal file
18
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/example_test.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
package lumberjack_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/natefinch/lumberjack"
|
||||
)
|
||||
|
||||
// To use lumberjack with the standard library's log package, just pass it into
|
||||
// the SetOutput function when your application starts.
|
||||
func Example() {
|
||||
log.SetOutput(&lumberjack.Logger{
|
||||
Filename: "/var/log/myapp/foo.log",
|
||||
MaxSize: 500, // megabytes
|
||||
MaxBackups: 3,
|
||||
MaxAge: 28, // days
|
||||
})
|
||||
}
|
||||
104
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/linux_test.go
generated
vendored
Normal file
104
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/linux_test.go
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
||||
// +build linux
|
||||
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMaintainMode(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
dir := makeTempDir("TestMaintainMode", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
|
||||
mode := os.FileMode(0770)
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
|
||||
isNil(err, t)
|
||||
f.Close()
|
||||
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxBackups: 1,
|
||||
MaxSize: 100, // megabytes
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
err = l.Rotate()
|
||||
isNil(err, t)
|
||||
|
||||
filename2 := backupFile(dir)
|
||||
info, err := os.Stat(filename)
|
||||
isNil(err, t)
|
||||
info2, err := os.Stat(filename2)
|
||||
isNil(err, t)
|
||||
equals(mode, info.Mode(), t)
|
||||
equals(mode, info2.Mode(), t)
|
||||
}
|
||||
|
||||
func TestMaintainOwner(t *testing.T) {
|
||||
fakeC := fakeChown{}
|
||||
os_Chown = fakeC.Set
|
||||
os_Stat = fakeStat
|
||||
defer func() {
|
||||
os_Chown = os.Chown
|
||||
os_Stat = os.Stat
|
||||
}()
|
||||
currentTime = fakeTime
|
||||
dir := makeTempDir("TestMaintainOwner", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxBackups: 1,
|
||||
MaxSize: 100, // megabytes
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
newFakeTime()
|
||||
|
||||
err = l.Rotate()
|
||||
isNil(err, t)
|
||||
|
||||
equals(555, fakeC.uid, t)
|
||||
equals(666, fakeC.gid, t)
|
||||
}
|
||||
|
||||
type fakeChown struct {
|
||||
name string
|
||||
uid int
|
||||
gid int
|
||||
}
|
||||
|
||||
func (f *fakeChown) Set(name string, uid, gid int) error {
|
||||
f.name = name
|
||||
f.uid = uid
|
||||
f.gid = gid
|
||||
return nil
|
||||
}
|
||||
|
||||
func fakeStat(name string) (os.FileInfo, error) {
|
||||
info, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
stat := info.Sys().(*syscall.Stat_t)
|
||||
stat.Uid = 555
|
||||
stat.Gid = 666
|
||||
return info, nil
|
||||
}
|
||||
415
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
generated
vendored
Normal file
415
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
generated
vendored
Normal file
@ -0,0 +1,415 @@
|
||||
// Package lumberjack provides a rolling logger.
|
||||
//
|
||||
// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
|
||||
// thusly:
|
||||
//
|
||||
// import "gopkg.in/natefinch/lumberjack.v2"
|
||||
//
|
||||
// The package name remains simply lumberjack, and the code resides at
|
||||
// https://github.com/natefinch/lumberjack under the v2.0 branch.
|
||||
//
|
||||
// Lumberjack is intended to be one part of a logging infrastructure.
|
||||
// It is not an all-in-one solution, but instead is a pluggable
|
||||
// component at the bottom of the logging stack that simply controls the files
|
||||
// to which logs are written.
|
||||
//
|
||||
// Lumberjack plays well with any logging package that can write to an
|
||||
// io.Writer, including the standard library's log package.
|
||||
//
|
||||
// Lumberjack assumes that only one process is writing to the output files.
|
||||
// Using the same lumberjack configuration from multiple processes on the same
|
||||
// machine will result in improper behavior.
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
backupTimeFormat = "2006-01-02T15-04-05.000"
|
||||
defaultMaxSize = 100
|
||||
)
|
||||
|
||||
// ensure we always implement io.WriteCloser
|
||||
var _ io.WriteCloser = (*Logger)(nil)
|
||||
|
||||
// Logger is an io.WriteCloser that writes to the specified filename.
|
||||
//
|
||||
// Logger opens or creates the logfile on first Write. If the file exists and
|
||||
// is less than MaxSize megabytes, lumberjack will open and append to that file.
|
||||
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
|
||||
// by putting the current time in a timestamp in the name immediately before the
|
||||
// file's extension (or the end of the filename if there's no extension). A new
|
||||
// log file is then created using the original filename.
|
||||
//
|
||||
// Whenever a write would cause the current log file to exceed MaxSize megabytes,
|
||||
// the current file is closed, renamed, and a new log file created with the
|
||||
// original name. Thus, the filename you give Logger is always the "current" log
|
||||
// file.
|
||||
//
|
||||
// Cleaning Up Old Log Files
|
||||
//
|
||||
// Whenever a new logfile gets created, old log files may be deleted. The most
|
||||
// recent files according to the encoded timestamp will be retained, up to a
|
||||
// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
|
||||
// with an encoded timestamp older than MaxAge days are deleted, regardless of
|
||||
// MaxBackups. Note that the time encoded in the timestamp is the rotation
|
||||
// time, which may differ from the last time that file was written to.
|
||||
//
|
||||
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
|
||||
type Logger struct {
	// Filename is the file to write logs to.  Backup log files will be retained
	// in the same directory.  It uses <processname>-lumberjack.log in
	// os.TempDir() if empty.
	Filename string `json:"filename" yaml:"filename"`

	// MaxSize is the maximum size in megabytes of the log file before it gets
	// rotated. It defaults to 100 megabytes.
	MaxSize int `json:"maxsize" yaml:"maxsize"`

	// MaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename.  Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// savings, leap seconds, etc. The default is not to remove old log files
	// based on age.
	MaxAge int `json:"maxage" yaml:"maxage"`

	// MaxBackups is the maximum number of old log files to retain.  The default
	// is to retain all old log files (though MaxAge may still cause them to get
	// deleted.)
	MaxBackups int `json:"maxbackups" yaml:"maxbackups"`

	// LocalTime determines if the time used for formatting the timestamps in
	// backup files is the computer's local time.  The default is to use UTC
	// time.
	LocalTime bool `json:"localtime" yaml:"localtime"`

	// size is the number of bytes written to the currently open log file.
	size int64
	// file is the open log file, or nil before the first write / after Close.
	file *os.File
	// mu guards size and file; Logger must not be copied after first use.
	mu sync.Mutex
}
|
||||
|
||||
var (
	// currentTime exists so it can be mocked out by tests.
	currentTime = time.Now

	// os_Stat exists so it can be mocked out by tests.
	// NOTE(review): the underscore name is un-idiomatic Go (osStat would be
	// conventional), but tests stub this identifier, so renaming is unsafe here.
	os_Stat = os.Stat

	// megabyte is the conversion factor between MaxSize and bytes.  It is a
	// variable so tests can mock it out and not need to write megabytes of data
	// to disk.
	megabyte = 1024 * 1024
)
|
||||
|
||||
// Write implements io.Writer.  If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *Logger) Write(p []byte) (n int, err error) {
	l.mu.Lock()
	defer l.mu.Unlock()

	// A single write larger than the rotation threshold can never fit in any
	// file, so reject it outright instead of rotating forever.
	writeLen := int64(len(p))
	if writeLen > l.max() {
		return 0, fmt.Errorf(
			"write length %d exceeds maximum file size %d", writeLen, l.max(),
		)
	}

	// Lazily open (or create) the log file on the first write.
	if l.file == nil {
		if err = l.openExistingOrNew(len(p)); err != nil {
			return 0, err
		}
	}

	// Rotate first if this write would push the current file over the limit.
	if l.size+writeLen > l.max() {
		if err := l.rotate(); err != nil {
			return 0, err
		}
	}

	n, err = l.file.Write(p)
	l.size += int64(n)

	return n, err
}
|
||||
|
||||
// Close implements io.Closer, and closes the current logfile.
// It is safe to call on a Logger that never wrote anything.
func (l *Logger) Close() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.close()
}
|
||||
|
||||
// close closes the file if it is open.  Callers must hold l.mu.
func (l *Logger) close() error {
	if l.file == nil {
		return nil
	}
	err := l.file.Close()
	// Clear the handle even if Close failed, so a later write reopens fresh.
	l.file = nil
	return err
}
|
||||
|
||||
// Rotate causes Logger to close the existing log file and immediately create a
// new one.  This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP.  After rotating, this initiates a cleanup of old log files according
// to the normal rules.
func (l *Logger) Rotate() error {
	l.mu.Lock()
	defer l.mu.Unlock()
	return l.rotate()
}
|
||||
|
||||
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// cleanup.  Callers must hold l.mu.
func (l *Logger) rotate() error {
	if err := l.close(); err != nil {
		return err
	}

	if err := l.openNew(); err != nil {
		return err
	}
	return l.cleanup()
}
|
||||
|
||||
// openNew opens a new log file for writing, moving any old log file out of the
// way.  This methods assumes the file has already been closed.
func (l *Logger) openNew() error {
	err := os.MkdirAll(l.dir(), 0744)
	if err != nil {
		return fmt.Errorf("can't make directories for new logfile: %s", err)
	}

	name := l.filename()
	mode := os.FileMode(0644)
	info, err := os_Stat(name)
	if err == nil {
		// Copy the mode off the old logfile.
		mode = info.Mode()
		// move the existing file
		newname := backupName(name, l.LocalTime)
		if err := os.Rename(name, newname); err != nil {
			return fmt.Errorf("can't rename log file: %s", err)
		}

		// this is a no-op anywhere but linux
		// NOTE(review): chown is called with the original name (just renamed
		// away), not newname — presumably the linux implementation recreates
		// the file with the old owner; confirm against the chown_linux source.
		if err := chown(name, info); err != nil {
			return err
		}
	}

	// we use truncate here because this should only get called when we've moved
	// the file ourselves. if someone else creates the file in the meantime,
	// just wipe out the contents.
	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
	if err != nil {
		return fmt.Errorf("can't open new logfile: %s", err)
	}
	l.file = f
	l.size = 0
	return nil
}
|
||||
|
||||
// backupName creates a new filename from the given name, inserting a timestamp
|
||||
// between the filename and the extension, using the local time if requested
|
||||
// (otherwise UTC).
|
||||
func backupName(name string, local bool) string {
|
||||
dir := filepath.Dir(name)
|
||||
filename := filepath.Base(name)
|
||||
ext := filepath.Ext(filename)
|
||||
prefix := filename[:len(filename)-len(ext)]
|
||||
t := currentTime()
|
||||
if !local {
|
||||
t = t.UTC()
|
||||
}
|
||||
|
||||
timestamp := t.Format(backupTimeFormat)
|
||||
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
|
||||
}
|
||||
|
||||
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize.  If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *Logger) openExistingOrNew(writeLen int) error {
	filename := l.filename()
	info, err := os_Stat(filename)
	if os.IsNotExist(err) {
		return l.openNew()
	}
	if err != nil {
		return fmt.Errorf("error getting log file info: %s", err)
	}
	// Append to the existing file only if this write still fits strictly under
	// the size limit; otherwise rotate by opening a fresh file.
	if info.Size()+int64(writeLen) < l.max() {
		file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
		if err == nil {
			l.file = file
			l.size = info.Size()
			return nil
		}
		// if we fail to open the old log file for some reason, just ignore
		// it and open a new log file.
	}
	return l.openNew()
}
|
||||
|
||||
// filename returns the file to log to: l.Filename when set, otherwise
// <processname>-lumberjack.log in os.TempDir().
func (l *Logger) filename() string {
	if l.Filename != "" {
		return l.Filename
	}
	name := filepath.Base(os.Args[0]) + "-lumberjack.log"
	return filepath.Join(os.TempDir(), name)
}
|
||||
|
||||
// cleanup deletes old log files, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *Logger) cleanup() error {
	if l.MaxBackups == 0 && l.MaxAge == 0 {
		return nil
	}

	files, err := l.oldLogFiles()
	if err != nil {
		return err
	}

	var deletes []logInfo

	// files is sorted newest-first, so everything past MaxBackups is surplus.
	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
		deletes = files[l.MaxBackups:]
		files = files[:l.MaxBackups]
	}
	if l.MaxAge > 0 {
		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))

		cutoff := currentTime().Add(-1 * diff)

		for _, f := range files {
			if f.timestamp.Before(cutoff) {
				deletes = append(deletes, f)
			}
		}
	}

	if len(deletes) == 0 {
		return nil
	}

	// Deletion happens asynchronously; removal errors are dropped.
	go deleteAll(l.dir(), deletes)

	return nil
}
|
||||
|
||||
func deleteAll(dir string, files []logInfo) {
|
||||
// remove files on a separate goroutine
|
||||
for _, f := range files {
|
||||
// what am I going to do, log this?
|
||||
_ = os.Remove(filepath.Join(dir, f.Name()))
|
||||
}
|
||||
}
|
||||
|
||||
// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted newest-first by the timestamp
// encoded in each filename (via byFormatTime — not by ModTime, despite what
// an earlier comment claimed).
func (l *Logger) oldLogFiles() ([]logInfo, error) {
	files, err := ioutil.ReadDir(l.dir())
	if err != nil {
		return nil, fmt.Errorf("can't read log file directory: %s", err)
	}
	logFiles := []logInfo{}

	prefix, ext := l.prefixAndExt()

	for _, f := range files {
		if f.IsDir() {
			continue
		}

		// Skip anything that doesn't match "<prefix><timestamp><ext>".
		name := l.timeFromName(f.Name(), prefix, ext)
		if name == "" {
			continue
		}
		// Files whose embedded timestamp fails to parse are skipped silently.
		t, err := time.Parse(backupTimeFormat, name)
		if err == nil {
			logFiles = append(logFiles, logInfo{t, f})
		}
	}

	sort.Sort(byFormatTime(logFiles))

	return logFiles, nil
}
|
||||
|
||||
// timeFromName extracts the formatted time from the filename by stripping off
|
||||
// the filename's prefix and extension. This prevents someone's filename from
|
||||
// confusing time.parse.
|
||||
func (l *Logger) timeFromName(filename, prefix, ext string) string {
|
||||
if !strings.HasPrefix(filename, prefix) {
|
||||
return ""
|
||||
}
|
||||
filename = filename[len(prefix):]
|
||||
|
||||
if !strings.HasSuffix(filename, ext) {
|
||||
return ""
|
||||
}
|
||||
filename = filename[:len(filename)-len(ext)]
|
||||
return filename
|
||||
}
|
||||
|
||||
// max returns the maximum size in bytes of log files before rolling.
|
||||
func (l *Logger) max() int64 {
|
||||
if l.MaxSize == 0 {
|
||||
return int64(defaultMaxSize * megabyte)
|
||||
}
|
||||
return int64(l.MaxSize) * int64(megabyte)
|
||||
}
|
||||
|
||||
// dir returns the directory for the current filename.
func (l *Logger) dir() string {
	return filepath.Dir(l.filename())
}
|
||||
|
||||
// prefixAndExt returns the filename part and extension part from the Logger's
|
||||
// filename.
|
||||
func (l *Logger) prefixAndExt() (prefix, ext string) {
|
||||
filename := filepath.Base(l.filename())
|
||||
ext = filepath.Ext(filename)
|
||||
prefix = filename[:len(filename)-len(ext)] + "-"
|
||||
return prefix, ext
|
||||
}
|
||||
|
||||
// logInfo is a convenience struct to return the filename and its embedded
// timestamp.
type logInfo struct {
	// timestamp is the rotation time parsed from the backup file's name.
	timestamp time.Time
	os.FileInfo
}

// byFormatTime sorts by newest time formatted in the name.
type byFormatTime []logInfo

// Less reports whether entry i is newer than entry j, so sorting yields
// newest-first order.
func (b byFormatTime) Less(i, j int) bool {
	return b[i].timestamp.After(b[j].timestamp)
}

func (b byFormatTime) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}

func (b byFormatTime) Len() int {
	return len(b)
}
|
||||
634
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go
generated
vendored
Normal file
634
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go
generated
vendored
Normal file
@ -0,0 +1,634 @@
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"gopkg.in/yaml.v1"
|
||||
)
|
||||
|
||||
// !!!NOTE!!!
//
// Running these tests in parallel will almost certainly cause sporadic (or even
// regular) failures, because they're all messing with the same global variable
// that controls the logic's mocked time.Now.  So... don't do that.

// Since all the tests uses the time to determine filenames etc, we need to
// control the wall clock as much as possible, which means having a wall clock
// that doesn't change unless we want it to.
var fakeCurrentTime = time.Now()

// fakeTime is installed by each test as the package's currentTime hook.
func fakeTime() time.Time {
	return fakeCurrentTime
}
|
||||
|
||||
// TestNewFile verifies that the first write creates the log file containing
// exactly the written bytes, with nothing else in the directory.
func TestNewFile(t *testing.T) {
	currentTime = fakeTime

	dir := makeTempDir("TestNewFile", t)
	defer os.RemoveAll(dir)
	l := &Logger{
		Filename: logFile(dir),
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)
	existsWithLen(logFile(dir), n, t)
	fileCount(dir, 1, t)
}
|
||||
|
||||
// TestOpenExisting verifies that writes append to an existing log file that is
// still under MaxSize, rather than rotating or truncating it.
func TestOpenExisting(t *testing.T) {
	currentTime = fakeTime
	dir := makeTempDir("TestOpenExisting", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	data := []byte("foo!")
	err := ioutil.WriteFile(filename, data, 0644)
	isNil(err, t)
	existsWithLen(filename, len(data), t)

	l := &Logger{
		Filename: filename,
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	// make sure the file got appended
	existsWithLen(filename, len(data)+n, t)

	// make sure no other files were created
	fileCount(dir, 1, t)
}
|
||||
|
||||
// TestWriteTooLong verifies that a single write larger than the maximum file
// size is rejected with an error and creates no file.
func TestWriteTooLong(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1
	dir := makeTempDir("TestWriteTooLong", t)
	defer os.RemoveAll(dir)
	l := &Logger{
		Filename: logFile(dir),
		MaxSize:  5,
	}
	defer l.Close()
	b := []byte("booooooooooooooo!")
	n, err := l.Write(b)
	notNil(err, t)
	equals(0, n, t)
	// NOTE(review): Write formats l.max() (bytes) into the error; comparing
	// against l.MaxSize only works because megabyte is mocked to 1 above.
	equals(err.Error(),
		fmt.Sprintf("write length %d exceeds maximum file size %d", len(b), l.MaxSize), t)
	_, err = os.Stat(logFile(dir))
	assert(os.IsNotExist(err), t, "File exists, but should not have been created")
}
|
||||
|
||||
// TestMakeLogDir verifies that missing parent directories are created on the
// first write.
func TestMakeLogDir(t *testing.T) {
	currentTime = fakeTime
	dir := time.Now().Format("TestMakeLogDir" + backupTimeFormat)
	dir = filepath.Join(os.TempDir(), dir)
	defer os.RemoveAll(dir)
	filename := logFile(dir)
	l := &Logger{
		Filename: filename,
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)
	existsWithLen(logFile(dir), n, t)
	fileCount(dir, 1, t)
}
|
||||
|
||||
// TestDefaultFilename verifies that an empty Filename falls back to
// <processname>-lumberjack.log in os.TempDir().
func TestDefaultFilename(t *testing.T) {
	currentTime = fakeTime
	dir := os.TempDir()
	filename := filepath.Join(dir, filepath.Base(os.Args[0])+"-lumberjack.log")
	defer os.Remove(filename)
	l := &Logger{}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)

	isNil(err, t)
	equals(len(b), n, t)
	existsWithLen(filename, n, t)
}
|
||||
|
||||
// TestAutoRotate verifies that a write that would exceed MaxSize first rotates
// the current file into a timestamped backup.
func TestAutoRotate(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1

	dir := makeTempDir("TestAutoRotate", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	l := &Logger{
		Filename: filename,
		MaxSize:  10,
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	existsWithLen(filename, n, t)
	fileCount(dir, 1, t)

	newFakeTime()

	b2 := []byte("foooooo!")
	n, err = l.Write(b2)
	isNil(err, t)
	equals(len(b2), n, t)

	// the old logfile should be moved aside and the main logfile should have
	// only the last write in it.
	existsWithLen(filename, n, t)

	// the backup file will use the current fake time and have the old contents.
	existsWithLen(backupFile(dir), len(b), t)

	fileCount(dir, 2, t)
}
|
||||
|
||||
// TestFirstWriteRotate verifies that a pre-existing file is rotated on the very
// first write when that write would push it over MaxSize.
func TestFirstWriteRotate(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1
	dir := makeTempDir("TestFirstWriteRotate", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	l := &Logger{
		Filename: filename,
		MaxSize:  10,
	}
	defer l.Close()

	start := []byte("boooooo!")
	err := ioutil.WriteFile(filename, start, 0600)
	isNil(err, t)

	newFakeTime()

	// this would make us rotate
	b := []byte("fooo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	existsWithLen(filename, n, t)
	existsWithLen(backupFile(dir), len(start), t)

	fileCount(dir, 2, t)
}
|
||||
|
||||
// TestMaxBackups verifies that rotation prunes surplus backups down to
// MaxBackups, while leaving directories and non-matching files alone.
func TestMaxBackups(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1
	dir := makeTempDir("TestMaxBackups", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	l := &Logger{
		Filename:   filename,
		MaxSize:    10,
		MaxBackups: 1,
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	existsWithLen(filename, n, t)
	fileCount(dir, 1, t)

	newFakeTime()

	// this will put us over the max
	b2 := []byte("foooooo!")
	n, err = l.Write(b2)
	isNil(err, t)
	equals(len(b2), n, t)

	// this will use the new fake time
	secondFilename := backupFile(dir)
	existsWithLen(secondFilename, len(b), t)

	// make sure the old file still exists with the same size.
	existsWithLen(filename, n, t)

	fileCount(dir, 2, t)

	newFakeTime()

	// this will make us rotate again
	n, err = l.Write(b2)
	isNil(err, t)
	equals(len(b2), n, t)

	// this will use the new fake time
	thirdFilename := backupFile(dir)
	existsWithLen(thirdFilename, len(b2), t)

	existsWithLen(filename, n, t)

	// we need to wait a little bit since the files get deleted on a different
	// goroutine.
	<-time.After(time.Millisecond * 10)

	// should only have two files in the dir still
	fileCount(dir, 2, t)

	// second file name should still exist
	existsWithLen(thirdFilename, len(b2), t)

	// should have deleted the first backup
	notExist(secondFilename, t)

	// now test that we don't delete directories or non-logfile files

	newFakeTime()

	// create a file that is close to but different from the logfile name.
	// It shouldn't get caught by our deletion filters.
	notlogfile := logFile(dir) + ".foo"
	err = ioutil.WriteFile(notlogfile, []byte("data"), 0644)
	isNil(err, t)

	// Make a directory that exactly matches our log file filters... it still
	// shouldn't get caught by the deletion filter since it's a directory.
	notlogfiledir := backupFile(dir)
	err = os.Mkdir(notlogfiledir, 0700)
	isNil(err, t)

	newFakeTime()

	// this will make us rotate again
	n, err = l.Write(b2)
	isNil(err, t)
	equals(len(b2), n, t)

	// this will use the new fake time
	fourthFilename := backupFile(dir)
	existsWithLen(fourthFilename, len(b2), t)

	// we need to wait a little bit since the files get deleted on a different
	// goroutine.
	<-time.After(time.Millisecond * 10)

	// We should have four things in the directory now - the 2 log files, the
	// not log file, and the directory
	fileCount(dir, 4, t)

	// third file name should still exist
	existsWithLen(filename, n, t)

	existsWithLen(fourthFilename, len(b2), t)

	// should have deleted the first filename
	notExist(thirdFilename, t)

	// the not-a-logfile should still exist
	exists(notlogfile, t)

	// the directory
	exists(notlogfiledir, t)
}
|
||||
|
||||
func TestMaxAge(t *testing.T) {
|
||||
currentTime = fakeTime
|
||||
megabyte = 1
|
||||
|
||||
dir := makeTempDir("TestMaxAge", t)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
filename := logFile(dir)
|
||||
l := &Logger{
|
||||
Filename: filename,
|
||||
MaxSize: 10,
|
||||
MaxAge: 1,
|
||||
}
|
||||
defer l.Close()
|
||||
b := []byte("boo!")
|
||||
n, err := l.Write(b)
|
||||
isNil(err, t)
|
||||
equals(len(b), n, t)
|
||||
|
||||
existsWithLen(filename, n, t)
|
||||
fileCount(dir, 1, t)
|
||||
|
||||
// two days later
|
||||
newFakeTime()
|
||||
|
||||
b2 := []byte("foooooo!")
|
||||
n, err = l.Write(b2)
|
||||
isNil(err, t)
|
||||
equals(len(b2), n, t)
|
||||
existsWithLen(backupFile(dir), len(b), t)
|
||||
|
||||
// we need to wait a little bit since the files get deleted on a different
|
||||
// goroutine.
|
||||
<-time.After(10 * time.Millisecond)
|
||||
|
||||
// We should still have 2 log files, since the most recent backup was just
|
||||
// created.
|
||||
fileCount(dir, 2, t)
|
||||
|
||||
existsWithLen(filename, len(b2), t)
|
||||
|
||||
// we should have deleted the old file due to being too old
|
||||
existsWithLen(backupFile(dir), len(b), t)
|
||||
|
||||
// two days later
|
||||
newFakeTime()
|
||||
|
||||
b3 := []byte("foooooo!")
|
||||
n, err = l.Write(b2)
|
||||
isNil(err, t)
|
||||
equals(len(b3), n, t)
|
||||
existsWithLen(backupFile(dir), len(b2), t)
|
||||
|
||||
// we need to wait a little bit since the files get deleted on a different
|
||||
// goroutine.
|
||||
<-time.After(10 * time.Millisecond)
|
||||
|
||||
// We should have 2 log files - the main log file, and the most recent
|
||||
// backup. The earlier backup is past the cutoff and should be gone.
|
||||
fileCount(dir, 2, t)
|
||||
|
||||
existsWithLen(filename, len(b3), t)
|
||||
|
||||
// we should have deleted the old file due to being too old
|
||||
existsWithLen(backupFile(dir), len(b2), t)
|
||||
|
||||
}
|
||||
|
||||
// TestOldLogFiles verifies that oldLogFiles finds backup files and sorts them
// newest-first by the timestamp embedded in their names.
func TestOldLogFiles(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1

	dir := makeTempDir("TestOldLogFiles", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)
	data := []byte("data")
	// NOTE(review): mode 07 looks like a typo for 0700/0644, but the test only
	// reads the files back via ReadDir, so it passes as written.
	err := ioutil.WriteFile(filename, data, 07)
	isNil(err, t)

	// This gives us a time with the same precision as the time we get from the
	// timestamp in the name.
	t1, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
	isNil(err, t)

	backup := backupFile(dir)
	err = ioutil.WriteFile(backup, data, 07)
	isNil(err, t)

	newFakeTime()

	t2, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
	isNil(err, t)

	backup2 := backupFile(dir)
	err = ioutil.WriteFile(backup2, data, 07)
	isNil(err, t)

	l := &Logger{Filename: filename}
	files, err := l.oldLogFiles()
	isNil(err, t)
	equals(2, len(files), t)

	// should be sorted by newest file first, which would be t2
	equals(t2, files[0].timestamp, t)
	equals(t1, files[1].timestamp, t)
}
|
||||
|
||||
// TestTimeFromName verifies that timeFromName only accepts names carrying both
// the expected prefix and extension, and returns the middle portion.
func TestTimeFromName(t *testing.T) {
	l := &Logger{Filename: "/var/log/myfoo/foo.log"}
	prefix, ext := l.prefixAndExt()
	val := l.timeFromName("foo-2014-05-04T14-44-33.555.log", prefix, ext)
	equals("2014-05-04T14-44-33.555", val, t)

	val = l.timeFromName("foo-2014-05-04T14-44-33.555", prefix, ext)
	equals("", val, t)

	val = l.timeFromName("2014-05-04T14-44-33.555.log", prefix, ext)
	equals("", val, t)

	val = l.timeFromName("foo.log", prefix, ext)
	equals("", val, t)
}
|
||||
|
||||
// TestLocalTime verifies that LocalTime makes backup filenames use the local
// timezone rather than UTC.
func TestLocalTime(t *testing.T) {
	currentTime = fakeTime
	megabyte = 1

	dir := makeTempDir("TestLocalTime", t)
	defer os.RemoveAll(dir)

	l := &Logger{
		Filename:  logFile(dir),
		MaxSize:   10,
		LocalTime: true,
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	b2 := []byte("fooooooo!")
	n2, err := l.Write(b2)
	isNil(err, t)
	equals(len(b2), n2, t)

	existsWithLen(logFile(dir), n2, t)
	existsWithLen(backupFileLocal(dir), n, t)
}
|
||||
|
||||
// TestRotate verifies the explicit Rotate API, including MaxBackups pruning
// after repeated forced rotations.
func TestRotate(t *testing.T) {
	currentTime = fakeTime
	dir := makeTempDir("TestRotate", t)
	defer os.RemoveAll(dir)

	filename := logFile(dir)

	l := &Logger{
		Filename:   filename,
		MaxBackups: 1,
		MaxSize:    100, // megabytes
	}
	defer l.Close()
	b := []byte("boo!")
	n, err := l.Write(b)
	isNil(err, t)
	equals(len(b), n, t)

	existsWithLen(filename, n, t)
	fileCount(dir, 1, t)

	newFakeTime()

	err = l.Rotate()
	isNil(err, t)

	// we need to wait a little bit since the files get deleted on a different
	// goroutine.
	<-time.After(10 * time.Millisecond)

	filename2 := backupFile(dir)
	existsWithLen(filename2, n, t)
	existsWithLen(filename, 0, t)
	fileCount(dir, 2, t)
	newFakeTime()

	err = l.Rotate()
	isNil(err, t)

	// we need to wait a little bit since the files get deleted on a different
	// goroutine.
	<-time.After(10 * time.Millisecond)

	filename3 := backupFile(dir)
	existsWithLen(filename3, 0, t)
	existsWithLen(filename, 0, t)
	fileCount(dir, 2, t)

	b2 := []byte("foooooo!")
	n, err = l.Write(b2)
	isNil(err, t)
	equals(len(b2), n, t)

	// this will use the new fake time
	existsWithLen(filename, n, t)
}
|
||||
|
||||
// TestJson verifies that Logger's json struct tags round-trip every public
// config field.
func TestJson(t *testing.T) {
	data := []byte(`
{
"filename": "foo",
"maxsize": 5,
"maxage": 10,
"maxbackups": 3,
"localtime": true
}`[1:])

	l := Logger{}
	err := json.Unmarshal(data, &l)
	isNil(err, t)
	equals("foo", l.Filename, t)
	equals(5, l.MaxSize, t)
	equals(10, l.MaxAge, t)
	equals(3, l.MaxBackups, t)
	equals(true, l.LocalTime, t)
}
|
||||
|
||||
// TestYaml verifies that Logger's yaml struct tags round-trip every public
// config field.
func TestYaml(t *testing.T) {
	data := []byte(`
filename: foo
maxsize: 5
maxage: 10
maxbackups: 3
localtime: true`[1:])

	l := Logger{}
	err := yaml.Unmarshal(data, &l)
	isNil(err, t)
	equals("foo", l.Filename, t)
	equals(5, l.MaxSize, t)
	equals(10, l.MaxAge, t)
	equals(3, l.MaxBackups, t)
	equals(true, l.LocalTime, t)
}
|
||||
|
||||
// TestToml verifies that Logger decodes from TOML with no undecoded keys left
// over (field names match by default; no toml tags are declared).
func TestToml(t *testing.T) {
	data := `
filename = "foo"
maxsize = 5
maxage = 10
maxbackups = 3
localtime = true`[1:]

	l := Logger{}
	md, err := toml.Decode(data, &l)
	isNil(err, t)
	equals("foo", l.Filename, t)
	equals(5, l.MaxSize, t)
	equals(10, l.MaxAge, t)
	equals(3, l.MaxBackups, t)
	equals(true, l.LocalTime, t)
	equals(0, len(md.Undecoded()), t)
}
|
||||
|
||||
// makeTempDir creates a directory with a semi-unique name in the OS temp
// directory.  It should be based on the name of the test, to keep parallel
// tests from colliding, and must be cleaned up after the test is finished.
func makeTempDir(name string, t testing.TB) string {
	dir := time.Now().Format(name + backupTimeFormat)
	dir = filepath.Join(os.TempDir(), dir)
	isNilUp(os.Mkdir(dir, 0777), t, 1)
	return dir
}
|
||||
|
||||
// existsWithLen checks that the given file exists and has the correct length.
func existsWithLen(path string, length int, t testing.TB) {
	info, err := os.Stat(path)
	isNilUp(err, t, 1)
	equalsUp(int64(length), info.Size(), t, 1)
}
|
||||
|
||||
// logFile returns the fixed primary log file name inside dir.
// (Despite the old comment, the name does not depend on the fake time.)
func logFile(dir string) string {
	return filepath.Join(dir, "foobar.log")
}
|
||||
|
||||
// backupFile returns the backup name logFile(dir) rotates to at the current
// fake time, formatted in UTC.
func backupFile(dir string) string {
	return filepath.Join(dir, "foobar-"+fakeTime().UTC().Format(backupTimeFormat)+".log")
}
|
||||
|
||||
// backupFileLocal is like backupFile but formats the timestamp in local time.
func backupFileLocal(dir string) string {
	return filepath.Join(dir, "foobar-"+fakeTime().Format(backupTimeFormat)+".log")
}
|
||||
|
||||
// logFileLocal returns the log file name in the given directory for the current
// fake time using the local timezone.
// NOTE(review): appears unused, and unlike logFile/backupFileLocal it has no
// "foobar" prefix or ".log" suffix — verify before relying on it.
func logFileLocal(dir string) string {
	return filepath.Join(dir, fakeTime().Format(backupTimeFormat))
}
|
||||
|
||||
// fileCount checks that the number of files in the directory is exp.
func fileCount(dir string, exp int, t testing.TB) {
	files, err := ioutil.ReadDir(dir)
	isNilUp(err, t, 1)
	// Make sure no other files were created.
	equalsUp(exp, len(files), t, 1)
}
|
||||
|
||||
// newFakeTime sets the fake "current time" to two days later.
func newFakeTime() {
	fakeCurrentTime = fakeCurrentTime.Add(time.Hour * 24 * 2)
}
|
||||
|
||||
// notExist fails the test unless stat'ing path yields os.IsNotExist.
func notExist(path string, t testing.TB) {
	_, err := os.Stat(path)
	assertUp(os.IsNotExist(err), t, 1, "expected to get os.IsNotExist, but instead got %v", err)
}
|
||||
|
||||
// exists fails the test unless path can be stat'ed (file or directory).
func exists(path string, t testing.TB) {
	_, err := os.Stat(path)
	assertUp(err == nil, t, 1, "expected file to exist, but got error from os.Stat: %v", err)
}
|
||||
27
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/rotate_test.go
generated
vendored
Normal file
27
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/rotate_test.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
// +build linux
|
||||
|
||||
package lumberjack_test
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/natefinch/lumberjack"
|
||||
)
|
||||
|
||||
// Example of how to rotate in response to SIGHUP.
func ExampleLogger_Rotate() {
	l := &lumberjack.Logger{}
	log.SetOutput(l)
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)

	// NOTE(review): this goroutine has no stop condition — acceptable for a
	// process-lifetime example, but real code should provide a way to stop it.
	go func() {
		for {
			<-c
			l.Rotate()
		}
	}()
}
|
||||
91
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/testing_test.go
generated
vendored
Normal file
91
Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/testing_test.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
package lumberjack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// assert will log the given message if condition is false.
func assert(condition bool, t testing.TB, msg string, v ...interface{}) {
	assertUp(condition, t, 1, msg, v...)
}
|
||||
|
||||
// assertUp is like assert, but used inside helper functions, to ensure that
// the file and line number reported by failures corresponds to one or more
// levels up the stack.
func assertUp(condition bool, t testing.TB, caller int, msg string, v ...interface{}) {
	if condition {
		return
	}
	_, file, line, _ := runtime.Caller(caller + 1)
	args := append([]interface{}{filepath.Base(file), line}, v...)
	fmt.Printf("%s:%d: "+msg+"\n", args...)
	t.FailNow()
}
|
||||
|
||||
// equals tests that the two values are equal according to reflect.DeepEqual.
func equals(exp, act interface{}, t testing.TB) {
	equalsUp(exp, act, t, 1)
}
|
||||
|
||||
// equalsUp is like equals, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func equalsUp(exp, act interface{}, t testing.TB, caller int) {
	if reflect.DeepEqual(exp, act) {
		return
	}
	_, file, line, _ := runtime.Caller(caller + 1)
	fmt.Printf("%s:%d: exp: %v (%T), got: %v (%T)\n",
		filepath.Base(file), line, exp, exp, act, act)
	t.FailNow()
}
|
||||
|
||||
// isNil reports a failure if the given value is not nil.  Note that values
// which cannot be nil will always fail this check.
func isNil(obtained interface{}, t testing.TB) {
	isNilUp(obtained, t, 1)
}
|
||||
|
||||
// isNilUp is like isNil, but used inside helper functions, to ensure that the
|
||||
// file and line number reported by failures corresponds to one or more levels
|
||||
// up the stack.
|
||||
func isNilUp(obtained interface{}, t testing.TB, caller int) {
|
||||
if !_isNil(obtained) {
|
||||
_, file, line, _ := runtime.Caller(caller + 1)
|
||||
fmt.Printf("%s:%d: expected nil, got: %v\n", filepath.Base(file), line, obtained)
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
// notNil reports a failure if the given value is nil.
|
||||
func notNil(obtained interface{}, t testing.TB) {
|
||||
notNilUp(obtained, t, 1)
|
||||
}
|
||||
|
||||
// notNilUp is like notNil, but used inside helper functions, to ensure that the
|
||||
// file and line number reported by failures corresponds to one or more levels
|
||||
// up the stack.
|
||||
func notNilUp(obtained interface{}, t testing.TB, caller int) {
|
||||
if _isNil(obtained) {
|
||||
_, file, line, _ := runtime.Caller(caller + 1)
|
||||
fmt.Printf("%s:%d: expected non-nil, got: %v\n", filepath.Base(file), line, obtained)
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
// _isNil is a helper function for isNil and notNil, and should not be used
|
||||
// directly.
|
||||
func _isNil(obtained interface{}) bool {
|
||||
if obtained == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
switch v := reflect.ValueOf(obtained); v.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return v.IsNil()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
@ -3,9 +3,9 @@ package main
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
cmds "github.com/jbenet/go-ipfs/commands"
|
||||
@ -16,6 +16,7 @@ import (
|
||||
chunk "github.com/jbenet/go-ipfs/importer/chunk"
|
||||
peer "github.com/jbenet/go-ipfs/peer"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
errors "github.com/jbenet/go-ipfs/util/debugerror"
|
||||
)
|
||||
|
||||
var initCmd = &cmds.Command{
|
||||
@ -29,6 +30,11 @@ var initCmd = &cmds.Command{
|
||||
cmds.StringOption("passphrase", "p", "Passphrase for encrypting the private key"),
|
||||
cmds.BoolOption("force", "f", "Overwrite existing config (if it exists)"),
|
||||
cmds.StringOption("datastore", "d", "Location for the IPFS data store"),
|
||||
|
||||
// TODO need to decide whether to expose the override as a file or a
|
||||
// directory. That is: should we allow the user to also specify the
|
||||
// name of the file?
|
||||
// TODO cmds.StringOption("event-logs", "l", "Location for machine-readable event logs"),
|
||||
},
|
||||
Run: func(req cmds.Request) (interface{}, error) {
|
||||
|
||||
@ -97,10 +103,24 @@ func doInit(configRoot string, dspathOverride string, force bool, nBitsForKeypai
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nd, err := core.NewIpfsNode(conf, false)
|
||||
err = addTheWelcomeFile(conf, func(k u.Key) {
|
||||
fmt.Printf("\nto get started, enter: ipfs cat %s\n", k)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// addTheWelcomeFile adds a file containing the welcome message to the newly
|
||||
// minted node. On success, it calls onSuccess
|
||||
func addTheWelcomeFile(conf *config.Config, onSuccess func(u.Key)) error {
|
||||
// TODO extract this file creation operation into a function
|
||||
nd, err := core.NewIpfsNode(conf, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer nd.Close()
|
||||
|
||||
// Set up default file
|
||||
@ -108,15 +128,15 @@ func doInit(configRoot string, dspathOverride string, force bool, nBitsForKeypai
|
||||
|
||||
defnd, err := imp.BuildDagFromReader(reader, nd.DAG, nd.Pinning.GetManual(), chunk.DefaultSplitter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
k, err := defnd.Key()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to write test file: %s", err)
|
||||
return fmt.Errorf("failed to write test file: %s", err)
|
||||
}
|
||||
fmt.Printf("done.\nto test, enter: ipfs cat %s\n", k)
|
||||
return nil, nil
|
||||
onSuccess(k)
|
||||
return nil
|
||||
}
|
||||
|
||||
func datastoreConfig(dspath string) (config.Datastore, error) {
|
||||
@ -131,16 +151,9 @@ func datastoreConfig(dspath string) (config.Datastore, error) {
|
||||
ds.Path = dspath
|
||||
ds.Type = "leveldb"
|
||||
|
||||
// Construct the data store if missing
|
||||
if err := os.MkdirAll(dspath, os.ModePerm); err != nil {
|
||||
return ds, err
|
||||
}
|
||||
|
||||
// Check the directory is writeable
|
||||
if f, err := os.Create(filepath.Join(dspath, "._check_writeable")); err == nil {
|
||||
os.Remove(f.Name())
|
||||
} else {
|
||||
return ds, errors.New("Datastore '" + dspath + "' is not writeable")
|
||||
err := initCheckDir(dspath)
|
||||
if err != nil {
|
||||
return ds, errors.Errorf("datastore: %s", err)
|
||||
}
|
||||
|
||||
return ds, nil
|
||||
@ -152,7 +165,17 @@ func initConfig(configFilename string, dspathOverride string, nBitsForKeypair in
|
||||
return nil, err
|
||||
}
|
||||
|
||||
identity, err := identityConfig(nBitsForKeypair)
|
||||
identity, err := identityConfig(nBitsForKeypair, func() {
|
||||
fmt.Printf("generating key pair...")
|
||||
}, func(ident config.Identity) {
|
||||
fmt.Printf("done\n")
|
||||
fmt.Printf("peer identity: %s\n", ident.PeerID)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logConfig, err := initLogs("") // TODO allow user to override dir
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -175,6 +198,8 @@ func initConfig(configFilename string, dspathOverride string, nBitsForKeypair in
|
||||
|
||||
Datastore: ds,
|
||||
|
||||
Logs: logConfig,
|
||||
|
||||
Identity: identity,
|
||||
|
||||
// setup the node mount points.
|
||||
@ -195,14 +220,17 @@ func initConfig(configFilename string, dspathOverride string, nBitsForKeypair in
|
||||
return conf, nil
|
||||
}
|
||||
|
||||
func identityConfig(nbits int) (config.Identity, error) {
|
||||
// identityConfig initializes a new identity. It calls onBegin when it begins
|
||||
// to generate the identity and it calls onSuccess once the operation is
|
||||
// completed successfully
|
||||
func identityConfig(nbits int, onBegin func(), onSuccess func(config.Identity)) (config.Identity, error) {
|
||||
// TODO guard higher up
|
||||
ident := config.Identity{}
|
||||
if nbits < 1024 {
|
||||
return ident, errors.New("Bitsize less than 1024 is considered unsafe.")
|
||||
}
|
||||
|
||||
fmt.Printf("generating key pair...")
|
||||
onBegin()
|
||||
sk, pk, err := ci.GenerateKeyPair(ci.RSA, nbits)
|
||||
if err != nil {
|
||||
return ident, err
|
||||
@ -221,6 +249,41 @@ func identityConfig(nbits int) (config.Identity, error) {
|
||||
return ident, err
|
||||
}
|
||||
ident.PeerID = id.Pretty()
|
||||
|
||||
onSuccess(ident)
|
||||
return ident, nil
|
||||
}
|
||||
|
||||
func initLogs(logpath string) (config.Logs, error) {
|
||||
if len(logpath) == 0 {
|
||||
var err error
|
||||
logpath, err = config.LogsPath("")
|
||||
if err != nil {
|
||||
return config.Logs{}, errors.Wrap(err)
|
||||
}
|
||||
}
|
||||
|
||||
err := initCheckDir(logpath)
|
||||
if err != nil {
|
||||
return config.Logs{}, errors.Errorf("logs: %s", err)
|
||||
}
|
||||
|
||||
return config.Logs{
|
||||
Filename: path.Join(logpath, "events.log"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// initCheckDir ensures the directory exists and is writable
|
||||
func initCheckDir(path string) error {
|
||||
// Construct the path if missing
|
||||
if err := os.MkdirAll(path, os.ModePerm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check the directory is writeable
|
||||
if f, err := os.Create(filepath.Join(path, "._check_writeable")); err == nil {
|
||||
os.Remove(f.Name())
|
||||
} else {
|
||||
return errors.New("'" + path + "' is not writeable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -76,10 +76,11 @@ func (d *cmdDetails) String() string {
|
||||
d.canRunOnClient(), d.canRunOnDaemon(), d.usesRepo())
|
||||
}
|
||||
|
||||
func (d *cmdDetails) usesConfigAsInput() bool { return !d.doesNotUseConfigAsInput }
|
||||
func (d *cmdDetails) canRunOnClient() bool { return !d.cannotRunOnClient }
|
||||
func (d *cmdDetails) canRunOnDaemon() bool { return !d.cannotRunOnDaemon }
|
||||
func (d *cmdDetails) usesRepo() bool { return !d.doesNotUseRepo }
|
||||
func (d *cmdDetails) usesConfigAsInput() bool { return !d.doesNotUseConfigAsInput }
|
||||
func (d *cmdDetails) doesNotPreemptAutoUpdate() bool { return !d.preemptsAutoUpdate }
|
||||
func (d *cmdDetails) canRunOnClient() bool { return !d.cannotRunOnClient }
|
||||
func (d *cmdDetails) canRunOnDaemon() bool { return !d.cannotRunOnDaemon }
|
||||
func (d *cmdDetails) usesRepo() bool { return !d.doesNotUseRepo }
|
||||
|
||||
// "What is this madness!?" you ask. Our commands have the unfortunate problem of
|
||||
// not being able to run on all the same contexts. This map describes these
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@ -9,6 +8,7 @@ import (
|
||||
"runtime/pprof"
|
||||
"syscall"
|
||||
|
||||
// TODO rm direct reference to go-logging
|
||||
logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-logging"
|
||||
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
|
||||
manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net"
|
||||
@ -21,11 +21,12 @@ import (
|
||||
daemon "github.com/jbenet/go-ipfs/daemon2"
|
||||
updates "github.com/jbenet/go-ipfs/updates"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
"github.com/jbenet/go-ipfs/util/debugerror"
|
||||
errors "github.com/jbenet/go-ipfs/util/debugerror"
|
||||
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
|
||||
)
|
||||
|
||||
// log is the command logger
|
||||
var log = u.Logger("cmd/ipfs")
|
||||
var log = eventlog.Logger("cmd/ipfs")
|
||||
|
||||
// signal to output help
|
||||
var errHelpRequested = errors.New("Help Requested")
|
||||
@ -217,7 +218,7 @@ func callPreCommandHooks(details cmdDetails, req cmds.Request, root *cmds.Comman
|
||||
// check for updates when 1) commands is going to be run locally, 2) the
|
||||
// command does not initialize the config, and 3) the command does not
|
||||
// pre-empt updates
|
||||
if !daemon && details.usesConfigAsInput() && !details.preemptsAutoUpdate {
|
||||
if !daemon && details.usesConfigAsInput() && details.doesNotPreemptAutoUpdate() {
|
||||
|
||||
log.Debug("Calling hook: Check for updates")
|
||||
|
||||
@ -231,6 +232,17 @@ func callPreCommandHooks(details cmdDetails, req cmds.Request, root *cmds.Comman
|
||||
}
|
||||
}
|
||||
|
||||
// When the upcoming command may use the config and repo, we know it's safe
|
||||
// for the log config hook to touch the config/repo
|
||||
if details.usesConfigAsInput() && details.usesRepo() {
|
||||
log.Debug("Calling hook: Configure Event Logger")
|
||||
cfg, err := req.Context().GetConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
configureEventLogger(cfg)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -319,14 +331,13 @@ func commandDetails(path []string, root *cmds.Command) (*cmdDetails, error) {
|
||||
var found bool
|
||||
cmd, found = cmd.Subcommands[cmp]
|
||||
if !found {
|
||||
return nil, debugerror.Errorf("subcommand %s should be in root", cmp)
|
||||
return nil, errors.Errorf("subcommand %s should be in root", cmp)
|
||||
}
|
||||
|
||||
if cmdDetails, found := cmdDetailsMap[cmd]; found {
|
||||
details = cmdDetails
|
||||
}
|
||||
}
|
||||
log.Debugf("cmd perms for +%v: %s", path, details.String())
|
||||
return &details, nil
|
||||
}
|
||||
|
||||
@ -351,13 +362,15 @@ func commandShouldRunOnDaemon(details cmdDetails, req cmds.Request, root *cmds.C
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Info("looking for running daemon...")
|
||||
// at this point need to know whether daemon is running. we defer
|
||||
// to this point so that some commands dont open files unnecessarily.
|
||||
daemonLocked := daemon.Locked(req.Context().ConfigRoot)
|
||||
log.Info("Daemon is running.")
|
||||
|
||||
if daemonLocked {
|
||||
|
||||
log.Info("a daemon is running...")
|
||||
|
||||
if details.cannotRunOnDaemon {
|
||||
e := "ipfs daemon is running. please stop it to run this command"
|
||||
return false, cmds.ClientError(e)
|
||||
@ -479,3 +492,24 @@ func allInterruptSignals() chan os.Signal {
|
||||
syscall.SIGTERM, syscall.SIGQUIT)
|
||||
return sigc
|
||||
}
|
||||
|
||||
func configureEventLogger(config *config.Config) error {
|
||||
|
||||
if u.Debug {
|
||||
eventlog.Configure(eventlog.LevelDebug)
|
||||
} else {
|
||||
eventlog.Configure(eventlog.LevelInfo)
|
||||
}
|
||||
|
||||
eventlog.Configure(eventlog.LdJSONFormatter)
|
||||
|
||||
rotateConf := eventlog.LogRotatorConfig{
|
||||
Filename: config.Logs.Filename,
|
||||
MaxSizeMB: config.Logs.MaxSizeMB,
|
||||
MaxBackups: config.Logs.MaxBackups,
|
||||
MaxAgeDays: config.Logs.MaxAgeDays,
|
||||
}
|
||||
|
||||
eventlog.Configure(eventlog.OutputRotatingLogFile(rotateConf))
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -9,7 +9,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
"github.com/jbenet/go-ipfs/util/debugerror"
|
||||
errors "github.com/jbenet/go-ipfs/util/debugerror"
|
||||
)
|
||||
|
||||
var log = u.Logger("config")
|
||||
@ -20,6 +20,14 @@ type Identity struct {
|
||||
PrivKey string
|
||||
}
|
||||
|
||||
// Logs tracks the configuration of the event logger
|
||||
type Logs struct {
|
||||
Filename string
|
||||
MaxSizeMB uint64
|
||||
MaxBackups uint64
|
||||
MaxAgeDays uint64
|
||||
}
|
||||
|
||||
// Datastore tracks the configuration of the datastore.
|
||||
type Datastore struct {
|
||||
Type string
|
||||
@ -63,6 +71,7 @@ type Config struct {
|
||||
Version Version // local node's version management
|
||||
Bootstrap []*BootstrapPeer // local nodes's bootstrap peers
|
||||
Tour Tour // local node's tour position
|
||||
Logs Logs // local node's event log configuration
|
||||
}
|
||||
|
||||
// DefaultPathRoot is the path to the default config dir location.
|
||||
@ -77,6 +86,9 @@ const DefaultDataStoreDirectory = "datastore"
|
||||
// EnvDir is the environment variable used to change the path root.
|
||||
const EnvDir = "IPFS_DIR"
|
||||
|
||||
// LogsDefaultDirectory is the directory to store all IPFS event logs.
|
||||
var LogsDefaultDirectory = "logs"
|
||||
|
||||
// PathRoot returns the default configuration root directory
|
||||
func PathRoot() (string, error) {
|
||||
dir := os.Getenv(EnvDir)
|
||||
@ -107,6 +119,12 @@ func DataStorePath(configroot string) (string, error) {
|
||||
return Path(configroot, DefaultDataStoreDirectory)
|
||||
}
|
||||
|
||||
// LogsPath returns the default path for event logs given a configuration root
|
||||
// (set an empty string to have the default configuration root)
|
||||
func LogsPath(configroot string) (string, error) {
|
||||
return Path(configroot, LogsDefaultDirectory)
|
||||
}
|
||||
|
||||
// Filename returns the configuration file path given a configuration root
|
||||
// directory. If the configuration root directory is empty, use the default one
|
||||
func Filename(configroot string) (string, error) {
|
||||
@ -129,7 +147,7 @@ func (i *Identity) DecodePrivateKey(passphrase string) (crypto.PrivateKey, error
|
||||
func Load(filename string) (*Config, error) {
|
||||
// if nothing is there, fail. User must run 'ipfs init'
|
||||
if _, err := os.Stat(filename); os.IsNotExist(err) {
|
||||
return nil, debugerror.New("ipfs not initialized, please run 'ipfs init'")
|
||||
return nil, errors.New("ipfs not initialized, please run 'ipfs init'")
|
||||
}
|
||||
|
||||
var cfg Config
|
||||
|
||||
23
core/core.go
23
core/core.go
@ -121,7 +121,7 @@ func NewIpfsNode(cfg *config.Config, online bool) (n *IpfsNode, err error) {
|
||||
|
||||
// setup peerstore + local peer identity
|
||||
n.Peerstore = peer.NewPeerstore()
|
||||
n.Identity, err = initIdentity(n.Config, n.Peerstore, online)
|
||||
n.Identity, err = initIdentity(&n.Config.Identity, n.Peerstore, online)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -196,35 +196,40 @@ func (n *IpfsNode) OnlineMode() bool {
|
||||
return n.onlineMode
|
||||
}
|
||||
|
||||
func initIdentity(cfg *config.Config, peers peer.Peerstore, online bool) (peer.Peer, error) {
|
||||
if cfg.Identity.PeerID == "" {
|
||||
func initIdentity(cfg *config.Identity, peers peer.Peerstore, online bool) (peer.Peer, error) {
|
||||
if cfg.PeerID == "" {
|
||||
return nil, errors.New("Identity was not set in config (was ipfs init run?)")
|
||||
}
|
||||
|
||||
if len(cfg.Identity.PeerID) == 0 {
|
||||
if len(cfg.PeerID) == 0 {
|
||||
return nil, errors.New("No peer ID in config! (was ipfs init run?)")
|
||||
}
|
||||
|
||||
// get peer from peerstore (so it is constructed there)
|
||||
id := peer.ID(b58.Decode(cfg.Identity.PeerID))
|
||||
peer, err := peers.Get(id)
|
||||
id := peer.ID(b58.Decode(cfg.PeerID))
|
||||
self, err := peers.Get(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
self.SetType(peer.Local)
|
||||
self, err = peers.Add(self)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// when not online, don't need to parse private keys (yet)
|
||||
if online {
|
||||
skb, err := base64.StdEncoding.DecodeString(cfg.Identity.PrivKey)
|
||||
skb, err := base64.StdEncoding.DecodeString(cfg.PrivKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := peer.LoadAndVerifyKeyPair(skb); err != nil {
|
||||
if err := self.LoadAndVerifyKeyPair(skb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return peer, nil
|
||||
return self, nil
|
||||
}
|
||||
|
||||
func initConnections(ctx context.Context, cfg *config.Config, pstore peer.Peerstore, route *dht.IpfsDHT) {
|
||||
|
||||
@ -4,13 +4,11 @@ import (
|
||||
"testing"
|
||||
|
||||
config "github.com/jbenet/go-ipfs/config"
|
||||
"github.com/jbenet/go-ipfs/peer"
|
||||
)
|
||||
|
||||
func TestInitialization(t *testing.T) {
|
||||
id := config.Identity{
|
||||
PeerID: "QmNgdzLieYi8tgfo2WfTUzNVH5hQK9oAYGVf6dxN12NrHt",
|
||||
PrivKey: "CAASrRIwggkpAgEAAoICAQCwt67GTUQ8nlJhks6CgbLKOx7F5tl1r9zF4m3TUrG3Pe8h64vi+ILDRFd7QJxaJ/n8ux9RUDoxLjzftL4uTdtv5UXl2vaufCc/C0bhCRvDhuWPhVsD75/DZPbwLsepxocwVWTyq7/ZHsCfuWdoh/KNczfy+Gn33gVQbHCnip/uhTVxT7ARTiv8Qa3d7qmmxsR+1zdL/IRO0mic/iojcb3Oc/PRnYBTiAZFbZdUEit/99tnfSjMDg02wRayZaT5ikxa6gBTMZ16Yvienq7RwSELzMQq2jFA4i/TdiGhS9uKywltiN2LrNDBcQJSN02pK12DKoiIy+wuOCRgs2NTQEhU2sXCk091v7giTTOpFX2ij9ghmiRfoSiBFPJA5RGwiH6ansCHtWKY1K8BS5UORM0o3dYk87mTnKbCsdz4bYnGtOWafujYwzueGx8r+IWiys80IPQKDeehnLW6RgoyjszKgL/2XTyP54xMLSW+Qb3BPgDcPaPO0hmop1hW9upStxKsefW2A2d46Ds4HEpJEry7PkS5M4gKL/zCKHuxuXVk14+fZQ1rstMuvKjrekpAC2aVIKMI9VRA3awtnje8HImQMdj+r+bPmv0N8rTTr3eS4J8Yl7k12i95LLfK+fWnmUh22oTNzkRlaiERQrUDyE4XNCtJc0xs1oe1yXGqazCIAQIDAQABAoICAQCk1N/ftahlRmOfAXk//8wNl7FvdJD3le6+YSKBj0uWmN1ZbUSQk64chr12iGCOM2WY180xYjy1LOS44PTXaeW5bEiTSnb3b3SH+HPHaWCNM2EiSogHltYVQjKW+3tfH39vlOdQ9uQ+l9Gh6iTLOqsCRyszpYPqIBwi1NMLY2Ej8PpVU7ftnFWouHZ9YKS7nAEiMoowhTu/7cCIVwZlAy3AySTuKxPMVj9LORqC32PVvBHZaMPJ+X1Xyijqg6aq39WyoztkXg3+Xxx5j5eOrK6vO/Lp6ZUxaQilHDXoJkKEJjgIBDZpluss08UPfOgiWAGkW+L4fgUxY0qDLDAEMhyEBAn6KOKVL1JhGTX6GjhWziI94bddSpHKYOEIDzUy4H8BXnKhtnyQV6ELS65C2hj9D0IMBTj7edCF1poJy0QfdK0cuXgMvxHLeUO5uc2YWfbNosvKxqygB9rToy4b22YvNwsZUXsTY6Jt+p9V2OgXSKfB5VPeRbjTJL6xqvvUJpQytmII/C9JmSDUtCbYceHj6X9jgigLk20VV6nWHqCTj3utXD6NPAjoycVpLKDlnWEgfVELDIk0gobxUqqSm3jTPEKRPJgxkgPxbwxYumtw++1UY2y35w3WRDc2xYPaWKBCQeZy+mL6ByXp9bWlNvxS3Knb6oZp36/ovGnf2pGvdQKCAQEAyKpipz2lIUySDyE0avVWAmQb2tWGKXALPohzj7AwkcfEg2GuwoC6GyVE2sTJD1HRazIjOKn3yQORg2uOPeG7sx7EKHxSxCKDrbPawkvLCq8JYSy9TLvhqKUVVGYPqMBzu2POSLEA81QXas+aYjKOFWA2Zrjq26zV9ey3+6Lc6WULePgRQybU8+RHJc6fdjUCCfUxgOrUO2IQOuTJ+FsDpVnrMUGlokmWn23OjL4qTL9wGDnWGUs2pjSzNbj3qA0d8iqaiMUyHX/D/VS0wpeT1osNBSm8suvSibYBn+7wbIApbwXUxZaxMv2OHGz3empae4ckvNZs7r8wsI9UwFt8mwKCAQEA4XK6gZkv9t+3YCcSPw2ensLvL/xU7i2bkC9tfTGdjnQfzZXIf5KNdVuj/SerOl2S1s45NMs3ysJbADwRb4ahElD/V71nGzV8fpFTitC20ro9fuX4J0+twmBolHqeH9pmeGTjAeL1rvt6vxs4FkeG/yNft7GdXpXTtEGaObn8Mt0tPY+aB3UnKrnCQoQAlPyGHFrVRX0UEcp6wyyNGhJCNKeNOvqCHTFObhbhO+KWpW
SN0MkVHnqaIBnIn1Te8FtvP/iTwXGnKc0YXJUG6+LM6LmOguW6tg8ZqiQeYyyR+e9eCFH4csLzkrTl1GxCxwEsoSLIMm7UDcjttW6tYEghkwKCAQEAmeCO5lCPYImnN5Lu71ZTLmI2OgmjaANTnBBnDbi+hgv61gUCToUIMejSdDCTPfwv61P3TmyIZs0luPGxkiKYHTNqmOE9Vspgz8Mr7fLRMNApESuNvloVIY32XVImj/GEzh4rAfM6F15U1sN8T/EUo6+0B/Glp+9R49QzAfRSE2g48/rGwgf1JVHYfVWFUtAzUA+GdqWdOixo5cCsYJbqpNHfWVZN/bUQnBFIYwUwysnC29D+LUdQEQQ4qOm+gFAOtrWU62zMkXJ4iLt8Ify6kbrvsRXgbhQIzzGS7WH9XDarj0eZciuslr15TLMC1Azadf+cXHLR9gMHA13mT9vYIQKCAQA/DjGv8cKCkAvf7s2hqROGYAs6Jp8yhrsN1tYOwAPLRhtnCs+rLrg17M2vDptLlcRuI/vIElamdTmylRpjUQpX7yObzLO73nfVhpwRJVMdGU394iBIDncQ+JoHfUwgqJskbUM40dvZdyjbrqc/Q/4z+hbZb+oN/GXb8sVKBATPzSDMKQ/xqgisYIw+wmDPStnPsHAaIWOtni47zIgilJzD0WEk78/YjmPbUrboYvWziK5JiRRJFA1rkQqV1c0M+OXixIm+/yS8AksgCeaHr0WUieGcJtjT9uE8vyFop5ykhRiNxy9wGaq6i7IEecsrkd6DqxDHWkwhFuO1bSE83q/VAoIBAEA+RX1i/SUi08p71ggUi9WFMqXmzELp1L3hiEjOc2AklHk2rPxsaTh9+G95BvjhP7fRa/Yga+yDtYuyjO99nedStdNNSg03aPXILl9gs3r2dPiQKUEXZJ3FrH6tkils/8BlpOIRfbkszrdZIKTO9GCdLWQ30dQITDACs8zV/1GFGrHFrqnnMe/NpIFHWNZJ0/WZMi8wgWO6Ik8jHEpQtVXRiXLqy7U6hk170pa4GHOzvftfPElOZZjy9qn7KjdAQqy6spIrAE94OEL+fBgbHQZGLpuTlj6w6YGbMtPU8uo7sXKoc6WOCb68JWft3tejGLDa1946HAWqVM9B/UcneNc=",
|
||||
}
|
||||
id := testIdentity
|
||||
|
||||
good := []*config.Config{
|
||||
&config.Config{
|
||||
@ -59,3 +57,24 @@ func TestInitialization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPeerIsLocal(t *testing.T) {
|
||||
t.Log("Ensure that peer is Local after initializing identity")
|
||||
|
||||
online := false
|
||||
peers := peer.NewPeerstore()
|
||||
|
||||
cfg := testIdentity
|
||||
p, err := initIdentity(&cfg, peers, online)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if p.GetType() != peer.Local {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
var testIdentity = config.Identity{
|
||||
PeerID: "QmNgdzLieYi8tgfo2WfTUzNVH5hQK9oAYGVf6dxN12NrHt",
|
||||
PrivKey: "CAASrRIwggkpAgEAAoICAQCwt67GTUQ8nlJhks6CgbLKOx7F5tl1r9zF4m3TUrG3Pe8h64vi+ILDRFd7QJxaJ/n8ux9RUDoxLjzftL4uTdtv5UXl2vaufCc/C0bhCRvDhuWPhVsD75/DZPbwLsepxocwVWTyq7/ZHsCfuWdoh/KNczfy+Gn33gVQbHCnip/uhTVxT7ARTiv8Qa3d7qmmxsR+1zdL/IRO0mic/iojcb3Oc/PRnYBTiAZFbZdUEit/99tnfSjMDg02wRayZaT5ikxa6gBTMZ16Yvienq7RwSELzMQq2jFA4i/TdiGhS9uKywltiN2LrNDBcQJSN02pK12DKoiIy+wuOCRgs2NTQEhU2sXCk091v7giTTOpFX2ij9ghmiRfoSiBFPJA5RGwiH6ansCHtWKY1K8BS5UORM0o3dYk87mTnKbCsdz4bYnGtOWafujYwzueGx8r+IWiys80IPQKDeehnLW6RgoyjszKgL/2XTyP54xMLSW+Qb3BPgDcPaPO0hmop1hW9upStxKsefW2A2d46Ds4HEpJEry7PkS5M4gKL/zCKHuxuXVk14+fZQ1rstMuvKjrekpAC2aVIKMI9VRA3awtnje8HImQMdj+r+bPmv0N8rTTr3eS4J8Yl7k12i95LLfK+fWnmUh22oTNzkRlaiERQrUDyE4XNCtJc0xs1oe1yXGqazCIAQIDAQABAoICAQCk1N/ftahlRmOfAXk//8wNl7FvdJD3le6+YSKBj0uWmN1ZbUSQk64chr12iGCOM2WY180xYjy1LOS44PTXaeW5bEiTSnb3b3SH+HPHaWCNM2EiSogHltYVQjKW+3tfH39vlOdQ9uQ+l9Gh6iTLOqsCRyszpYPqIBwi1NMLY2Ej8PpVU7ftnFWouHZ9YKS7nAEiMoowhTu/7cCIVwZlAy3AySTuKxPMVj9LORqC32PVvBHZaMPJ+X1Xyijqg6aq39WyoztkXg3+Xxx5j5eOrK6vO/Lp6ZUxaQilHDXoJkKEJjgIBDZpluss08UPfOgiWAGkW+L4fgUxY0qDLDAEMhyEBAn6KOKVL1JhGTX6GjhWziI94bddSpHKYOEIDzUy4H8BXnKhtnyQV6ELS65C2hj9D0IMBTj7edCF1poJy0QfdK0cuXgMvxHLeUO5uc2YWfbNosvKxqygB9rToy4b22YvNwsZUXsTY6Jt+p9V2OgXSKfB5VPeRbjTJL6xqvvUJpQytmII/C9JmSDUtCbYceHj6X9jgigLk20VV6nWHqCTj3utXD6NPAjoycVpLKDlnWEgfVELDIk0gobxUqqSm3jTPEKRPJgxkgPxbwxYumtw++1UY2y35w3WRDc2xYPaWKBCQeZy+mL6ByXp9bWlNvxS3Knb6oZp36/ovGnf2pGvdQKCAQEAyKpipz2lIUySDyE0avVWAmQb2tWGKXALPohzj7AwkcfEg2GuwoC6GyVE2sTJD1HRazIjOKn3yQORg2uOPeG7sx7EKHxSxCKDrbPawkvLCq8JYSy9TLvhqKUVVGYPqMBzu2POSLEA81QXas+aYjKOFWA2Zrjq26zV9ey3+6Lc6WULePgRQybU8+RHJc6fdjUCCfUxgOrUO2IQOuTJ+FsDpVnrMUGlokmWn23OjL4qTL9wGDnWGUs2pjSzNbj3qA0d8iqaiMUyHX/D/VS0wpeT1osNBSm8suvSibYBn+7wbIApbwXUxZaxMv2OHGz3empae4ckvNZs7r8wsI9UwFt8mwKCAQEA4XK6gZkv9t+3YCcSPw2ensLvL/xU7i2bkC9tfTGdjnQfzZXIf5KNdVuj/SerOl2S1s45NMs3ysJbADwRb4ahElD/V71nGzV8fpFTitC20ro9fuX4J0+twmBolHqeH9pmeGTjAeL1rvt6vxs4FkeG/yNft7GdXpXTtEGaObn8Mt0tPY+aB3UnKrnCQoQAlPyGHFrVRX0UEcp6wyyNGhJCNKeNOvqCHTFObhbhO+KWpW
SN0MkVHnqaIBnIn1Te8FtvP/iTwXGnKc0YXJUG6+LM6LmOguW6tg8ZqiQeYyyR+e9eCFH4csLzkrTl1GxCxwEsoSLIMm7UDcjttW6tYEghkwKCAQEAmeCO5lCPYImnN5Lu71ZTLmI2OgmjaANTnBBnDbi+hgv61gUCToUIMejSdDCTPfwv61P3TmyIZs0luPGxkiKYHTNqmOE9Vspgz8Mr7fLRMNApESuNvloVIY32XVImj/GEzh4rAfM6F15U1sN8T/EUo6+0B/Glp+9R49QzAfRSE2g48/rGwgf1JVHYfVWFUtAzUA+GdqWdOixo5cCsYJbqpNHfWVZN/bUQnBFIYwUwysnC29D+LUdQEQQ4qOm+gFAOtrWU62zMkXJ4iLt8Ify6kbrvsRXgbhQIzzGS7WH9XDarj0eZciuslr15TLMC1Azadf+cXHLR9gMHA13mT9vYIQKCAQA/DjGv8cKCkAvf7s2hqROGYAs6Jp8yhrsN1tYOwAPLRhtnCs+rLrg17M2vDptLlcRuI/vIElamdTmylRpjUQpX7yObzLO73nfVhpwRJVMdGU394iBIDncQ+JoHfUwgqJskbUM40dvZdyjbrqc/Q/4z+hbZb+oN/GXb8sVKBATPzSDMKQ/xqgisYIw+wmDPStnPsHAaIWOtni47zIgilJzD0WEk78/YjmPbUrboYvWziK5JiRRJFA1rkQqV1c0M+OXixIm+/yS8AksgCeaHr0WUieGcJtjT9uE8vyFop5ykhRiNxy9wGaq6i7IEecsrkd6DqxDHWkwhFuO1bSE83q/VAoIBAEA+RX1i/SUi08p71ggUi9WFMqXmzELp1L3hiEjOc2AklHk2rPxsaTh9+G95BvjhP7fRa/Yga+yDtYuyjO99nedStdNNSg03aPXILl9gs3r2dPiQKUEXZJ3FrH6tkils/8BlpOIRfbkszrdZIKTO9GCdLWQ30dQITDACs8zV/1GFGrHFrqnnMe/NpIFHWNZJ0/WZMi8wgWO6Ik8jHEpQtVXRiXLqy7U6hk170pa4GHOzvftfPElOZZjy9qn7KjdAQqy6spIrAE94OEL+fBgbHQZGLpuTlj6w6YGbMtPU8uo7sXKoc6WOCb68JWft3tejGLDa1946HAWqVM9B/UcneNc=",
|
||||
}
|
||||
|
||||
@ -23,6 +23,7 @@ func (d *Dialer) Dial(ctx context.Context, network string, remote peer.Peer) (Co
|
||||
return nil, fmt.Errorf("No remote address for network %s", network)
|
||||
}
|
||||
|
||||
remote.SetType(peer.Remote)
|
||||
remote, err := d.Peerstore.Add(remote)
|
||||
if err != nil {
|
||||
log.Errorf("Error putting peer into peerstore: %s", remote)
|
||||
|
||||
@ -1,7 +1,9 @@
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net"
|
||||
"github.com/jbenet/go-ipfs/util/eventlog"
|
||||
|
||||
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
|
||||
)
|
||||
@ -52,6 +54,13 @@ func resolveUnspecifiedAddresses(unspecifiedAddrs []ma.Multiaddr) ([]ma.Multiadd
|
||||
}
|
||||
}
|
||||
|
||||
log.Event(context.TODO(), "interfaceListenAddresses", func() eventlog.Loggable {
|
||||
var addrs []string
|
||||
for _, addr := range outputAddrs {
|
||||
addrs = append(addrs, addr.String())
|
||||
}
|
||||
return eventlog.Metadata{"addresses": addrs}
|
||||
}())
|
||||
log.Info("InterfaceListenAddresses:", outputAddrs)
|
||||
return outputAddrs, nil
|
||||
}
|
||||
|
||||
@ -13,12 +13,13 @@ import (
|
||||
peer "github.com/jbenet/go-ipfs/peer"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
ctxc "github.com/jbenet/go-ipfs/util/ctxcloser"
|
||||
"github.com/jbenet/go-ipfs/util/eventlog"
|
||||
|
||||
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
|
||||
)
|
||||
|
||||
var log = u.Logger("swarm")
|
||||
var log = eventlog.Logger("swarm")
|
||||
|
||||
// ErrAlreadyOpen signals that a connection to a peer is already open.
|
||||
var ErrAlreadyOpen = errors.New("Error: Connection to this peer already open.")
|
||||
|
||||
52
peer/peer.go
52
peer/peer.go
@ -95,8 +95,34 @@ type Peer interface {
|
||||
GetLatency() (out time.Duration)
|
||||
SetLatency(laten time.Duration)
|
||||
|
||||
// Get/SetType indicate whether this is a local or remote peer
|
||||
GetType() Type
|
||||
SetType(Type)
|
||||
|
||||
// Update with the data of another peer instance
|
||||
Update(Peer) error
|
||||
|
||||
Loggable() map[string]interface{}
|
||||
}
|
||||
|
||||
type Type uint8
|
||||
|
||||
const (
|
||||
// Unspecified indicates peer was created without specifying Type
|
||||
Unspecified Type = iota
|
||||
Local
|
||||
Remote
|
||||
)
|
||||
|
||||
func (t Type) String() string {
|
||||
switch t {
|
||||
case Local:
|
||||
return "localPeer"
|
||||
case Remote:
|
||||
return "remotePeer"
|
||||
default:
|
||||
}
|
||||
return "unspecifiedPeer"
|
||||
}
|
||||
|
||||
type peer struct {
|
||||
@ -111,6 +137,9 @@ type peer struct {
|
||||
// within that package, map from ID to latency value.
|
||||
latency time.Duration
|
||||
|
||||
// typ can be Local, Remote, or Unspecified (default)
|
||||
typ Type
|
||||
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
@ -129,6 +158,15 @@ func (p *peer) String() string {
|
||||
return "[Peer " + pid[:maxRunes] + "]"
|
||||
}
|
||||
|
||||
func (p *peer) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
p.GetType().String(): map[string]interface{}{
|
||||
"id": p.ID(),
|
||||
"latency": p.GetLatency(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Key returns the ID as a Key (string) for maps.
|
||||
func (p *peer) Key() u.Key {
|
||||
return u.Key(p.id)
|
||||
@ -222,6 +260,18 @@ func (p *peer) SetLatency(laten time.Duration) {
|
||||
p.Unlock()
|
||||
}
|
||||
|
||||
func (p *peer) SetType(t Type) {
|
||||
p.Lock()
|
||||
p.typ = t
|
||||
defer p.Unlock()
|
||||
}
|
||||
|
||||
func (p *peer) GetType() Type {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
return p.typ
|
||||
}
|
||||
|
||||
// LoadAndVerifyKeyPair unmarshalls, loads a private/public key pair.
|
||||
// Error if (a) unmarshalling fails, or (b) pubkey does not match id.
|
||||
func (p *peer) LoadAndVerifyKeyPair(marshalled []byte) error {
|
||||
@ -306,6 +356,8 @@ func (p *peer) Update(other Peer) error {
|
||||
|
||||
p.SetLatency(other.GetLatency())
|
||||
|
||||
p.SetType(other.GetType())
|
||||
|
||||
sk := other.PrivKey()
|
||||
pk := other.PubKey()
|
||||
p.Lock()
|
||||
|
||||
@ -55,3 +55,11 @@ func TestStringMethodWithSmallId(t *testing.T) {
|
||||
}
|
||||
p1.String()
|
||||
}
|
||||
|
||||
func TestDefaultType(t *testing.T) {
|
||||
t.Log("Ensure that peers are initialized to Unspecified by default")
|
||||
p := peer{}
|
||||
if p.GetType() != Unspecified {
|
||||
t.Fatalf("Peer's default type is was not `Unspecified`")
|
||||
}
|
||||
}
|
||||
|
||||
@ -47,6 +47,17 @@ func (p *peerstore) Get(i ID) (Peer, error) {
|
||||
|
||||
// not found, construct it ourselves, add it to datastore, and return.
|
||||
case ds.ErrNotFound:
|
||||
|
||||
// TODO(brian) kinda dangerous, no? If ID is invalid and doesn't
|
||||
// correspond to an actual valid peer ID, this peerstore will return an
|
||||
// instantiated peer value, allowing the error to propagate. It might
|
||||
// be better to nip this at the bud by returning nil and making the
|
||||
// client manually add a Peer. To keep the peerstore in control, this
|
||||
// can even be a peerstore method that performs cursory validation.
|
||||
//
|
||||
// Potential bad case: Suppose values arrive from untrusted providers
|
||||
// in the DHT.
|
||||
|
||||
peer := &peer{id: i}
|
||||
if err := p.peers.Put(k, peer); err != nil {
|
||||
return nil, err
|
||||
|
||||
@ -18,6 +18,7 @@ import (
|
||||
kb "github.com/jbenet/go-ipfs/routing/kbucket"
|
||||
u "github.com/jbenet/go-ipfs/util"
|
||||
ctxc "github.com/jbenet/go-ipfs/util/ctxcloser"
|
||||
"github.com/jbenet/go-ipfs/util/eventlog"
|
||||
|
||||
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
|
||||
@ -25,7 +26,7 @@ import (
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
|
||||
)
|
||||
|
||||
var log = u.Logger("dht")
|
||||
var log = eventlog.Logger("dht")
|
||||
|
||||
const doPinging = false
|
||||
|
||||
@ -151,9 +152,7 @@ func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) msg.N
|
||||
// update the peer (on valid msgs only)
|
||||
dht.Update(mPeer)
|
||||
|
||||
// Print out diagnostic
|
||||
log.Debugf("%s got message type: '%s' from %s",
|
||||
dht.self, pb.Message_MessageType_name[int32(pmes.GetType())], mPeer)
|
||||
log.Event(ctx, "foo", dht.self, mPeer, pmes)
|
||||
|
||||
// get handler for this msg type.
|
||||
handler := dht.handlerForMsgType(pmes.GetType())
|
||||
@ -196,9 +195,7 @@ func (dht *IpfsDHT) sendRequest(ctx context.Context, p peer.Peer, pmes *pb.Messa
|
||||
|
||||
start := time.Now()
|
||||
|
||||
// Print out diagnostic
|
||||
log.Debugf("Sent message type: '%s' to %s",
|
||||
pb.Message_MessageType_name[int32(pmes.GetType())], p)
|
||||
log.Event(ctx, "sentMessage", dht.self, p, pmes)
|
||||
|
||||
rmes, err := dht.sender.SendRequest(ctx, mes)
|
||||
if err != nil {
|
||||
|
||||
@ -65,3 +65,11 @@ func (m *Message) SetClusterLevel(level int) {
|
||||
lvl := int32(level)
|
||||
m.ClusterLevelRaw = &lvl
|
||||
}
|
||||
|
||||
func (m *Message) Loggable() map[string]interface{} {
|
||||
return map[string]interface{}{
|
||||
"message": map[string]string{
|
||||
"type": m.Type.String(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
35
util/eventlog/context.go
Normal file
35
util/eventlog/context.go
Normal file
@ -0,0 +1,35 @@
|
||||
package eventlog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
)
|
||||
|
||||
type key int
|
||||
|
||||
const metadataKey key = 0
|
||||
|
||||
func ContextWithMetadata(ctx context.Context, l Loggable) context.Context {
|
||||
existing, err := MetadataFromContext(ctx)
|
||||
if err != nil {
|
||||
// context does not contain meta. just set the new metadata
|
||||
child := context.WithValue(ctx, metadataKey, Metadata(l.Loggable()))
|
||||
return child
|
||||
}
|
||||
|
||||
merged := DeepMerge(existing, l.Loggable())
|
||||
child := context.WithValue(ctx, metadataKey, merged)
|
||||
return child
|
||||
}
|
||||
|
||||
func MetadataFromContext(ctx context.Context) (Metadata, error) {
|
||||
value := ctx.Value(metadataKey)
|
||||
if value != nil {
|
||||
metadata, ok := value.(Metadata)
|
||||
if ok {
|
||||
return metadata, nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("context contains no metadata")
|
||||
}
|
||||
44
util/eventlog/context_test.go
Normal file
44
util/eventlog/context_test.go
Normal file
@ -0,0 +1,44 @@
|
||||
package eventlog
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
)
|
||||
|
||||
func TestContextContainsMetadata(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
m := Metadata{"foo": "bar"}
|
||||
ctx := ContextWithMetadata(context.Background(), m)
|
||||
got, err := MetadataFromContext(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, exists := got["foo"]
|
||||
if !exists {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestContextWithPreexistingMetadata(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctx := ContextWithMetadata(context.Background(), Metadata{"hello": "world"})
|
||||
ctx = ContextWithMetadata(ctx, Metadata{"goodbye": "earth"})
|
||||
|
||||
got, err := MetadataFromContext(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, exists := got["hello"]
|
||||
if !exists {
|
||||
t.Fatal("original key not present")
|
||||
}
|
||||
_, exists = got["goodbye"]
|
||||
if !exists {
|
||||
t.Fatal("new key not present")
|
||||
}
|
||||
}
|
||||
39
util/eventlog/entry.go
Normal file
39
util/eventlog/entry.go
Normal file
@ -0,0 +1,39 @@
|
||||
package eventlog
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/jbenet/go-ipfs/util"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/maybebtc/logrus"
|
||||
)
|
||||
|
||||
// entry is a single structured log event waiting to be emitted. It
// accumulates the Loggable sources whose metadata will be merged into
// the final record by log().
type entry struct {
	loggables []Loggable // metadata sources, merged in order by log()
	system    string     // name of the subsystem emitting the event
	event     string     // name of the event itself
}

// Log logs the event unconditionally (regardless of log level)
// TODO add support for leveled-logs once we decide which levels we want
// for our structured logs
func (e *entry) Log() {
	e.log()
}
|
||||
|
||||
// log is a private method invoked by the public Log, Info, Error methods
|
||||
func (e *entry) log() {
|
||||
// accumulate metadata
|
||||
accum := Metadata{}
|
||||
for _, loggable := range e.loggables {
|
||||
accum = DeepMerge(accum, loggable.Loggable())
|
||||
}
|
||||
|
||||
// apply final attributes to reserved keys
|
||||
// TODO accum["level"] = level
|
||||
accum["event"] = e.event
|
||||
accum["system"] = e.system
|
||||
accum["time"] = util.FormatRFC3339(time.Now())
|
||||
|
||||
// TODO roll our own event logger
|
||||
logrus.WithFields(map[string]interface{}(accum)).Info(e.event)
|
||||
}
|
||||
90
util/eventlog/log.go
Normal file
90
util/eventlog/log.go
Normal file
@ -0,0 +1,90 @@
|
||||
package eventlog
|
||||
|
||||
import (
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
|
||||
logging "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-logging"
|
||||
"github.com/jbenet/go-ipfs/util"
|
||||
)
|
||||
|
||||
// EventLogger extends the StandardLogger interface to allow for log items
// containing structured metadata
type EventLogger interface {
	StandardLogger

	// Event merges structured data from the provided inputs into a single
	// machine-readable log event.
	//
	// If the context contains metadata, a copy of this is used as the base
	// metadata accumulator.
	//
	// If one or more loggable objects are provided, these are deep-merged into base blob.
	//
	// Next, the event name is added to the blob under the key "event". If
	// the key "event" already exists, it will be over-written.
	//
	// Finally the timestamp and package name are added to the accumulator and
	// the metadata is logged.
	Event(ctx context.Context, event string, m ...Loggable)
}

// StandardLogger provides API compatibility with standard printf loggers
// eg. go-logging
type StandardLogger interface {
	Critical(args ...interface{})
	Criticalf(format string, args ...interface{})
	Debug(args ...interface{})
	Debugf(format string, args ...interface{})
	Error(args ...interface{})
	Errorf(format string, args ...interface{})
	Fatal(args ...interface{})
	Fatalf(format string, args ...interface{})
	Info(args ...interface{})
	Infof(format string, args ...interface{})
	Notice(args ...interface{})
	Noticef(format string, args ...interface{})
	Panic(args ...interface{})
	Panicf(format string, args ...interface{})
	Warning(args ...interface{})
	Warningf(format string, args ...interface{})
}
|
||||
|
||||
// Logger retrieves an event logger by name. The returned logger embeds
// the go-logging logger of the same name for printf-style calls.
func Logger(system string) EventLogger {

	// TODO if we would like to adjust log levels at run-time. Store this event
	// logger in a map (just like the util.Logger impl)

	return &eventLogger{system: system, Logger: util.Logger(system)}
}

// eventLogger implements the EventLogger and wraps a go-logging Logger
type eventLogger struct {
	*logging.Logger        // embedded: supplies the StandardLogger printf methods
	system          string // subsystem name stamped onto every event
	// TODO add log-level
}
|
||||
|
||||
func (el *eventLogger) Event(ctx context.Context, event string, metadata ...Loggable) {
|
||||
|
||||
// Collect loggables for later logging
|
||||
var loggables []Loggable
|
||||
|
||||
// get any existing metadata from the context
|
||||
existing, err := MetadataFromContext(ctx)
|
||||
if err != nil {
|
||||
existing = Metadata{}
|
||||
}
|
||||
loggables = append(loggables, existing)
|
||||
|
||||
for _, datum := range metadata {
|
||||
loggables = append(loggables, datum)
|
||||
}
|
||||
|
||||
e := entry{
|
||||
loggables: loggables,
|
||||
system: el.system,
|
||||
event: event,
|
||||
}
|
||||
|
||||
e.Log() // TODO replace this when leveled-logs have been implemented
|
||||
}
|
||||
87
util/eventlog/metadata.go
Normal file
87
util/eventlog/metadata.go
Normal file
@ -0,0 +1,87 @@
|
||||
package eventlog
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"reflect"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go-uuid/uuid"
|
||||
)
|
||||
|
||||
// Metadata is a convenience type for generic maps
type Metadata map[string]interface{}

// Loggable describes objects that can be marshalled into Metadata for logging
type Loggable interface {
	// Loggable returns the object's log-relevant fields as a generic map.
	Loggable() map[string]interface{}
}

// Uuid returns a Metadata with the string key and UUID value
// (a freshly generated random UUID string).
func Uuid(key string) Metadata {
	return Metadata{
		key: uuid.New(),
	}
}
|
||||
|
||||
// DeepMerge merges the second Metadata parameter into the first.
// Nested Metadata are merged recursively. Primitives are over-written.
// Note the parameter order: values from `a` take precedence over `b`.
func DeepMerge(b, a Metadata) Metadata {
	// start from a shallow copy of the base map; neither input is mutated
	out := Metadata{}
	for k, v := range b {
		out[k] = v
	}
	for k, v := range a {

		maybe, err := Metadatify(v)
		if err != nil {
			// if the new value is not meta. just overwrite the dest value
			out[k] = v
			continue
		}

		// it is meta. What about dest?
		outv, exists := out[k]
		if !exists {
			// the new value is meta, but there's no dest value. just write it
			out[k] = v
			continue
		}

		outMetadataValue, err := Metadatify(outv)
		if err != nil {
			// the new value is meta and there's a dest value, but the dest
			// value isn't meta. just overwrite
			out[k] = v
			continue
		}

		// both are meta. merge them.
		out[k] = DeepMerge(outMetadataValue, maybe)
	}
	return out
}
|
||||
|
||||
// Loggable implements the Loggable interface by returning the map itself.
func (m Metadata) Loggable() map[string]interface{} {
	// NB: method defined on value to avoid de-referencing nil Metadata
	return m
}

// JsonString returns the metadata serialized as a JSON object string,
// along with any error reported by json.Marshal.
func (m Metadata) JsonString() (string, error) {
	// NB: method defined on value
	b, err := json.Marshal(m)
	return string(b), err
}
|
||||
|
||||
// Metadatify converts maps into Metadata
|
||||
func Metadatify(i interface{}) (Metadata, error) {
|
||||
value := reflect.ValueOf(i)
|
||||
if value.Kind() == reflect.Map {
|
||||
m := map[string]interface{}{}
|
||||
for _, k := range value.MapKeys() {
|
||||
m[k.String()] = value.MapIndex(k).Interface()
|
||||
}
|
||||
return Metadata(m), nil
|
||||
}
|
||||
return nil, errors.New("is not a map")
|
||||
}
|
||||
50
util/eventlog/metadata_test.go
Normal file
50
util/eventlog/metadata_test.go
Normal file
@ -0,0 +1,50 @@
|
||||
package eventlog
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestOverwrite checks two DeepMerge properties on nested Metadata:
// a colliding leaf key is overwritten by the second argument, while a
// sibling key present only in the first argument survives the merge.
func TestOverwrite(t *testing.T) {
	t.Parallel()

	under := Metadata{
		"a": Metadata{
			"b": Metadata{
				"c": Metadata{
					"d":     "the original value",
					"other": "SURVIVE",
				},
			},
		},
	}
	over := Metadata{
		"a": Metadata{
			"b": Metadata{
				"c": Metadata{
					"d": "a new value",
				},
			},
		},
	}

	out := DeepMerge(under, over)

	// colliding key: second argument wins
	dval := out["a"].(Metadata)["b"].(Metadata)["c"].(Metadata)["d"].(string)
	if dval != "a new value" {
		t.Fatal(dval)
	}
	// non-colliding sibling: first argument's value is preserved
	surv := out["a"].(Metadata)["b"].(Metadata)["c"].(Metadata)["other"].(string)
	if surv != "SURVIVE" {
		t.Fatal(surv)
	}
}
|
||||
|
||||
func TestMarshalJSON(t *testing.T) {
|
||||
t.Parallel()
|
||||
bs, _ := Metadata{"a": "b"}.JsonString()
|
||||
t.Log(bs)
|
||||
}
|
||||
|
||||
func TestMetadataIsLoggable(t *testing.T) {
|
||||
t.Parallel()
|
||||
func(l Loggable) {
|
||||
}(Metadata{})
|
||||
}
|
||||
63
util/eventlog/option.go
Normal file
63
util/eventlog/option.go
Normal file
@ -0,0 +1,63 @@
|
||||
package eventlog
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/maybebtc/logrus"
|
||||
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2"
|
||||
)
|
||||
|
||||
// Option is a configuration thunk: invoking it applies a single logging
// setting to the package-global logrus configuration.
type Option func()

// Configure applies the provided options in order.
func Configure(options ...Option) {
	for _, f := range options {
		f()
	}
}
|
||||
|
||||
// LdJSONFormatter formats the event log as line-delimited JSON
var LdJSONFormatter = func() {
	logrus.SetFormatter(&logrus.PoliteJSONFormatter{})
}

// TextFormatter formats the event log using logrus's text formatter.
var TextFormatter = func() {
	logrus.SetFormatter(&logrus.TextFormatter{})
}

// LogRotatorConfig holds the settings for a rotating log file
// (see OutputRotatingLogFile).
type LogRotatorConfig struct {
	Filename   string // path of the log file to write and rotate
	MaxSizeMB  uint64 // rotate once the file exceeds this many megabytes
	MaxBackups uint64 // number of rotated files to retain
	MaxAgeDays uint64 // drop rotated files older than this many days
}

// Output returns an Option that directs the event log to the given writer.
func Output(w io.Writer) Option {
	return func() {
		logrus.SetOutput(w)
		// TODO return previous Output option
	}
}
|
||||
|
||||
func OutputRotatingLogFile(config LogRotatorConfig) Option {
|
||||
return func() {
|
||||
logrus.SetOutput(
|
||||
&lumberjack.Logger{
|
||||
Filename: config.Filename,
|
||||
MaxSize: int(config.MaxSizeMB),
|
||||
MaxBackups: int(config.MaxBackups),
|
||||
MaxAge: int(config.MaxAgeDays),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// LevelDebug lowers the logrus threshold so debug events are emitted.
var LevelDebug = func() {
	logrus.SetLevel(logrus.DebugLevel)
}

// LevelError raises the logrus threshold so only errors (and above) are emitted.
var LevelError = func() {
	logrus.SetLevel(logrus.ErrorLevel)
}

// LevelInfo sets the logrus threshold to informational events and above.
var LevelInfo = func() {
	logrus.SetLevel(logrus.InfoLevel)
}
|
||||
Loading…
Reference in New Issue
Block a user