Commit a4ff2bcd authored by Sebastian Stark's avatar Sebastian Stark

heavy linting

  - use conforming identifiers
  - unexport unneeded exports
  - remove superfluous initialisations
  - remove useless "else" branches
  - errcheck complained about some missing error checks, dealing
    with those later
parent ba38c92f
......@@ -9,7 +9,7 @@ import (
"time"
)
type Clock interface {
type clock interface {
Now() time.Time
}
......
......@@ -19,26 +19,26 @@ import (
const (
myName = "snaprd"
defaultSchedFileName = "/etc/" + myName + ".schedules"
DATA_SUBDIR = ".data"
dataSubdir = ".data"
)
type Opts []string
type opts []string
// Opts getter
func (o *Opts) String() string {
func (o *opts) String() string {
return fmt.Sprintf("\"%s\"", strings.Join(*o, ""))
}
// Opts setter
func (o *Opts) Set(value string) error {
func (o *opts) Set(value string) error {
*o = strings.Split(value, " ")
return nil
}
// use own struct as "backing store" for parsed flags
// Config is used as a backing store for parsed flags
type Config struct {
RsyncPath string
RsyncOpts Opts
RsyncOpts opts
Origin string
repository string
Schedule string
......@@ -56,7 +56,7 @@ type Config struct {
// WriteCache writes the global configuration to disk as a json file.
func (c *Config) WriteCache() error {
cacheFile := filepath.Join(c.repository, "."+myName+".settings")
Debugf("trying to write cached settings to %s", cacheFile)
debugf("trying to write cached settings to %s", cacheFile)
jsonConfig, err := json.MarshalIndent(c, "", " ")
if err != nil {
log.Println("could not write config:", err)
......@@ -71,35 +71,34 @@ func (c *Config) WriteCache() error {
func (c *Config) ReadCache() error {
t := new(Config)
cacheFile := filepath.Join(c.repository, "."+myName+".settings")
Debugf("trying to read cached settings from %s", cacheFile)
debugf("trying to read cached settings from %s", cacheFile)
b, err := ioutil.ReadFile(filepath.Join(c.repository, "."+myName+".settings"))
if err != nil {
return err
} else {
err = json.Unmarshal(b, &t)
if err != nil {
return err
}
c.RsyncPath = t.RsyncPath
c.RsyncOpts = t.RsyncOpts
if t.SchedFile != "" {
c.SchedFile = t.SchedFile
schedules.AddFromFile(c.SchedFile)
}
c.Origin = t.Origin
if _, ok := schedules[t.Schedule]; ok == false {
log.Fatalln("no such schedule:", t.Schedule)
}
c.Schedule = t.Schedule
c.MaxKeep = t.MaxKeep
c.NoPurge = t.NoPurge
c.MinPercSpace = t.MinPercSpace
c.MinGiBSpace = t.MinGiBSpace
}
err = json.Unmarshal(b, &t)
if err != nil {
return err
}
c.RsyncPath = t.RsyncPath
c.RsyncOpts = t.RsyncOpts
if t.SchedFile != "" {
c.SchedFile = t.SchedFile
schedules.AddFromFile(c.SchedFile)
}
c.Origin = t.Origin
if _, ok := schedules[t.Schedule]; ok == false {
log.Fatalln("no such schedule:", t.Schedule)
}
c.Schedule = t.Schedule
c.MaxKeep = t.MaxKeep
c.NoPurge = t.NoPurge
c.MinPercSpace = t.MinPercSpace
c.MinGiBSpace = t.MinGiBSpace
return nil
}
var subcmd string = ""
var subcmd string
func usage() {
fmt.Printf(`usage: %[1]s <command> <options>
......@@ -115,7 +114,7 @@ Examples:
`, myName)
}
func LoadConfig() *Config {
func loadConfig() *Config {
config := new(Config)
if len(os.Args) > 1 {
subcmd = os.Args[1]
......@@ -170,15 +169,15 @@ func LoadConfig() *Config {
if _, ok := schedules[config.Schedule]; ok == false {
log.Fatalln("no such schedule:", config.Schedule)
}
path := filepath.Join(config.repository, DATA_SUBDIR)
Debugf("creating repository:", path)
path := filepath.Join(config.repository, dataSubdir)
debugf("creating repository:", path)
err := os.MkdirAll(path, 00755)
if err != nil {
log.Fatal(err)
}
err = config.WriteCache()
if err != nil {
log.Printf("could not write settings cache file:", err)
log.Print("could not write settings cache file:", err)
}
return config
}
......@@ -208,7 +207,7 @@ func LoadConfig() *Config {
if err != nil {
log.Println("error reading cached settings (using defaults):", err)
}
Debugf("cached config: %s", config)
debugf("cached config: %s", config)
return config
}
case "help", "-h", "--help":
......
......@@ -9,7 +9,8 @@ import (
"syscall"
)
const GiB = 1024 * 1024 * 1024 // One gibibyte (2^30)
// GiB is exactly one gibibyte (2^30)
const GiB = 1024 * 1024 * 1024
// Function to verify the space constraints specified by the user.
// Return true if all the constraints are satisfied, or in case something unusual happens.
......@@ -20,7 +21,7 @@ func checkFreeSpace(baseDir string, minPerc float64, minGiB int) bool {
}
var stats syscall.Statfs_t
Debugf("Trying to check free space in %s", baseDir)
debugf("Trying to check free space in %s", baseDir)
err := syscall.Statfs(baseDir, &stats)
if err != nil {
log.Println("could not check free space:", err)
......@@ -30,7 +31,7 @@ func checkFreeSpace(baseDir string, minPerc float64, minGiB int) bool {
sizeBytes := uint64(stats.Bsize) * stats.Blocks
freeBytes := uint64(stats.Bsize) * stats.Bfree
Debugf("We have %f GiB, and %f GiB of them are free.", float64(sizeBytes)/GiB, float64(freeBytes)/GiB)
debugf("We have %f GiB, and %f GiB of them are free.", float64(sizeBytes)/GiB, float64(freeBytes)/GiB)
// The actual check... we fail if we are below either the absolute or the relative value
......
......@@ -24,8 +24,8 @@ func TestCheckFreeSpace(t *testing.T) {
// First, gather the data
data := gatherTestData("/")
var actualFreePerc float64 = 100 * float64(data.Bfree) / float64(data.Blocks)
var actualFreeGiB int = int(uint64(data.Bsize) * data.Bfree / GiB)
var actualFreePerc = 100 * float64(data.Bfree) / float64(data.Blocks)
var actualFreeGiB = int(uint64(data.Bsize) * data.Bfree / GiB)
// Now, let's make a quick run of the test
var result bool
......
......@@ -11,32 +11,32 @@ import (
"strconv"
)
type PidLocker struct {
type pidLocker struct {
pid int
f string
}
func NewPidLocker(lockfile string) *PidLocker {
return &PidLocker{
func newPidLocker(lockfile string) *pidLocker {
return &pidLocker{
pid: os.Getpid(),
f: lockfile,
}
}
func (pl *PidLocker) Lock() {
func (pl *pidLocker) Lock() {
_, err := os.Stat(pl.f)
if err == nil {
log.Fatalf("pid file %s already exists. Is snaprd running already?", pl.f)
}
Debugf("write pid %d to pidfile %s", pl.pid, pl.f)
debugf("write pid %d to pidfile %s", pl.pid, pl.f)
err = ioutil.WriteFile(pl.f, []byte(strconv.Itoa(pl.pid)), 0666)
if err != nil {
log.Fatalf("could not write pid file %s", pl.f)
}
}
func (pl *PidLocker) Unlock() {
Debugf("delete pidfile %s", pl.f)
func (pl *pidLocker) Unlock() {
debugf("delete pidfile %s", pl.f)
err := os.Remove(pl.f)
if err != nil {
log.Fatalf("could not remove pid file %s", pl.f)
......
......@@ -19,22 +19,22 @@ import (
var config *Config
var logger *log.Logger
func Debugf(format string, args ...interface{}) {
func debugf(format string, args ...interface{}) {
if os.Getenv("SNAPRD_DEBUG") == "1" {
logger.Output(2, "<DEBUG> "+fmt.Sprintf(format, args...))
}
}
// The LastGoodTicker is the clock for the create loop. It takes the last
// lastGoodTicker is the clock for the create loop. It takes the last
// created snapshot on its input channel and outputs it on the output channel,
// but only after an appropriate waiting time. To start things off, the first
// lastGood snapshot has to be read from disk.
func LastGoodTicker(in, out chan *Snapshot, cl Clock) {
func lastGoodTicker(in, out chan *snapshot, cl clock) {
var gap, wait time.Duration
var sn *Snapshot
sn = LastGoodFromDisk(cl)
var sn *snapshot
sn = lastGoodFromDisk(cl)
if sn != nil {
Debugf("lastgood from disk: %s\n", sn.String())
debugf("lastgood from disk: %s\n", sn.String())
}
// kick off the loop
go func() {
......@@ -45,12 +45,12 @@ func LastGoodTicker(in, out chan *Snapshot, cl Clock) {
sn := <-in
if sn != nil {
gap = cl.Now().Sub(sn.startTime)
Debugf("gap: %s", gap)
debugf("gap: %s", gap)
wait = schedules[config.Schedule][0] - gap
if wait > 0 {
log.Println("wait", wait, "before next snapshot")
time.Sleep(wait)
Debugf("Awoken at %s\n", cl.Now())
debugf("Awoken at %s\n", cl.Now())
}
}
out <- sn
......@@ -60,7 +60,7 @@ func LastGoodTicker(in, out chan *Snapshot, cl Clock) {
// subcmdRun is the main, long-running routine and starts off a couple of
// helper goroutines.
func subcmdRun() (ferr error) {
pl := NewPidLocker(filepath.Join(config.repository, ".pid"))
pl := newPidLocker(filepath.Join(config.repository, ".pid"))
pl.Lock()
defer pl.Unlock()
if !config.NoWait {
......@@ -77,51 +77,51 @@ func subcmdRun() (ferr error) {
// The obsoleteQueue should not be larger than the absolute number of
// expected snapshots. However, there is no way (yet) to calculate that
// number.
obsoleteQueue := make(chan *Snapshot, 10000)
lastGoodIn := make(chan *Snapshot)
lastGoodOut := make(chan *Snapshot)
obsoleteQueue := make(chan *snapshot, 10000)
lastGoodIn := make(chan *snapshot)
lastGoodOut := make(chan *snapshot)
freeSpaceCheck := make(chan struct{}) // Empty type for the channel: we don't care about what is inside, only about the fact that there is something inside
cl := new(realClock)
go LastGoodTicker(lastGoodIn, lastGoodOut, cl)
go lastGoodTicker(lastGoodIn, lastGoodOut, cl)
// Snapshot creation loop
go func() {
var lastGood *Snapshot
var lastGood *snapshot
var createError error
CREATE_LOOP:
for {
select {
case <-createExit:
Debugf("gracefully exiting snapshot creation goroutine")
debugf("gracefully exiting snapshot creation goroutine")
lastGoodOut = nil
break CREATE_LOOP
case lastGood = <-lastGoodOut:
sn, err := CreateSnapshot(lastGood)
sn, err := createSnapshot(lastGood)
if err != nil || sn == nil {
Debugf("snapshot creation finally failed (%s), the partial transfer will hopefully be reused", err)
debugf("snapshot creation finally failed (%s), the partial transfer will hopefully be reused", err)
//createError = err
//go func() { createExit <- true; return }()
}
lastGoodIn <- sn
Debugf("pruning")
debugf("pruning")
prune(obsoleteQueue, cl)
// If we purge automatically all the expired snapshots,
// there's nothing to remove to free space.
if config.NoPurge {
Debugf("checking space constraints")
debugf("checking space constraints")
freeSpaceCheck <- struct{}{}
}
}
}
createExitDone <- createError
}()
Debugf("started snapshot creation goroutine")
debugf("started snapshot creation goroutine")
// Usually the purger gets its input only from prune(). But there
// could be snapshots left behind from a previously failed snaprd run, so
// we fill the obsoleteQueue once at the beginning.
for _, sn := range FindDangling(cl) {
for _, sn := range findDangling(cl) {
obsoleteQueue <- sn
}
......@@ -133,7 +133,7 @@ func subcmdRun() (ferr error) {
}
}
}()
Debugf("started purge goroutine")
debugf("started purge goroutine")
// If we are going to automatically purge all expired snapshots, we
// needn't even start the gofunc
......@@ -143,7 +143,7 @@ func subcmdRun() (ferr error) {
for {
<-freeSpaceCheck // Wait until we are ordered to do something
// Get all obsolete snapshots
snapshots, err := FindSnapshots(cl) // This returns a sorted list
snapshots, err := findSnapshots(cl) // This returns a sorted list
if err != nil {
log.Println(err)
return
......@@ -152,7 +152,7 @@ func subcmdRun() (ferr error) {
log.Println("less than 2 snapshots found, not pruning")
return
}
obsolete := snapshots.state(STATE_OBSOLETE, 0)
obsolete := snapshots.state(stateObsolete, 0)
// We only delete as long as we need *AND* we have something to delete
for !checkFreeSpace(config.repository, config.MinPercSpace, config.MinGiBSpace) && len(obsolete) > 0 {
// If there is not enough space, purge the oldest snapshot
......@@ -170,7 +170,7 @@ func subcmdRun() (ferr error) {
signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM, syscall.SIGUSR1)
select {
case sig := <-sigc:
Debugf("Got signal", sig)
debugf("Got signal", sig)
switch sig {
case syscall.SIGINT, syscall.SIGTERM:
log.Println("-> Immediate exit")
......@@ -188,19 +188,19 @@ func subcmdRun() (ferr error) {
func subcmdList() {
intervals := schedules[config.Schedule]
cl := new(realClock)
snapshots, err := FindSnapshots(cl)
snapshots, err := findSnapshots(cl)
if err != nil {
log.Println(err)
}
for n := len(intervals) - 2; n >= 0; n-- {
Debugf("listing interval %d", n)
debugf("listing interval %d", n)
if config.showAll {
snapshots = snapshots.state(ANY, NONE)
snapshots = snapshots.state(any, none)
} else {
snapshots = snapshots.state(STATE_COMPLETE, NONE)
snapshots = snapshots.state(stateComplete, none)
}
snapshots := snapshots.interval(intervals, n, cl)
Debugf("snapshots in interval %d: %s", n, snapshots)
debugf("snapshots in interval %d: %s", n, snapshots)
if n < len(intervals)-2 {
fmt.Printf("### From %s ago, %d/%d\n", intervals.offset(n+1), len(snapshots), intervals.goal(n))
} else {
......@@ -230,7 +230,7 @@ func subcmdList() {
func main() {
logger = log.New(os.Stderr, "", log.Ldate|log.Ltime|log.Lshortfile)
config = LoadConfig()
config = loadConfig()
if config == nil {
log.Fatal("no config, don't know what to do!")
}
......
......@@ -12,11 +12,11 @@ import (
// Sieves snapshots according to schedule and marks them as obsolete. Also,
// enqueue them in the buffered channel q for later reuse or deletion.
func prune(q chan *Snapshot, cl Clock) {
func prune(q chan *snapshot, cl clock) {
intervals := schedules[config.Schedule]
// interval 0 does not need pruning, start with 1
for i := len(intervals) - 2; i > 0; i-- {
snapshots, err := FindSnapshots(cl)
snapshots, err := findSnapshots(cl)
if err != nil {
log.Println(err)
return
......@@ -25,14 +25,14 @@ func prune(q chan *Snapshot, cl Clock) {
log.Println("less than 2 snapshots found, not pruning")
return
}
iv := snapshots.interval(intervals, i, cl).state(STATE_COMPLETE, STATE_OBSOLETE)
iv := snapshots.interval(intervals, i, cl).state(stateComplete, stateObsolete)
pruneAgain := false
if len(iv) > 2 {
// prune highest interval by maximum number
if (i == len(intervals)-2) &&
(len(iv) > config.MaxKeep) &&
(config.MaxKeep != 0) {
Debugf("%d snapshots in oldest interval", len(iv))
debugf("%d snapshots in oldest interval", len(iv))
log.Printf("mark oldest as obsolete: %s", iv[0])
iv[0].transObsolete()
q <- iv[0]
......
......@@ -59,17 +59,17 @@ func mockConfig() {
func mockRepository() {
for _, s := range mockSnapshots {
os.MkdirAll(filepath.Join(config.repository, DATA_SUBDIR, s), 0777)
os.MkdirAll(filepath.Join(config.repository, dataSubdir, s), 0777)
}
}
func assertSnapshotChanLen(t *testing.T, c chan *Snapshot, want int) {
func assertSnapshotChanLen(t *testing.T, c chan *snapshot, want int) {
if got := len(c); got != want {
t.Errorf("channel %v contains %v snapshots, wanted %v", c, got, want)
}
}
func assertSnapshotChanItem(t *testing.T, c chan *Snapshot, want string) {
func assertSnapshotChanItem(t *testing.T, c chan *snapshot, want string) {
if got := <-c; got.String() != want {
t.Errorf("prune() obsoleted %v, wanted %v", got.String(), want)
}
......@@ -86,7 +86,7 @@ func TestPrune(t *testing.T) {
mockRepository()
defer os.RemoveAll(config.repository)
cl := newSkewClock(startAt)
c := make(chan *Snapshot, 100)
c := make(chan *snapshot, 100)
tests := []pruneTestPair{
{0,
......
......@@ -17,7 +17,7 @@ import (
// createRsyncCommand returns an exec.Command structure that, when executed,
// creates a snapshot using rsync. Takes an optional (non-nil) base to be used
// with rsyncs --link-dest feature.
func createRsyncCommand(sn *Snapshot, base *Snapshot) *exec.Cmd {
func createRsyncCommand(sn *snapshot, base *snapshot) *exec.Cmd {
cmd := exec.Command(config.RsyncPath)
args := make([]string, 0, 256)
args = append(args, config.RsyncPath)
......@@ -29,38 +29,38 @@ func createRsyncCommand(sn *Snapshot, base *Snapshot) *exec.Cmd {
}
args = append(args, config.Origin, sn.FullName())
cmd.Args = args
cmd.Dir = filepath.Join(config.repository, DATA_SUBDIR)
cmd.Dir = filepath.Join(config.repository, dataSubdir)
log.Println("run:", args)
return cmd
}
// runRsyncCommand executes the given command. On successful startup return an
// error channel the caller can receive a return status from.
func runRsyncCommand(cmd *exec.Cmd) (error, chan error) {
func runRsyncCommand(cmd *exec.Cmd) (chan error, error) {
var err error
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
Debugf("starting rsync command")
debugf("starting rsync command")
err = cmd.Start()
if err != nil {
return err, nil
return nil, err
}
done := make(chan error)
go func() {
done <- cmd.Wait()
return
}()
return nil, done
return done, nil
}
// CreateSnapshot starts a potentially long running rsync command and returns a
// createSnapshot starts a potentially long running rsync command and returns a
// Snapshot pointer on success.
// For non-zero return values of rsync potentially restart the process if the
// error was presumably volatile.
func CreateSnapshot(base *Snapshot) (*Snapshot, error) {
func createSnapshot(base *snapshot) (*snapshot, error) {
cl := new(realClock)
newSn := LastReusableFromDisk(cl)
newSn := lastReusableFromDisk(cl)
if newSn == nil {
newSn = newIncompleteSnapshot(cl)
......@@ -68,24 +68,24 @@ func CreateSnapshot(base *Snapshot) (*Snapshot, error) {
newSn.transIncomplete(cl)
}
cmd := createRsyncCommand(newSn, base)
err, done := runRsyncCommand(cmd)
done, err := runRsyncCommand(cmd)
if err != nil {
log.Fatalln("could not start rsync command:", err)
}
Debugf("rsync started")
debugf("rsync started")
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
for {
select {
case sig := <-sigc:
Debugf("trying to kill rsync with signal %v", sig)
debugf("trying to kill rsync with signal %v", sig)
err := cmd.Process.Signal(sig)
if err != nil {
log.Fatal("failed to kill: ", err)
}
return nil, errors.New("rsync killed by request")
case err := <-done:
Debugf("received something on done channel: ", err)
debugf("received something on done channel: ", err)
if err != nil {
// At this stage rsync ran, but with errors.
// Restart in case of
......@@ -98,10 +98,10 @@ func CreateSnapshot(base *Snapshot) (*Snapshot, error) {
// First, get the error code
if exiterr, ok := err.(*exec.ExitError); ok { // The return code != 0)
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { // Finally get the actual status code
Debugf("The error code we got is: ", status.ExitStatus())
debugf("The error code we got is: ", status.ExitStatus())
// status now holds the actual return code
if status.ExitStatus() == 24 { // Magic number: means some files couldn't be copied because they vanished, so nothing critical. See man rsync
Debugf("Some files failed to copy because they were deleted in the meantime, but nothing critical... going on...")
debugf("Some files failed to copy because they were deleted in the meantime, but nothing critical... going on...")
failed = false
}
}
......
......@@ -30,9 +30,8 @@ type intervalList []time.Duration
func (il intervalList) offset(i int) time.Duration {
if i == 0 {
return 0
} else {
return il[i] + il.offset(i-1)
}
return il[i] + il.offset(i-1)
}
// Returns how many snapshots are the goal in the given interval
......@@ -112,7 +111,7 @@ func (schl scheduleList) List() {
func (json jsonInterval) IntervalList() intervalList {
il := make(intervalList, len(json))
for i, interval := range json {
var duration time.Duration = 0
var duration time.Duration
Loop:
for k, v := range interval {
switch k {
......
This diff is collapsed.
......@@ -18,7 +18,7 @@ const (
func TestNewSnapshot(t *testing.T) {
out := strconv.FormatInt(sdate, 10) + "-" + strconv.FormatInt(edate, 10) + " Complete"
sn := newSnapshot(time.Unix(sdate, 0), time.Unix(edate, 0), STATE_COMPLETE)
sn := newSnapshot(time.Unix(sdate, 0), time.Unix(edate, 0), stateComplete)
if s := sn.String(); s != out {
t.Errorf("sn.String() = %v, want %v", s, out)
}
......@@ -38,7 +38,7 @@ var mockSnapshotsDangling = []string{
func mockRepositoryDangling() {
for _, s := range mockSnapshotsDangling {
os.MkdirAll(filepath.Join(config.repository, DATA_SUBDIR, s), 0777)
os.MkdirAll(filepath.Join(config.repository, dataSubdir, s), 0777)
}
}
......@@ -57,7 +57,7 @@ func TestFindDangling(t *testing.T) {
defer os.RemoveAll(config.repository)
cl := newSkewClock(startAt)
sl := FindDangling(cl)
sl := findDangling(cl)
lgot, lwant := len(sl), len(tests)
if lgot != lwant {
t.Errorf("FindDangling() found %v, should be %v", lgot, lwant)
......@@ -75,55 +75,55 @@ func TestLastGood(t *testing.T) {
defer os.RemoveAll(config.repository)
cl := newSkewClock(startAt)
sl, _ := FindSnapshots(cl)
sl, _ := findSnapshots(cl)
if s := sl.lastGood().String(); s != lastGood {
t.Errorf("lastGood() found %v, should be %v", s, lastGood)
}
// Advance to the next snapshot that is not (yet) complete, and check that it
// is omitted as it should be
os.Mkdir(filepath.Join(config.repository, DATA_SUBDIR, "1400337727-0-incomplete"), 0777)
os.Mkdir(filepath.Join(config.repository, dataSubdir, "1400337727-0-incomplete"), 0777)
cl.skew -= schedules["testing2"][0]
sl, _ = FindSnapshots(cl)
sl, _ = findSnapshots(cl)
if s := sl.lastGood().String(); s != lastGood {
t.Errorf("lastGood() found %v, should be %v", s, lastGood)
}
}
type snStateTestPair struct {
include SnapshotState
exclude SnapshotState
sl *SnapshotList
include snapshotState
exclude snapshotState
sl *snapshotList
}
func TestSnapshotState(t *testing.T) {
slIn := &SnapshotList{
{time.Unix(1400337531, 0), time.Unix(1400337532, 0), STATE_COMPLETE},
{time.Unix(1400337611, 0), time.Unix(1400337612, 0), STATE_COMPLETE},
{time.Unix(1400337651, 0), time.Unix(1400337652, 0), STATE_PURGING},
{time.Unix(1400337671, 0), time.Unix(1400337672, 0), STATE_COMPLETE},
{time.Unix(1400337691, 0), time.Unix(1400337692, 0), STATE_COMPLETE},
{time.Unix(1400337706, 0), time.Unix(1400337707, 0), STATE_COMPLETE},
{time.Unix(1400337711, 0), time.Unix(1400337712, 0), STATE_OBSOLETE},
{time.Unix(1400337716, 0), time.Unix(1400337717, 0), STATE_COMPLETE},
{time.Unix(1400337721, 0), time.Unix(1400337722, 0), STATE_INCOMPLETE},
slIn := &snapshotList{
{time.Unix(1400337531, 0), time.Unix(1400337532, 0), stateComplete},
{time.Unix(1400337611, 0), time.Unix(1400337612, 0), stateComplete},
{time.Unix(1400337651, 0), time.Unix(1400337652, 0), statePurging},
{time.Unix(1400337671, 0), time.Unix(1400337672, 0), stateComplete},
{time.Unix(1400337691, 0), time.Unix(1400337692, 0), stateComplete},
{time.Unix(1400337706, 0), time.Unix(1400337707, 0), stateComplete},
{time.Unix(1400337711, 0), time.Unix(1400337712, 0), stateObsolete},
{time.Unix(1400337716, 0), time.Unix(1400337717, 0), stateComplete},
{time.Unix(1400337721, 0), time.Unix(1400337722, 0), stateIncomplete},
}
tests := []snStateTestPair{
{
STATE_PURGING, 0, &SnapshotList{
&Snapshot{time.Unix(1400337651, 0), time.Unix(1400337652, 0), STATE_PURGING},
statePurging, 0, &snapshotList{
&snapshot{time.Unix(1400337651, 0), time.Unix(1400337652, 0), statePurging},
},
},
{
STATE_PURGING + STATE_OBSOLETE, 0, &SnapshotList{
&Snapshot{time.Unix(1400337651, 0), time.Unix(1400337652, 0), STATE_PURGING},
&Snapshot{time.Unix(1400337711, 0), time.Unix(1400337712, 0), STATE_OBSOLETE},
statePurging + stateObsolete, 0, &snapshotList{
&snapshot{time.Unix(1400337651, 0), time.Unix(1400337652, 0), statePurging},
&snapshot{time.Unix(1400337711, 0), time.Unix(1400337712, 0), stateObsolete},
},
},
{