From 4a70cf8a77da58751fc583cff54d010ab2b74df9 Mon Sep 17 00:00:00 2001 From: PoAn Yang Date: Fri, 26 Apr 2024 12:01:25 +0800 Subject: [PATCH] fix: golangci-lint error Signed-off-by: PoAn Yang --- Dockerfile.dapper | 6 +- backupbackingimage/backupbackingimage.go | 38 +++++-- config.go | 2 - deltablock.go | 92 +++++++++++---- fsops/fsops.go | 4 +- list_inspect_test.go | 137 +++++++++++++++-------- scripts/validate | 16 +-- test/backup_test.go | 11 +- util/util.go | 24 ++-- 9 files changed, 215 insertions(+), 115 deletions(-) diff --git a/Dockerfile.dapper b/Dockerfile.dapper index ee79381e6..a29967914 100644 --- a/Dockerfile.dapper +++ b/Dockerfile.dapper @@ -10,15 +10,15 @@ RUN apt-get update && \ rm -f /bin/sh && ln -s /bin/bash /bin/sh ENV DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-1.10.3 \ - DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm \ + DOCKER_URL_arm64=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm \ DOCKER_URL=DOCKER_URL_${ARCH} RUN wget -O - ${!DOCKER_URL} > /usr/bin/docker && chmod +x /usr/bin/docker -ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm=armv6l GOLANG_ARCH=GOLANG_ARCH_${ARCH} \ +ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm64=arm64 GOLANG_ARCH=GOLANG_ARCH_${ARCH} \ GOPATH=/go PATH=/go/bin:/usr/local/go/bin:${PATH} SHELL=/bin/bash -RUN wget -O - https://storage.googleapis.com/golang/go1.17.10.linux-${!GOLANG_ARCH}.tar.gz | tar -xzf - -C /usr/local +RUN wget -O - https://storage.googleapis.com/golang/go1.21.3.linux-${!GOLANG_ARCH}.tar.gz | tar -xzf - -C /usr/local RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2 ENV DAPPER_SOURCE /go/src/github.com/longhorn/backupstore diff --git a/backupbackingimage/backupbackingimage.go b/backupbackingimage/backupbackingimage.go index 672b17ff6..5a71dfe24 100644 --- a/backupbackingimage/backupbackingimage.go +++ 
b/backupbackingimage/backupbackingimage.go @@ -95,10 +95,14 @@ func CreateBackingImageBackup(config *BackupConfig, backupBackingImage *BackupBa return err } - defer lock.Unlock() if err := lock.Lock(); err != nil { return err } + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + log.WithError(unlockErr).Warn("Failed to unlock backup backing image") + } + }() exists, err := addBackingImageConfigInBackupStore(bsDriver, backupBackingImage) if err != nil { @@ -123,7 +127,11 @@ func CreateBackingImageBackup(config *BackupConfig, backupBackingImage *BackupBa go func() { defer backupOperation.CloseFile() - defer lock.Unlock() + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + log.WithError(unlockErr).Warn("Failed to unlock backup backing image") + } + }() backupOperation.UpdateBackupProgress(string(common.ProgressStateInProgress), 0, "", "") @@ -322,10 +330,14 @@ func RestoreBackingImageBackup(config *RestoreConfig, restoreOperation RestoreOp return err } - defer lock.Unlock() if err := lock.Lock(); err != nil { return err } + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock restore") + } + }() backupBackingImage, err := loadBackingImageConfigInBackupStore(bsDriver, backingImageName) if err != nil { @@ -356,8 +368,16 @@ func RestoreBackingImageBackup(config *RestoreConfig, restoreOperation RestoreOp } go func() { - defer backingImageFile.Close() - defer lock.Unlock() + defer func() { + if closeErr := backingImageFile.Close(); closeErr != nil { + logrus.WithError(closeErr).Warn("Failed to close backing image file") + } + }() + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock restore") + } + }() progress := &common.Progress{ TotalBlockCounts: int64(len(backupBackingImage.Blocks)), @@ -416,7 +436,7 @@ func checkBackingImageFile(backingImageFilePath string, backupBackingImage *Back // We want to 
truncate regular files, but not device if stat.Mode()&os.ModeType == 0 { if err := backingImageFile.Truncate(backupBackingImage.Size); err != nil { - errors.Wrapf(err, "failed to truncate backing image") + err = errors.Wrapf(err, "failed to truncate backing image") return nil, err } } @@ -519,7 +539,11 @@ func RemoveBackingImageBackup(backupURL string) (err error) { if err := lock.Lock(); err != nil { return err } - defer lock.Unlock() + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock restore") + } + }() // If we fail to load the backup we still want to proceed with the deletion of the backup file backupBackingImage, err := loadBackingImageConfigInBackupStore(bsDriver, backingImageName) diff --git a/config.go b/config.go index a996ba0db..40d9305fe 100644 --- a/config.go +++ b/config.go @@ -220,7 +220,6 @@ func getVolumeNames(jobQueues *workerpool.WorkerPool, driver BackupStoreDriver) Payload: lv2Paths, Err: nil, } - return }) } @@ -258,7 +257,6 @@ func getVolumeNames(jobQueues *workerpool.WorkerPool, driver BackupStoreDriver) Payload: volumeNames, Err: nil, } - return }) } } diff --git a/deltablock.go b/deltablock.go index 7c2ace7be..a440ad610 100644 --- a/deltablock.go +++ b/deltablock.go @@ -138,7 +138,9 @@ func CreateDeltaBlockBackup(backupName string, config *DeltaBackupConfig) (isInc defer func() { if err != nil { log.WithError(err).Error("Failed to create delta block backup") - deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateError), 0, "", err.Error()) + if updateErr := deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateError), 0, "", err.Error()); updateErr != nil { + log.WithError(updateErr).Error("Failed to update backup status") + } } }() @@ -152,7 +154,11 @@ func CreateDeltaBlockBackup(backupName string, config *DeltaBackupConfig) (isInc return false, err } - defer lock.Unlock() + defer func() { + if unlockErr := 
lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock") + } + }() if err := lock.Lock(); err != nil { return false, err } @@ -218,13 +224,17 @@ func CreateDeltaBlockBackup(backupName string, config *DeltaBackupConfig) (isInc delta, err := deltaOps.CompareSnapshot(snapshot.Name, backupRequest.getLastSnapshotName(), volume.Name) if err != nil { - deltaOps.CloseSnapshot(snapshot.Name, volume.Name) + if closeErr := deltaOps.CloseSnapshot(snapshot.Name, volume.Name); closeErr != nil { + err = errors.Wrapf(err, "during handling err %+v, close snapshot returns err %+v", err, closeErr) + } return backupRequest.isIncrementalBackup(), err } if delta.BlockSize != DEFAULT_BLOCK_SIZE { - deltaOps.CloseSnapshot(snapshot.Name, volume.Name) - return backupRequest.isIncrementalBackup(), - fmt.Errorf("driver doesn't support block sizes other than %v", DEFAULT_BLOCK_SIZE) + err = fmt.Errorf("driver doesn't support block sizes other than %v", DEFAULT_BLOCK_SIZE) + if closeErr := deltaOps.CloseSnapshot(snapshot.Name, volume.Name); closeErr != nil { + err = errors.Wrapf(err, "during handling err %+v, close snapshot returns err %+v", err, closeErr) + } + return backupRequest.isIncrementalBackup(), err } log.WithFields(logrus.Fields{ LogFieldReason: LogReasonComplete, @@ -258,22 +268,38 @@ func CreateDeltaBlockBackup(backupName string, config *DeltaBackupConfig) (isInc // keep lock alive for async go routine. 
if err := lock.Lock(); err != nil { - deltaOps.CloseSnapshot(snapshot.Name, volume.Name) + if closeErr := deltaOps.CloseSnapshot(snapshot.Name, volume.Name); closeErr != nil { + err = errors.Wrapf(err, "during handling err %+v, close snapshot returns err %+v", err, closeErr) + } return backupRequest.isIncrementalBackup(), err } go func() { - defer deltaOps.CloseSnapshot(snapshot.Name, volume.Name) - defer lock.Unlock() + defer func() { + if closeErr := deltaOps.CloseSnapshot(snapshot.Name, volume.Name); closeErr != nil { + logrus.WithError(closeErr).Error("Failed to close snapshot") + } + }() + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock") + } + }() - deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateInProgress), 0, "", "") + if updateErr := deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateInProgress), 0, "", ""); updateErr != nil { + logrus.WithError(updateErr).Error("Failed to update backup status") + } log.Info("Performing delta block backup") if progress, backup, err := performBackup(bsDriver, config, delta, deltaBackup, backupRequest.lastBackup); err != nil { logrus.WithError(err).Errorf("Failed to perform backup for volume %v snapshot %v", volume.Name, snapshot.Name) - deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateInProgress), progress, "", err.Error()) + if updateErr := deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateInProgress), progress, "", err.Error()); updateErr != nil { + logrus.WithError(updateErr).Error("Failed to update backup status") + } } else { - deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateInProgress), progress, backup, "") + if updateErr := deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateInProgress), progress, backup, ""); updateErr != nil { + 
logrus.WithError(updateErr).Error("Failed to update backup status") + } } }() return backupRequest.isIncrementalBackup(), nil @@ -369,7 +395,9 @@ func backupBlock(bsDriver BackupStoreDriver, config *DeltaBackupConfig, deltaBackup.Lock() defer deltaBackup.Unlock() updateBlocksAndProgress(deltaBackup, progress, checksum, newBlock) - deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateInProgress), progress.progress, "", "") + if updateErr := deltaOps.UpdateBackupStatus(snapshot.Name, volume.Name, string(types.ProgressStateInProgress), progress.progress, "", ""); updateErr != nil { + logrus.WithError(updateErr).Error("Failed to update backup status") + } }() blkFile := getBlockFilePath(volume.Name, checksum) @@ -640,7 +668,11 @@ func RestoreDeltaBlockBackup(ctx context.Context, config *DeltaRestoreConfig) er return err } - defer lock.Unlock() + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock") + } + }() if err := lock.Lock(); err != nil { return err } @@ -699,7 +731,9 @@ func RestoreDeltaBlockBackup(ctx context.Context, config *DeltaRestoreConfig) er defer func() { _ = deltaOps.CloseVolumeDev(volDev) deltaOps.UpdateRestoreStatus(volDevName, currentProgress, err) - lock.Unlock() + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock") + } }() progress := &progress{ @@ -791,7 +825,11 @@ func RestoreDeltaBlockBackupIncrementally(ctx context.Context, config *DeltaRest if err := lock.Lock(); err != nil { return err } - defer lock.Unlock() + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock") + } + }() vol, err := loadVolume(bsDriver, srcVolumeName) if err != nil { @@ -858,7 +896,11 @@ func RestoreDeltaBlockBackupIncrementally(ctx context.Context, config *DeltaRest } go func() { defer volDev.Close() - defer lock.Unlock() + defer func() { + if unlockErr := 
lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock") + } + }() // This pre-truncate is to ensure the XFS speculatively // preallocates post-EOF blocks get reclaimed when volDev is @@ -1079,7 +1121,11 @@ func DeleteBackupVolume(volumeName string, destURL string) error { if err := lock.Lock(); err != nil { return err } - defer lock.Unlock() + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock") + } + }() return removeVolume(volumeName, bsDriver) } @@ -1100,7 +1146,7 @@ func checkBlockReferenceCount(blockInfos map[string]*BlockInfo, backup *Backup, func getLatestBackup(backup *Backup, lastBackup *Backup) error { if lastBackup.SnapshotCreatedAt == "" { // FIXME - go lint points out that this copies a potentially locked sync.mutex - *lastBackup = *backup + *lastBackup = *backup // nolint:govet return nil } @@ -1116,7 +1162,7 @@ func getLatestBackup(backup *Backup, lastBackup *Backup) error { if backupTime.After(lastBackupTime) { // FIXME - go lint points out that this copies a potentially locked sync.mutex - *lastBackup = *backup + *lastBackup = *backup // nolint:govet } return nil @@ -1144,7 +1190,11 @@ func DeleteDeltaBlockBackup(backupURL string) error { if err := lock.Lock(); err != nil { return err } - defer lock.Unlock() + defer func() { + if unlockErr := lock.Unlock(); unlockErr != nil { + logrus.WithError(unlockErr).Warn("Failed to unlock") + } + }() // If we fail to load the backup we still want to proceed with the deletion of the backup file backupToBeDeleted, err := loadBackup(bsDriver, backupName, volumeName) diff --git a/fsops/fsops.go b/fsops/fsops.go index 157ff9db1..cbae35059 100644 --- a/fsops/fsops.go +++ b/fsops/fsops.go @@ -127,7 +127,9 @@ func (f *FileSystemOperator) List(path string) ([]string, error) { func (f *FileSystemOperator) Upload(src, dst string) error { tmpDst := dst + ".tmp" + "." 
+ strconv.FormatInt(time.Now().UTC().UnixNano(), 10) if f.FileExists(tmpDst) { - f.Remove(tmpDst) + if err := f.Remove(tmpDst); err != nil { + return err + } } if err := f.preparePath(dst); err != nil { return err diff --git a/list_inspect_test.go b/list_inspect_test.go index 1cd567765..5c683478c 100644 --- a/list_inspect_test.go +++ b/list_inspect_test.go @@ -27,15 +27,15 @@ func (m *mockStoreDriver) Init() { m.fs = afero.NewMemMapFs() m.destURL = mockDriverURL - RegisterDriver(mockDriverName, func(destURL string) (BackupStoreDriver, error) { - m.fs.MkdirAll(filepath.Join(backupstoreBase, VOLUME_DIRECTORY), 0755) + RegisterDriver(mockDriverName, func(destURL string) (BackupStoreDriver, error) { // nolint:errcheck + m.fs.MkdirAll(filepath.Join(backupstoreBase, VOLUME_DIRECTORY), 0755) // nolint:errcheck return m, nil }) } func (m *mockStoreDriver) uninstall() { - m.fs.RemoveAll("/") - unregisterDriver(mockDriverName) + m.fs.RemoveAll("/") // nolint:errcheck + unregisterDriver(mockDriverName) // nolint:errcheck } func (m *mockStoreDriver) Kind() string { @@ -124,11 +124,14 @@ func TestListBackupVolumeNames(t *testing.T) { assert.Equal(0, len(volumeInfo)) // create pvc-1 folder and config - m.fs.MkdirAll(getVolumePath("pvc-1"), 0755) - afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + err = m.fs.MkdirAll(getVolumePath("pvc-1"), 0755) + assert.NoError(err) + err = afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + assert.NoError(err) // create pvc-2 folder without config - m.fs.MkdirAll(getVolumePath("pvc-2"), 0755) + err = m.fs.MkdirAll(getVolumePath("pvc-2"), 0755) + assert.NoError(err) // list backup volume names volumeInfo, err = List("", mockDriverURL, true) @@ -146,7 +149,8 @@ func TestListBackupVolumeBackups(t *testing.T) { defer m.uninstall() // create pvc-1 folder - m.fs.MkdirAll(getVolumePath("pvc-1"), 0755) + err := m.fs.MkdirAll(getVolumePath("pvc-1"), 0755) + assert.NoError(err) // 
list pvc-1 without config volumeInfo, err := List("pvc-1", mockDriverURL, false) @@ -154,16 +158,19 @@ func TestListBackupVolumeBackups(t *testing.T) { assert.Equal(1, len(volumeInfo["pvc-1"].Messages)) // create pvc-1 config - afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + err = afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + assert.NoError(err) // create backups folder - m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) + err = m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) + assert.NoError(err) // create 100 backups config for i := 1; i <= 100; i++ { backup := fmt.Sprintf("backup-%d", i) - afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), + err = afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), []byte(fmt.Sprintf(`{"Name":"%s","CreatedTime":"%s"}`, backup, time.Now().String())), 0644) + assert.NoError(err) } volumeInfo, err = List("pvc-1", mockDriverURL, false) @@ -181,7 +188,8 @@ func TestInspectVolume(t *testing.T) { defer m.uninstall() // create pvc-1 folder and config - m.fs.MkdirAll(getVolumePath("pvc-1"), 0755) + err := m.fs.MkdirAll(getVolumePath("pvc-1"), 0755) + assert.NoError(err) volumeURL := EncodeBackupURL("", "pvc-1", mockDriverURL) volumeInfo, err := InspectVolume(volumeURL) @@ -189,8 +197,9 @@ func TestInspectVolume(t *testing.T) { assert.Nil(volumeInfo) // create pvc-1 config - afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), + err = afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1","Size":"2147483648","CreatedTime":"2021-05-12T00:52:01Z","LastBackupName":"backup-3","LastBackupAt":"2021-05-17T05:31:01Z"}`), 0644) + assert.NoError(err) // inspect backup volume config volumeURL = EncodeBackupURL("", "pvc-1", mockDriverURL) @@ -211,7 +220,8 @@ func TestInspectBackup(t *testing.T) { defer m.uninstall() // create pvc-1 folder and config - m.fs.MkdirAll(getVolumePath("pvc-1"), 0755) + err := m.fs.MkdirAll(getVolumePath("pvc-1"), 0755) + 
assert.NoError(err) backupURL := EncodeBackupURL("backup-1", "pvc-1", mockDriverURL) backupInfo, err := InspectBackup(backupURL) @@ -219,28 +229,33 @@ assert.Nil(backupInfo) // create pvc-1 config - afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), + err = afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1","Size":"2147483648","CreatedTime":"2021-05-12T00:52:01Z","LastBackupName":"backup-3","LastBackupAt":"2021-05-17T05:31:01Z"}`), 0644) + assert.NoError(err) // create backups folder - m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) + err = m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) + assert.NoError(err) // inspect an invalid backup-1 config - afero.WriteFile(m.fs, getBackupConfigPath("backup-1", "pvc-1"), []byte(""), 0644) + err = afero.WriteFile(m.fs, getBackupConfigPath("backup-1", "pvc-1"), []byte(""), 0644) + assert.NoError(err) backupInfo, err = InspectBackup(backupURL) assert.Error(err) assert.Nil(backupInfo) // create a in progress backup-1 config - afero.WriteFile(m.fs, getBackupConfigPath("backup-1", "pvc-1"), + err = afero.WriteFile(m.fs, getBackupConfigPath("backup-1", "pvc-1"), []byte(`{"Name":"backup-1"}`), 0644) + assert.NoError(err) backupInfo, err = InspectBackup(backupURL) assert.Error(err) assert.Nil(backupInfo) // create a valid backup-1 config - afero.WriteFile(m.fs, getBackupConfigPath("backup-1", "pvc-1"), + err = afero.WriteFile(m.fs, getBackupConfigPath("backup-1", "pvc-1"), []byte(`{"Name":"backup-1","VolumeName":"pvc-1","Size":"115343360","SnapshotName":"1eb35e75-73d8-4e8c-9761-3df6ec35ba9a","SnapshotCreatedAt":"2021-06-07T08:57:23Z","CreatedTime":"2021-06-07T08:57:25Z","Size":"115343360"}`), 0644) + assert.NoError(err) // inspect backup-1 config backupInfo, err = InspectBackup(backupURL) @@ -261,12 +276,15 @@ func BenchmarkBackupVolumeNames10ms32volumes(b *testing.B) { // create 32 backup volumes for i := 1; i <= 32; i++ { pvc := fmt.Sprintf("pvc-%d", i) - 
m.fs.MkdirAll(getVolumePath(pvc), 0755) - afero.WriteFile(m.fs, getVolumeFilePath(pvc), []byte(fmt.Sprintf(`{"Name":%s}`, pvc)), 0644) + err := m.fs.MkdirAll(getVolumePath(pvc), 0755) + assert.NoError(b, err) + err = afero.WriteFile(m.fs, getVolumeFilePath(pvc), []byte(fmt.Sprintf(`{"Name":%s}`, pvc)), 0644) + assert.NoError(b, err) } for i := 0; i < b.N; i++ { - List("", mockDriverURL, true) + _, err := List("", mockDriverURL, true) + assert.NoError(b, err) } } @@ -278,12 +296,15 @@ func BenchmarkBackupVolumeNames100ms32volumes(b *testing.B) { // create 32 backup volumes for i := 1; i <= 32; i++ { pvc := fmt.Sprintf("pvc-%d", i) - m.fs.MkdirAll(getVolumePath(pvc), 0755) - afero.WriteFile(m.fs, getVolumeFilePath(pvc), []byte(fmt.Sprintf(`{"Name":%s}`, pvc)), 0644) + err := m.fs.MkdirAll(getVolumePath(pvc), 0755) + assert.NoError(b, err) + err = afero.WriteFile(m.fs, getVolumeFilePath(pvc), []byte(fmt.Sprintf(`{"Name":%s}`, pvc)), 0644) + assert.NoError(b, err) } for i := 0; i < b.N; i++ { - List("", mockDriverURL, true) + _, err := List("", mockDriverURL, true) + assert.NoError(b, err) } } @@ -295,12 +316,15 @@ func BenchmarkBackupVolumeNames250ms32volumes(b *testing.B) { // create 32 backup volumes for i := 1; i <= 32; i++ { pvc := fmt.Sprintf("pvc-%d", i) - m.fs.MkdirAll(getVolumePath(pvc), 0755) - afero.WriteFile(m.fs, getVolumeFilePath(pvc), []byte(fmt.Sprintf(`{"Name":%s}`, pvc)), 0644) + err := m.fs.MkdirAll(getVolumePath(pvc), 0755) + assert.NoError(b, err) + err = afero.WriteFile(m.fs, getVolumeFilePath(pvc), []byte(fmt.Sprintf(`{"Name":%s}`, pvc)), 0644) + assert.NoError(b, err) } for i := 0; i < b.N; i++ { - List("", mockDriverURL, true) + _, err := List("", mockDriverURL, true) + assert.NoError(b, err) } } @@ -312,12 +336,15 @@ func BenchmarkBackupVolumeNames500ms32volumes(b *testing.B) { // create 32 backup volumes for i := 1; i <= 32; i++ { pvc := fmt.Sprintf("pvc-%d", i) - m.fs.MkdirAll(getVolumePath(pvc), 0755) - afero.WriteFile(m.fs, 
getVolumeFilePath(pvc), []byte(fmt.Sprintf(`{"Name":%s}`, pvc)), 0644) + err := m.fs.MkdirAll(getVolumePath(pvc), 0755) + assert.NoError(b, err) + err = afero.WriteFile(m.fs, getVolumeFilePath(pvc), []byte(fmt.Sprintf(`{"Name":%s}`, pvc)), 0644) + assert.NoError(b, err) } for i := 0; i < b.N; i++ { - List("", mockDriverURL, true) + _, err := List("", mockDriverURL, true) + assert.NoError(b, err) } } @@ -327,18 +354,22 @@ func BenchmarkListBackupVolumeBackups10ms(b *testing.B) { defer m.uninstall() // create pvc-1 config - m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) - afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + err := m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) + assert.NoError(b, err) + err = afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + assert.NoError(b, err) // create 100 backups for i := 1; i <= 100; i++ { backup := fmt.Sprintf("backup-%d", i) - afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), + err = afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), []byte(fmt.Sprintf(`{"Name":"%s","CreatedTime":"%s"}`, backup, time.Now().String())), 0644) + assert.NoError(b, err) } for i := 0; i < b.N; i++ { - List("pvc-1", mockDriverURL, false) + _, err = List("pvc-1", mockDriverURL, false) + assert.NoError(b, err) } } @@ -348,18 +379,22 @@ func BenchmarkListBackupVolumeBackups100ms(b *testing.B) { defer m.uninstall() // create pvc-1 config - m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) - afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + err := m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) + assert.NoError(b, err) + err = afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + assert.NoError(b, err) // create 100 backups for i := 1; i <= 100; i++ { backup := fmt.Sprintf("backup-%d", i) - afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), + err = afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), 
[]byte(fmt.Sprintf(`{"Name":"%s","CreatedTime":"%s"}`, backup, time.Now().String())), 0644) + assert.NoError(b, err) } for i := 0; i < b.N; i++ { - List("pvc-1", mockDriverURL, false) + _, err = List("pvc-1", mockDriverURL, false) + assert.NoError(b, err) } } @@ -369,18 +404,22 @@ func BenchmarkListBackupVolumeBackups250ms(b *testing.B) { defer m.uninstall() // create pvc-1 config - m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) - afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + err := m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) + assert.NoError(b, err) + err = afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + assert.NoError(b, err) // create 100 backups for i := 1; i <= 100; i++ { backup := fmt.Sprintf("backup-%d", i) - afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), + err = afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), []byte(fmt.Sprintf(`{"Name":"%s","CreatedTime":"%s"}`, backup, time.Now().String())), 0644) + assert.NoError(b, err) } for i := 0; i < b.N; i++ { - List("pvc-1", mockDriverURL, false) + _, err = List("pvc-1", mockDriverURL, false) + assert.NoError(b, err) } } @@ -390,17 +429,21 @@ func BenchmarkListBackupVolumeBackups500ms(b *testing.B) { defer m.uninstall() // create pvc-1 config - m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) - afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + err := m.fs.MkdirAll(getBackupPath("pvc-1"), 0755) + assert.NoError(b, err) + err = afero.WriteFile(m.fs, getVolumeFilePath("pvc-1"), []byte(`{"Name":"pvc-1"}`), 0644) + assert.NoError(b, err) // create 100 backups for i := 1; i <= 100; i++ { backup := fmt.Sprintf("backup-%d", i) - afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), + err = afero.WriteFile(m.fs, getBackupConfigPath(backup, "pvc-1"), []byte(fmt.Sprintf(`{"Name":"%s","CreatedTime":"%s"}`, backup, time.Now().String())), 0644) + assert.NoError(b, err) } for i := 0; i < b.N; 
i++ { - List("pvc-1", mockDriverURL, false) + _, err = List("pvc-1", mockDriverURL, false) + assert.NoError(b, err) } } diff --git a/scripts/validate b/scripts/validate index 3081c0ac1..5c83da063 100755 --- a/scripts/validate +++ b/scripts/validate @@ -10,20 +10,8 @@ PACKAGES="$(find -name '*.go' | xargs -I{} dirname {} | cut -f2 -d/ | sort -u | echo Running: go vet go vet ${PACKAGES} -if [ ! -z "${DRONE_REPO}" ] && [ ! -z "${DRONE_PULL_REQUEST}" ]; then - wget https://github.com/$DRONE_REPO/pull/$DRONE_PULL_REQUEST.patch - echo "Running: golangci-lint run --new-from-patch=${DRONE_PULL_REQUEST}.patch" - golangci-lint run --new-from-patch="${DRONE_PULL_REQUEST}.patch" - rm "${DRONE_PULL_REQUEST}.patch" -elif [ ! -z "${DRONE_COMMIT_REF}" ]; then - echo "Running: golangci-lint run --new-from-rev=${DRONE_COMMIT_REF}" - golangci-lint run --new-from-rev=${DRONE_COMMIT_REF} -else - git symbolic-ref -q HEAD && REV="origin/HEAD" || REV="HEAD^" - headSHA=$(git rev-parse --short=12 ${REV}) - echo "Running: golangci-lint run --new-from-rev=${headSHA}" - golangci-lint run --new-from-rev=${headSHA} -fi +echo "Running: golangci-lint" +golangci-lint run --timeout=5m echo Running: go fmt test -z "$(go fmt ${PACKAGES} | tee /dev/stderr)" diff --git a/test/backup_test.go b/test/backup_test.go index 4a7109f4c..93853a216 100644 --- a/test/backup_test.go +++ b/test/backup_test.go @@ -39,6 +39,7 @@ func Test(t *testing.T) { TestingT(t) } type TestSuite struct { BasePath string BackupStorePath string + randFunc *rand.Rand } var _ = Suite(&TestSuite{}) @@ -234,12 +235,12 @@ func (s *TestSuite) getSnapshotName(snapPrefix string, i int) string { func (s *TestSuite) randomChange(data []byte, offset, length int64) { for i := int64(0); i < length; i++ { - data[offset+i] = letterBytes[rand.Intn(len(letterBytes))] + data[offset+i] = letterBytes[s.randFunc.Intn(len(letterBytes))] } } func (s *TestSuite) SetUpSuite(c *C) { - rand.Seed(time.Now().UTC().UnixNano()) + s.randFunc = 
rand.New(rand.NewSource(time.Now().UnixNano())) dir, err := os.MkdirTemp("", "backupstore-test") c.Assert(err, IsNil) @@ -333,7 +334,7 @@ func (s *TestSuite) TestBackupBasic(c *C) { blockSize := int64(backupstore.DEFAULT_BLOCK_SIZE) for i := int64(0); i < blockSize; i++ { - data[i] = letterBytes[rand.Intn(len(letterBytes))] + data[i] = letterBytes[s.randFunc.Intn(len(letterBytes))] } for i := int64(1); i < volumeContentSize/blockSize; i++ { for j := int64(0); j < blockSize; j++ { @@ -467,8 +468,8 @@ func (s *TestSuite) TestBackupRestoreExtra(c *C) { dataEmpty := make([]byte, blockSize) dataModified := make([]byte, blockSize) for i := int64(0); i < blockSize; i++ { - data[i] = letterBytes[rand.Intn(len(letterBytes))] - dataModified[i] = letterBytes[rand.Intn(len(letterBytes))] + data[i] = letterBytes[s.randFunc.Intn(len(letterBytes))] + dataModified[i] = letterBytes[s.randFunc.Intn(len(letterBytes))] } volume := RawFileVolume{ diff --git a/util/util.go b/util/util.go index 394050110..ff291b22b 100644 --- a/util/util.go +++ b/util/util.go @@ -333,7 +333,7 @@ func EnsureMountPoint(Kind, mountPoint string, mounter mount.Interface, log logr } }() - notMounted, err := mount.IsNotMountPoint(mounter, mountPoint) + isMountPoint, err := mounter.IsMountPoint(mountPoint) if err == fs.ErrNotExist { return false, nil } @@ -352,10 +352,10 @@ func EnsureMountPoint(Kind, mountPoint string, mounter mount.Interface, log logr if mntErr := cleanupMount(mountPoint, mounter, log); mntErr != nil { return true, errors.Wrapf(mntErr, "failed to clean up corrupted mount point %v", mountPoint) } - notMounted = true + isMountPoint = false } - if notMounted { + if !isMountPoint { return false, nil } @@ -400,36 +400,30 @@ func MountWithTimeout(mounter mount.Interface, source string, target string, fst // CleanUpMountPoints tries to clean up all existing mount points for existing backup stores func CleanUpMountPoints(mounter mount.Interface, log logrus.FieldLogger) error { - var errs error - 
filepath.Walk(MountDir, func(path string, info os.FileInfo, err error) error { + return filepath.Walk(MountDir, func(path string, info os.FileInfo, err error) error { if err != nil { - errs = multierr.Append(errs, errors.Wrapf(err, "failed to get file info of %v", path)) - return nil + return errors.Wrapf(err, "failed to get file info of %v", path) } if !info.IsDir() { return nil } - notMounted, err := mount.IsNotMountPoint(mounter, path) + isMountPoint, err := mounter.IsMountPoint(path) if err != nil { - errs = multierr.Append(errs, errors.Wrapf(err, "failed to check if %s is not mounted", path)) - return nil + return errors.Wrapf(err, "failed to check if %s is not mounted", path) } - if notMounted { + if !isMountPoint { return nil } if err := cleanupMount(path, mounter, log); err != nil { - errs = multierr.Append(errs, errors.Wrapf(err, "failed to clean up mount point %v", path)) + return errors.Wrapf(err, "failed to clean up mount point %v", path) } return nil }) - - return errs } func CheckBackupType(backupTarget string) (string, error) {