Browse Source

Big update on HyperBackup implementation (moved from fsh to storagePool)

TC pushbot 5 4 năm trước cách đây
mục cha
commit
cf63a2b040

+ 1 - 1
file_system.go

@@ -1868,7 +1868,7 @@ func system_fs_listRoot(w http.ResponseWriter, r *http.Request) {
 				roots = append(roots, thisDevice)
 			} else if store.Hierarchy == "backup" {
 				//Backup drive.
-				backupRoots = append(backupRoots, store.HierarchyConfig.(hybridBackup.BackupConfig).ParentUID)
+				backupRoots = append(backupRoots, store.HierarchyConfig.(hybridBackup.BackupTask).ParentUID)
 			}
 		}
 

+ 2 - 1
mod/filesystem/filesystem.go

@@ -108,7 +108,7 @@ func NewFileSystemHandler(option FileSystemOption) (*FileSystemHandler, error) {
 
 		if option.Hierarchy == "backup" {
 		//Backup disk. Create a Hierarchy Config for this drive
-			hierarchySpecificConfig = hybridBackup.BackupConfig{
+			hierarchySpecificConfig = hybridBackup.BackupTask{
 				CycleCounter:      0,
 				LastCycleTime:     time.Now().Unix(),
 				DiskUID:           option.Uuid,
@@ -117,6 +117,7 @@ func NewFileSystemHandler(option FileSystemOption) (*FileSystemHandler, error) {
 				Mode:              option.BackupMode,
 				DeleteFileMarkers: map[string]int64{},
 			}
+
 		}
 
 		//Create the fsdb for this handler

+ 91 - 5
mod/filesystem/hybridBackup/hybridBackup.go

@@ -34,7 +34,11 @@ import (
 	- If you need any function from the file system, copy and paste it in this module
 */
 
-type BackupConfig struct {
+type Manager struct {
+	Tasks []*BackupTask
+}
+
+type BackupTask struct {
 	JobName           string           //The name used by the scheduler for executing this config
 	CycleCounter      int64            //The number of backup executed in the background
 	LastCycleTime     int64            //The execution time of the last cycle
@@ -46,11 +50,93 @@ type BackupConfig struct {
 	Mode              string           //Backup mode
 }
 
-func executeBackup(backupConfig *BackupConfig, deepBackup bool) (string, error) {
+//A file in the backup drive that is restorable
+type RestorableFile struct {
+	Filename      string //Filename of this restorable object
+	RelpathOnDisk string //Relative path of this file to the root
+	Deleteime     int64  //Delete remaining time
+}
+
+//The restorable report
+type RestorableReport struct {
+	ParentUID       string           //The Disk ID to be restored to
+	DiskUID         string           //The Backup disk UID
+	RestorableFiles []RestorableFile //A list of restorable files
+}
+
+func NewHyperBackupManager() *Manager {
+	return &Manager{
+		Tasks: []*BackupTask{},
+	}
+}
+
+func (m *Manager) AddTask(newtask *BackupTask) error {
+	log.Println(">>>> [Debug] New Backup Tasks added: ", newtask)
+
+	/*for _, thisHandler := range fsHandlers {
+
+			if thisHandler.Hierarchy == "backup" {
+				//This is a backup drive. Generate its handler
+				backupConfig := thisHandler.HierarchyConfig.(hybridBackup.BackupTask)
+
+				//Get its parent mount point for backup
+				parentFileSystemHandler, err := GetFsHandlerByUUID(backupConfig.ParentUID)
+				if err != nil {
+					log.Println("Virtual Root with UUID: " + backupConfig.ParentUID + " not loaded. Unable to start backup process.")
+					break
+				}
+
+				backupConfig.JobName = "backup-daemon [" + thisHandler.UUID + "]"
+				backupConfig.ParentPath = parentFileSystemHandler.Path
+				backupConfig.CycleCounter = 1
+
+				//Debug backup execution
+				hybridBackup.HandleBackupProcess(&backupConfig)
+
+				//Remove the previous job if it exists
+				if systemScheduler.JobExists(backupConfig.JobName) {
+					systemScheduler.RemoveJobFromScheduleList(backupConfig.JobName)
+				}
+
+				//Create a scheduler for this disk
+				systemScheduler.CreateNewScheduledFunctionJob(backupConfig.JobName,
+					"Backup daemon from "+backupConfig.ParentUID+":/ to "+backupConfig.DiskUID+":/",
+					60,
+					func() (string, error) {
+						return hybridBackup.HandleBackupProcess(&backupConfig)
+					},
+				)
+			}
+
+	}*/
+
+	return nil
+}
+
+func executeBackup(backupConfig *BackupTask, deepBackup bool) (string, error) {
 	copiedFileList := []string{}
 
 	rootPath := filepath.ToSlash(filepath.Clean(backupConfig.ParentPath))
 
+	//Check if the backup parent root is identical / within backup disk
+	parentRootAbs, err := filepath.Abs(backupConfig.ParentPath)
+	if err != nil {
+		return "", errors.New("Unable to resolve parent disk path")
+	}
+
+	backupRootAbs, err := filepath.Abs(backupConfig.DiskPath)
+	if err != nil {
+		return "", errors.New("Unable to resolve backup disk path")
+	}
+
+	if len(parentRootAbs) >= len(backupRootAbs) {
+		if parentRootAbs[:len(backupRootAbs)] == backupRootAbs {
+			//parent root is within backup root. Raise configuration error
+			log.Println("*HyperBackup* Invalid backup cycle: Parent drive is located inside backup drive")
+			return "", errors.New("Configuration Error. Skipping backup cycle.")
+		}
+	}
+
 	//Add file cycles
 	fastWalk(rootPath, func(filename string) error {
 		if filepath.Base(filename) == "aofs.db" || filepath.Base(filename) == "aofs.db.lock" {
@@ -173,7 +259,7 @@ func executeBackup(backupConfig *BackupConfig, deepBackup bool) (string, error)
 }
 
 //Main handler function for hybrid backup
-func HandleBackupProcess(backupConfig *BackupConfig) (string, error) {
+func HandleBackupProcess(backupConfig *BackupTask) (string, error) {
 	log.Println(">>>>>> [Debug] Running backup process: ", backupConfig)
 
 	//Check if the target disk is writable and mounted
@@ -223,13 +309,13 @@ func HandleBackupProcess(backupConfig *BackupConfig) (string, error) {
 }
 
 //Restore accidentally removed file from backup
-func HandleRestore(backupConfig *BackupConfig, targetFile string) error {
+func HandleRestore(backupConfig *BackupTask, targetFile string) error {
 
 	return nil
 }
 
 //List the file that is restorable from the given disk
-func ListRestorable(ackupConfig *BackupConfig) {
+func ListRestorable(backupConfig *BackupTask) {
 
 }
 

+ 35 - 6
mod/storage/storage.go

@@ -9,15 +9,18 @@ package storage
 */
 
 import (
+	"log"
 	"os"
 
 	fs "imuslab.com/arozos/mod/filesystem"
+	"imuslab.com/arozos/mod/filesystem/hybridBackup"
 )
 
 type StoragePool struct {
-	Owner           string                  //Owner of the storage pool, also act as the resolver's username
-	OtherPermission string                  //Permissions on other users but not the owner
-	Storages        []*fs.FileSystemHandler //Storage pool accessable by this owner
+	Owner              string                  //Owner of the storage pool, also act as the resolver's username
+	OtherPermission    string                  //Permissions on other users but not the owner
+	Storages           []*fs.FileSystemHandler //Storage pool accessible by this owner
+	HyperBackupManager *hybridBackup.Manager   //HyperBackup Manager
 }
 
 /*
@@ -34,17 +37,43 @@ func init() {
 
 //Create a new StoragePool objects with given uuids
 func NewStoragePool(fsHandlers []*fs.FileSystemHandler, owner string) (*StoragePool, error) {
+	//Create new HyperBackup Manager
+	backupManager := hybridBackup.NewHyperBackupManager()
+
 	//Move all fshandler into the storageHandler
 	storageHandlers := []*fs.FileSystemHandler{}
 	for _, fsHandler := range fsHandlers {
 		//Move the handler pointer to the target
 		storageHandlers = append(storageHandlers, fsHandler)
+
+		if fsHandler.Hierarchy == "backup" {
+			//Backup disk. Build the Hierarchy Config for this drive
+			backupConfig := fsHandler.HierarchyConfig.(hybridBackup.BackupTask)
+
+			//Resolve parent path for backup File System Handler
+			parentExists := false
+			for _, potentialParnet := range fsHandlers {
+				if potentialParnet.UUID == backupConfig.ParentUID {
+					//This is the parent
+					backupConfig.ParentPath = potentialParnet.Path
+					parentExists = true
+				}
+			}
+
+			if parentExists {
+				backupManager.AddTask(&backupConfig)
+			} else {
+				log.Println("*ERROR* Backup disk " + backupConfig.DiskUID + ":/ source disk not found: " + backupConfig.ParentUID + ":/ not exists!")
+			}
+
+		}
 	}
 
 	return &StoragePool{
-		Owner:           owner,
-		OtherPermission: "readonly",
-		Storages:        storageHandlers,
+		Owner:              owner,
+		OtherPermission:    "readonly",
+		Storages:           storageHandlers,
+		HyperBackupManager: backupManager,
 	}, nil
 }
 

+ 0 - 37
storage.go

@@ -8,8 +8,6 @@ import (
 	"path/filepath"
 	"runtime"
 
-	"imuslab.com/arozos/mod/filesystem/hybridBackup"
-
 	"imuslab.com/arozos/mod/permission"
 
 	fs "imuslab.com/arozos/mod/filesystem"
@@ -111,42 +109,7 @@ func LoadBaseStoragePool() error {
 	This function must be called after the scheduler initiated.
 */
 func FilesystemDaemonInit() {
-	for _, thisHandler := range fsHandlers {
-		if thisHandler.Hierarchy == "backup" {
-			//This is a backup drive. Generate it handler
-			backupConfig := thisHandler.HierarchyConfig.(hybridBackup.BackupConfig)
-
-			//Get its parent mount point for backup
-			parentFileSystemHandler, err := GetFsHandlerByUUID(backupConfig.ParentUID)
-			if err != nil {
-				log.Println("Virtual Root with UUID: " + backupConfig.ParentUID + " not loaded. Unable to start backup process.")
-				break
-			}
-
-			backupConfig.JobName = "backup-daemon [" + thisHandler.UUID + "]"
-			backupConfig.ParentPath = parentFileSystemHandler.Path
-			backupConfig.CycleCounter = 1
 
-			//Debug backup execution
-			hybridBackup.HandleBackupProcess(&backupConfig)
-
-			//Remove the previous job if it exists
-			if systemScheduler.JobExists(backupConfig.JobName) {
-				systemScheduler.RemoveJobFromScheduleList(backupConfig.JobName)
-			}
-
-			//Create a scheudler for this disk
-			systemScheduler.CreateNewScheduledFunctionJob(backupConfig.JobName,
-				"Backup daemon from "+backupConfig.ParentUID+":/ to "+backupConfig.DiskUID+":/",
-				60,
-				func() (string, error) {
-					return hybridBackup.HandleBackupProcess(&backupConfig)
-				},
-			)
-		}
-
-		//Add other type of handler here
-	}
 }
 
 //Initialize group storage pool