// cmd/keepsync/cmd/sync.go
  1  // Package cmd implements the command-line interface for KeepSync
  2  package cmd
  3  
import (
	"context"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	"keepSync/internal/providers"
)
 17  
// getConfig returns the full configuration from viper as a map.
// viper.AllSettings merges all configuration sources it knows about
// (config file, environment, defaults) into a single nested map.
func getConfig() map[string]interface{} {
	return viper.AllSettings()
}
 22  
// syncConfig holds configuration for advanced sync operations.
// It is populated from the sync command's CLI flags and passed to
// performAdvancedSyncFixed and the plan-building helpers.
type syncConfig struct {
	Bidirectional    bool   // also download remote-only files, not just upload
	ConflictStrategy string // conflict resolution strategy ("timestamp", "size", "manual", "skip"); logged but not otherwise used in this file
	Parallel         bool   // enable parallel transfers; logged but not otherwise used in this file
	Workers          int    // number of parallel workers; logged but not otherwise used in this file
	ChunkSize        int64  // transfer chunk size in bytes
	Recursive        bool   // recurse into subdirectories when discovering local files
	Delete           bool   // delete remote files that no longer exist locally
	Force            bool   // log and continue past individual transfer failures
	IncludePattern   string // include-only pattern; NOTE(review): not applied by discoverLocalFiles — confirm where filtering happens
	ExcludePattern   string // exclusion pattern; NOTE(review): not applied by discoverLocalFiles — confirm where filtering happens
}
 36  
var (
	// Sync command flags, bound to the cobra flag set in init() below.
	syncRecursive      bool
	syncDelete         bool
	syncForce          bool
	syncDryRun         bool
	syncProviderType   string
	syncEncrypt        bool
	syncCompress       bool
	syncIncludePattern string
	syncExcludePattern string
	syncTimeout        int
	// TPM configuration flags
	syncTPMDevice string
	syncKeyName   string
	// Advanced sync features
	syncBidirectional    bool
	syncConflictStrategy string
	syncParallel         bool
	syncWorkers          int
	syncChunkSize        int64
)
 59  
 60  // syncCmd represents the sync command
 61  var syncCmd = &cobra.Command{
 62  	Use:   "sync [local-path] [remote-path]",
 63  	Short: "Synchronize files between local and remote storage",
 64  	Long: `Synchronize files between local and remote storage with quantum encryption.
 65  	
 66  Examples:
 67    keepsync sync /path/to/local/dir s3://bucket/remote/dir
 68    keepsync sync /path/to/local/dir webdav://server/remote/dir
 69    keepsync sync /path/to/local/dir sftp://server/remote/dir`,
 70  	Args: cobra.ExactArgs(2),
 71  	RunE: func(cmd *cobra.Command, args []string) error {
 72  		localPath := args[0]
 73  		remotePath := args[1]
 74  
 75  		// Check if local path exists
 76  		if _, err := os.Stat(localPath); os.IsNotExist(err) {
 77  			return fmt.Errorf("local path does not exist: %s", localPath)
 78  		}
 79  
 80  		// Parse remote path to determine provider type and extract bucket/endpoint info
 81  		providerType, bucket, remotePath, err := parseRemotePathWithBucket(remotePath)
 82  		if err != nil {
 83  			return err
 84  		}
 85  
 86  		// Override provider type if specified
 87  		if syncProviderType != "" {
 88  			providerType = syncProviderType
 89  		}
 90  
 91  		logger.Info("Synchronizing %s to %s (bucket: %s) using quantum-enhanced %s provider", localPath, remotePath, bucket, providerType)
 92  
 93  		// Create context with timeout
 94  		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(syncTimeout)*time.Second)
 95  		defer cancel()
 96  
 97  		// Set TPM defaults if not specified
 98  		tmpDevice := syncTPMDevice
 99  		if tmpDevice == "" {
100  			tmpDevice = "/dev/tpmrm0" // Default to hardware TPM resource manager
101  		}
102  		keyName := syncKeyName
103  		if keyName == "" {
104  			keyName = fmt.Sprintf("%s-quantum-key", providerType)
105  		}
106  
107  		// Load provider configuration from config file
108  		config := getConfig()
109  		var providerConfig map[string]interface{}
110  
111  		// Get provider-specific config from the config file
112  		if providers, ok := config["providers"].(map[string]interface{}); ok {
113  			if s3Config, ok := providers[providerType].(map[string]interface{}); ok {
114  				providerConfig = s3Config
115  			}
116  		}
117  
118  		if providerConfig == nil {
119  			return fmt.Errorf(`no configuration found for provider type: %s
120  
121  To fix this issue:
122  1. Set environment variables:
123     export S3_BUCKET=%s
124     export AWS_ACCESS_KEY_ID=your-access-key
125     export AWS_SECRET_ACCESS_KEY=your-secret-key
126     export S3_REGION=us-east-1
127  
128  2. Or create ~/.keepsync/config.json with provider configuration
129     See sample-config.json for example
130  
131  3. Then run: keepsync sync %s %s --verbose`, providerType, bucket, localPath, remotePath)
132  		}
133  
134  		// Create quantum-enhanced provider config with credentials from config file
135  		basicConfig := map[string]interface{}{
136  			"bucket":      bucket,     // Set the bucket name properly
137  			"remote_path": remotePath, // Path within the bucket
138  			"encrypt":     syncEncrypt,
139  			"compress":    syncCompress,
140  			"recursive":   syncRecursive,
141  			"delete":      syncDelete,
142  			"force":       syncForce,
143  			// TPM configuration
144  			"tpm_device": tmpDevice,
145  			"key_name":   keyName,
146  			// Chunking configuration - pass CLI chunk size to provider
147  			// For small chunks (< 1MB), pass as bytes directly to avoid truncation
148  			"chunk_size_bytes": syncChunkSize, // Pass bytes directly
149  			"enable_chunking":  true,
150  			// S3 credentials from config file
151  			"access_key":     providerConfig["access_key"],
152  			"secret_key":     providerConfig["secret_key"],
153  			"endpoint":       providerConfig["endpoint"],
154  			"region":         providerConfig["region"],
155  			"use_path_style": providerConfig["use_path_style"],
156  		}
157  
158  		logger.Info("TPM Configuration: device=%s, key=%s", tmpDevice, keyName)
159  
160  		// Create provider using dynamic quantum factory
161  		provider, err := providers.CreateProviderFromType(ctx, providerType, basicConfig)
162  		if err != nil {
163  			return fmt.Errorf("failed to create quantum provider %s: %w", providerType, err)
164  		}
165  		defer provider.Close()
166  
167  		// Perform sync
168  		if syncDryRun {
169  			logger.Info("Dry run - would sync %s to %s", localPath, remotePath)
170  
171  			// Show capabilities of the quantum provider
172  			factory := providers.GetDefaultFactory()
173  			caps := factory.GetProviderCapabilities(providerType)
174  			logger.Info("Provider capabilities: %v", caps)
175  			return nil
176  		}
177  
178  		// Implement advanced sync logic with bidirectional capabilities
179  		err = performAdvancedSyncFixed(ctx, provider, localPath, remotePath, &syncConfig{
180  			Bidirectional:    syncBidirectional,
181  			ConflictStrategy: syncConflictStrategy,
182  			Parallel:         syncParallel,
183  			Workers:          syncWorkers,
184  			ChunkSize:        syncChunkSize,
185  			Recursive:        syncRecursive,
186  			Delete:           syncDelete,
187  			Force:            syncForce,
188  			IncludePattern:   syncIncludePattern,
189  			ExcludePattern:   syncExcludePattern,
190  		})
191  		if err != nil {
192  			return fmt.Errorf("sync failed: %w", err)
193  		}
194  
195  		logger.Info("Advanced quantum sync completed successfully with provider: %s", providerType)
196  		logger.Info("Features: encryption=%t, compression=%t, bidirectional=%t, parallel=%t, workers=%d",
197  			syncEncrypt, syncCompress, syncBidirectional, syncParallel, syncWorkers)
198  
199  		return nil
200  	},
201  }
202  
// parseRemotePath parses a remote path to determine the provider type.
// It returns the provider type ("s3", "webdav", or "sftp") and the remainder
// of the path with the scheme prefix stripped. Paths with no recognized
// scheme default to the s3 provider and are returned unchanged.
//
// Fixed: the previous manual length/slice checks required at least one
// character after the scheme, so an input of exactly "s3://" fell through to
// the default branch and returned the scheme itself as the path. Using
// strings.HasPrefix/TrimPrefix handles that edge correctly.
func parseRemotePath(remotePath string) (string, string, error) {
	// Recognized URL schemes mapped to provider types, checked in order.
	schemes := []struct {
		prefix   string
		provider string
	}{
		{"s3://", "s3"},
		{"webdav://", "webdav"},
		{"sftp://", "sftp"},
	}
	for _, s := range schemes {
		if strings.HasPrefix(remotePath, s.prefix) {
			return s.provider, strings.TrimPrefix(remotePath, s.prefix), nil
		}
	}

	// Default to s3 provider if no prefix
	return "s3", remotePath, nil
}
221  
// parseRemotePathWithBucket parses a remote path and extracts the provider
// type, the bucket/host component, and the remaining path separately.
// For s3:// URLs the first path segment is the bucket and the rest is the
// in-bucket path; for webdav:// and sftp:// the whole remainder is the
// endpoint/host and the path is empty. Unprefixed inputs default to the s3
// provider with the whole input as the bucket.
//
// Fixed: the previous manual length/slice checks required at least one
// character after the scheme, so an input of exactly "s3://" fell through to
// the default branch. strings.HasPrefix/TrimPrefix handles that edge.
func parseRemotePathWithBucket(remotePath string) (string, string, string, error) {
	if strings.HasPrefix(remotePath, "s3://") {
		// Split bucket and path: s3://bucket/path -> bucket="bucket", path="path"
		parts := strings.SplitN(strings.TrimPrefix(remotePath, "s3://"), "/", 2)
		path := ""
		if len(parts) > 1 {
			path = parts[1]
		}
		return "s3", parts[0], path, nil
	}

	if strings.HasPrefix(remotePath, "webdav://") {
		// For WebDAV, the entire remainder is the endpoint
		return "webdav", strings.TrimPrefix(remotePath, "webdav://"), "", nil
	}

	if strings.HasPrefix(remotePath, "sftp://") {
		// For SFTP, the entire remainder is the host/path
		return "sftp", strings.TrimPrefix(remotePath, "sftp://"), "", nil
	}

	// Default to s3 provider if no prefix
	return "s3", remotePath, "", nil
}
252  
// performAdvancedSyncFixed implements advanced synchronization with
// bidirectional support. It runs four phases: (1) discover local and remote
// files, (2) build a SyncPlan from the differences, (3) execute uploads,
// then downloads (bidirectional only), then deletions (delete only), and
// (4) re-list the remote side as a sanity check. With config.Force set,
// individual transfer failures are logged and skipped instead of aborting.
//
// NOTE(review): config.Parallel, config.Workers and config.ConflictStrategy
// are only logged here, never acted upon — confirm whether the provider
// layer applies them.
func performAdvancedSyncFixed(ctx context.Context, provider interface{}, localPath, remotePath string, config *syncConfig) error {
	logger.Info("Starting advanced sync: local=%s, remote=%s", localPath, remotePath)
	logger.Info("Config: bidirectional=%t, conflict=%s, parallel=%t, workers=%d, chunk_size=%d",
		config.Bidirectional, config.ConflictStrategy, config.Parallel, config.Workers, config.ChunkSize)

	// The provider arrives as interface{}; it must implement the quantum
	// storage interface to be usable here.
	quantumProvider, ok := provider.(providers.QuantumStorageProvider)
	if !ok {
		return fmt.Errorf("provider does not implement QuantumStorageProvider interface, type: %T", provider)
	}

	// Phase 1: Discovery and comparison
	logger.Info("Phase 1: Discovering files and directories...")

	// Discover local files (paths relative to localPath)
	localFiles, err := discoverLocalFiles(localPath, config)
	if err != nil {
		return fmt.Errorf("failed to discover local files: %w", err)
	}
	logger.Info("Found %d local files", len(localFiles))

	// Discover remote files (this triggers incompatible file detection and cleanup)
	logger.Info("🔍 SYNC DEBUG: About to call discoverRemoteFiles with remotePath: '%s' (length: %d)", remotePath, len(remotePath))
	remoteFiles, err := discoverRemoteFiles(ctx, quantumProvider, remotePath, config)
	if err != nil {
		return fmt.Errorf("failed to discover remote files: %w", err)
	}
	logger.Info("Found %d remote files", len(remoteFiles))

	// Phase 2: Sync planning and conflict detection
	logger.Info("Phase 2: Planning sync operations...")

	// Create sync plan based on file differences.
	// NOTE(review): this calls createSyncPlan, while createSyncPlanFixed
	// exists elsewhere in this file — confirm which plan builder is intended.
	syncPlan := createSyncPlan(localFiles, remoteFiles, config)
	logger.Info("Sync plan: %d uploads, %d downloads, %d deletions",
		len(syncPlan.UploadsNeeded), len(syncPlan.DownloadsNeeded), len(syncPlan.DeletionsNeeded))

	// Phase 3: Execute sync operations
	logger.Info("Phase 3: Executing sync operations...")

	// Upload local files to remote.
	// NOTE(review): filepath.Join uses the OS path separator; on Windows the
	// remote key would contain backslashes — confirm remote keys are always
	// expected to be slash-separated.
	for _, localFile := range syncPlan.UploadsNeeded {
		localFilePath := filepath.Join(localPath, localFile)
		remoteFilePath := filepath.Join(remotePath, localFile)
		logger.Info("Uploading: %s -> %s", localFilePath, remoteFilePath)
		if err := quantumProvider.Upload(ctx, localFilePath, remoteFilePath); err != nil {
			logger.Warn("Failed to upload %s: %v", localFilePath, err)
			if !config.Force {
				return fmt.Errorf("upload failed: %w", err)
			}
		}
	}

	// Download remote files to local (if bidirectional)
	if config.Bidirectional {
		for _, remoteFile := range syncPlan.DownloadsNeeded {
			// Extract the relative file path by removing the remote path prefix
			relativeFile := remoteFile
			if strings.HasPrefix(remoteFile, remotePath+"/") {
				relativeFile = strings.TrimPrefix(remoteFile, remotePath+"/")
			} else if remoteFile == remotePath {
				// Handle case where remoteFile equals remotePath exactly
				relativeFile = filepath.Base(remoteFile)
			}

			localFilePath := filepath.Join(localPath, relativeFile)
			// Use the full remote file path as returned by List() - don't join with remotePath again
			remoteFilePath := remoteFile
			logger.Info("Downloading: %s -> %s", remoteFilePath, localFilePath)
			if err := quantumProvider.Download(ctx, remoteFilePath, localFilePath); err != nil {
				logger.Warn("Failed to download %s: %v", remoteFilePath, err)
				if !config.Force {
					return fmt.Errorf("download failed: %w", err)
				}
			}
		}
	}

	// Delete remote files that no longer exist locally (if delete enabled)
	if config.Delete {
		for _, remoteFile := range syncPlan.DeletionsNeeded {
			logger.Info("Deleting remote file: %s", remoteFile)
			if err := quantumProvider.Delete(ctx, remoteFile); err != nil {
				logger.Warn("Failed to delete %s: %v", remoteFile, err)
				if !config.Force {
					return fmt.Errorf("deletion failed: %w", err)
				}
			}
		}
	}

	// Phase 4: Verification
	logger.Info("Phase 4: Sync verification...")

	// Re-list remote files to verify sync; a failure here is non-fatal.
	// NOTE(review): this lists with an empty prefix rather than remotePath —
	// confirm that is intentional.
	verifyRemoteFiles, err := quantumProvider.List(ctx, "")
	if err != nil {
		logger.Warn("Failed to verify remote files: %v", err)
	} else {
		logger.Info("Verification: %d remote files after sync", len(verifyRemoteFiles))
	}

	logger.Info("Advanced sync completed successfully")
	return nil
}
359  
// SyncPlan represents the operations needed for synchronization.
// It is produced by the plan-building functions and executed by
// performAdvancedSyncFixed.
type SyncPlan struct {
	UploadsNeeded   []string // Local files (relative paths) that need to be uploaded
	DownloadsNeeded []string // Remote files (full paths as returned by List) that need to be downloaded
	DeletionsNeeded []string // Remote files that need to be deleted
}
366  
367  // createSyncPlan analyzes local and remote files to determine sync operations needed
368  func createSyncPlan(localFiles, remoteFiles []string, config *syncConfig) *SyncPlan {
369  	plan := &SyncPlan{
370  		UploadsNeeded:   []string{},
371  		DownloadsNeeded: []string{},
372  		DeletionsNeeded: []string{},
373  	}
374  
375  	// Create maps for efficient lookup
376  	localFileMap := make(map[string]bool)
377  	remoteFileMap := make(map[string]bool)
378  
379  	for _, file := range localFiles {
380  		localFileMap[file] = true
381  	}
382  	for _, file := range remoteFiles {
383  		remoteFileMap[file] = true
384  	}
385  
386  	// Determine if this is an initial sync scenario
387  	localEmpty := len(localFiles) == 0
388  	remoteEmpty := len(remoteFiles) == 0
389  
390  	logger.Info("Sync analysis: local=%d files, remote=%d files, localEmpty=%t, remoteEmpty=%t",
391  		len(localFiles), len(remoteFiles), localEmpty, remoteEmpty)
392  
393  	// Smart initial sync logic
394  	if localEmpty && !remoteEmpty {
395  		// Local is empty, remote has files -> download all remote files
396  		logger.Info("Initial sync: Local directory empty, downloading all remote files")
397  		for _, remoteFile := range remoteFiles {
398  			plan.DownloadsNeeded = append(plan.DownloadsNeeded, remoteFile)
399  		}
400  		return plan
401  	}
402  
403  	if !localEmpty && remoteEmpty {
404  		// Remote is empty, local has files -> upload all local files
405  		logger.Info("Initial sync: Remote directory empty, uploading all local files")
406  		for _, localFile := range localFiles {
407  			plan.UploadsNeeded = append(plan.UploadsNeeded, localFile)
408  		}
409  		return plan
410  	}
411  
412  	// Both have files or both are empty -> normal sync logic
413  	logger.Info("Normal sync: Analyzing file differences")
414  
415  	// Find files that need to be uploaded (exist locally but not remotely)
416  	for _, localFile := range localFiles {
417  		if !remoteFileMap[localFile] {
418  			plan.UploadsNeeded = append(plan.UploadsNeeded, localFile)
419  		}
420  	}
421  
422  	// Handle files that exist remotely but not locally
423  	for _, remoteFile := range remoteFiles {
424  		if !localFileMap[remoteFile] {
425  			// SMART DELETION LOGIC: Default behavior should be deletion for sync operations
426  			// This matches user expectations: when you delete a file locally, it should be deleted remotely
427  			if !localEmpty && !remoteEmpty {
428  				// Normal sync scenario: file exists remotely but not locally = deletion
429  				logger.Info("File deleted locally, will delete remotely: %s", remoteFile)
430  				plan.DeletionsNeeded = append(plan.DeletionsNeeded, remoteFile)
431  			} else if config.Bidirectional && !config.Delete {
432  				// Only download if explicitly bidirectional AND delete is disabled
433  				logger.Info("Bidirectional sync: File exists remotely but not locally, will download: %s", remoteFile)
434  				plan.DownloadsNeeded = append(plan.DownloadsNeeded, remoteFile)
435  			} else if config.Delete {
436  				// Explicit delete flag -> delete remotely
437  				logger.Info("Delete flag enabled: File deleted locally, will delete remotely: %s", remoteFile)
438  				plan.DeletionsNeeded = append(plan.DeletionsNeeded, remoteFile)
439  			}
440  			// Default: treat missing local files as deletions (matches user expectations)
441  		}
442  	}
443  
444  	return plan
445  }
446  
447  // discoverLocalFiles discovers files in the local directory
448  func discoverLocalFiles(localPath string, config *syncConfig) ([]string, error) {
449  	var files []string
450  
451  	if config.Recursive {
452  		// Recursively walk the directory
453  		err := filepath.Walk(localPath, func(path string, info os.FileInfo, err error) error {
454  			if err != nil {
455  				return err
456  			}
457  			if !info.IsDir() {
458  				// Convert to relative path
459  				relPath, err := filepath.Rel(localPath, path)
460  				if err != nil {
461  					return err
462  				}
463  				files = append(files, relPath)
464  			}
465  			return nil
466  		})
467  		if err != nil {
468  			return nil, fmt.Errorf("failed to walk directory %s: %w", localPath, err)
469  		}
470  	} else {
471  		// List files in directory only (non-recursive)
472  		entries, err := os.ReadDir(localPath)
473  		if err != nil {
474  			return nil, fmt.Errorf("failed to read directory %s: %w", localPath, err)
475  		}
476  		for _, entry := range entries {
477  			if !entry.IsDir() {
478  				files = append(files, entry.Name())
479  			}
480  		}
481  	}
482  
483  	return files, nil
484  }
485  
486  // discoverRemoteFiles discovers files on remote storage using the provider's List method
487  // This triggers incompatible file detection and cleanup in quantum providers
488  func discoverRemoteFiles(ctx context.Context, provider interface{}, remotePath string, config *syncConfig) ([]string, error) {
489  	logger.Info("🔍 DEBUG: discoverRemoteFiles function called with provider type: %T", provider)
490  	logger.Info("🔍 DEBUG: remotePath parameter - length: %d", len(remotePath))
491  	logger.Info("🔍 DEBUG: remotePath parameter - value: [%s]", remotePath)
492  	logger.Info("🔍 DEBUG: About to call quantumProvider.List(ctx, '%s')", remotePath)
493  
494  	// CRITICAL DEBUG: Log the exact parameter being passed to List
495  	logger.Info("🚨 CRITICAL: Calling List with remotePath='%s' (len=%d)", remotePath, len(remotePath))
496  
497  	// Type assertion to access the QuantumStorageProvider interface
498  	quantumProvider, ok := provider.(providers.QuantumStorageProvider)
499  	if !ok {
500  		logger.Warn("Provider does not implement QuantumStorageProvider interface, skipping remote discovery. Provider type: %T", provider)
501  		return []string{}, nil
502  	}
503  
504  	logger.Info("✅ Type assertion successful - provider implements QuantumStorageProvider")
505  	logger.Info("Calling provider List method to discover remote files and detect incompatible files...")
506  
507  	// Call the provider's List method - this will trigger incompatible file detection and cleanup
508  	// in quantum-enhanced providers like the S3 provider
509  	files, err := quantumProvider.List(ctx, remotePath)
510  	if err != nil {
511  		logger.Warn("Failed to list remote files: %v", err)
512  		return nil, fmt.Errorf("failed to list remote files: %w", err)
513  	}
514  
515  	logger.Info("Remote discovery completed, found %d files", len(files))
516  	return files, nil
517  }
518  
// init registers the sync command and its flag set with the root command.
func init() {
	rootCmd.AddCommand(syncCmd)

	// Add flags
	syncCmd.Flags().BoolVarP(&syncRecursive, "recursive", "r", true, "Recursively synchronize directories")
	syncCmd.Flags().BoolVarP(&syncDelete, "delete", "d", false, "Delete files in destination that don't exist in source")
	syncCmd.Flags().BoolVarP(&syncForce, "force", "f", false, "Force overwrite of existing files")
	syncCmd.Flags().BoolVar(&syncDryRun, "dry-run", false, "Show what would be done without actually doing it")
	syncCmd.Flags().StringVar(&syncProviderType, "provider", "", "Override provider type (s3, webdav, sftp)")
	syncCmd.Flags().BoolVar(&syncEncrypt, "encrypt", true, "Enable quantum encryption (default: true)")
	syncCmd.Flags().BoolVar(&syncCompress, "compress", true, "Enable compression (default: true)")
	syncCmd.Flags().StringVar(&syncIncludePattern, "include", "", "Include only files matching pattern")
	syncCmd.Flags().StringVar(&syncExcludePattern, "exclude", "", "Exclude files matching pattern")
	syncCmd.Flags().IntVar(&syncTimeout, "timeout", 3600, "Timeout in seconds")
	// TPM configuration flags.
	// NOTE(review): the flag name "tmp-device" looks like a typo for
	// "tpm-device"; it is kept as-is because renaming would break existing
	// CLI invocations — confirm before changing.
	syncCmd.Flags().StringVar(&syncTPMDevice, "tmp-device", "", "TPM device path (default: /dev/tpmrm0)")
	syncCmd.Flags().StringVar(&syncKeyName, "key-name", "", "TPM key name (default: provider-quantum-key)")
	// Advanced sync features
	syncCmd.Flags().BoolVar(&syncBidirectional, "bidirectional", false, "Enable bidirectional synchronization")
	syncCmd.Flags().StringVar(&syncConflictStrategy, "conflict", "timestamp", "Conflict resolution strategy (timestamp, size, manual, skip)")
	syncCmd.Flags().BoolVar(&syncParallel, "parallel", true, "Enable parallel file transfers (default: true)")
	syncCmd.Flags().IntVar(&syncWorkers, "workers", 4, "Number of parallel workers")
	syncCmd.Flags().Int64Var(&syncChunkSize, "chunk-size", 1048576, "Chunk size in bytes (default: 1MB)")
}
543  // createSyncPlanFixed analyzes local and remote files to determine sync operations needed
544  // This fixed version properly handles file deletions and modifications
545  func createSyncPlanFixed(localFiles, remoteFiles []string, config *syncConfig) *SyncPlan {
546  	plan := &SyncPlan{
547  		UploadsNeeded:   []string{},
548  		DownloadsNeeded: []string{},
549  		DeletionsNeeded: []string{},
550  	}
551  
552  	// Create maps for efficient lookup
553  	localFileMap := make(map[string]bool)
554  	remoteFileMap := make(map[string]bool)
555  
556  	for _, file := range localFiles {
557  		localFileMap[file] = true
558  	}
559  	for _, file := range remoteFiles {
560  		remoteFileMap[file] = true
561  	}
562  
563  	// Determine if this is an initial sync scenario
564  	localEmpty := len(localFiles) == 0
565  	remoteEmpty := len(remoteFiles) == 0
566  
567  	logger.Info("FIXED Sync analysis: local=%d files, remote=%d files, localEmpty=%t, remoteEmpty=%t",
568  		len(localFiles), len(remoteFiles), localEmpty, remoteEmpty)
569  
570  	// Smart initial sync logic
571  	if localEmpty && !remoteEmpty {
572  		// Local is empty, remote has files -> download all remote files (only if bidirectional)
573  		if config.Bidirectional {
574  			logger.Info("Initial sync: Local directory empty, downloading all remote files (bidirectional mode)")
575  			for _, remoteFile := range remoteFiles {
576  				plan.DownloadsNeeded = append(plan.DownloadsNeeded, remoteFile)
577  			}
578  		} else {
579  			logger.Info("Initial sync: Local directory empty, but not bidirectional - no downloads")
580  		}
581  		return plan
582  	}
583  
584  	if !localEmpty && remoteEmpty {
585  		// Remote is empty, local has files -> upload all local files
586  		logger.Info("Initial sync: Remote directory empty, uploading all local files")
587  		for _, localFile := range localFiles {
588  			plan.UploadsNeeded = append(plan.UploadsNeeded, localFile)
589  		}
590  		return plan
591  	}
592  
593  	// Both have files or both are empty -> normal sync logic with proper state tracking
594  	logger.Info("Normal sync: Analyzing file differences with FIXED logic")
595  
596  	// FIXED SYNC LOGIC: Handle all file states properly
597  
598  	// 1. Files that exist locally but not remotely -> UPLOAD
599  	for _, localFile := range localFiles {
600  		if !remoteFileMap[localFile] {
601  			logger.Info("New local file detected, will upload: %s", localFile)
602  			plan.UploadsNeeded = append(plan.UploadsNeeded, localFile)
603  		}
604  	}
605  
606  	// 2. Files that exist remotely but not locally -> DELETE or DOWNLOAD based on explicit flags only
607  	for _, remoteFile := range remoteFiles {
608  		if !localFileMap[remoteFile] {
609  			// CRITICAL FIX: Only delete if explicitly requested with --delete flag
610  			if config.Delete {
611  				// Explicit delete flag -> delete remotely
612  				logger.Info("Delete flag enabled: File deleted locally, will delete remotely: %s", remoteFile)
613  				plan.DeletionsNeeded = append(plan.DeletionsNeeded, remoteFile)
614  			} else if config.Bidirectional {
615  				// Bidirectional mode: download missing files
616  				logger.Info("Bidirectional sync: File exists remotely but not locally, will download: %s", remoteFile)
617  				plan.DownloadsNeeded = append(plan.DownloadsNeeded, remoteFile)
618  			} else {
619  				// Default unidirectional: ignore remote-only files (don't delete or download)
620  				logger.Info("Unidirectional sync: Ignoring remote-only file (no action): %s", remoteFile)
621  			}
622  		}
623  	}
624  
625  	// 3. Files that exist both locally and remotely -> CHECK FOR MODIFICATIONS
626  	// FIXED: Don't automatically upload all files that exist in both places
627  	// Instead, only upload if we detect actual changes (for now, we'll be conservative)
628  	for _, localFile := range localFiles {
629  		if remoteFileMap[localFile] {
630  			// File exists in both places - for now, assume local is authoritative
631  			// This can be enhanced with timestamp/checksum comparison later
632  			logger.Info("File exists both locally and remotely, will update remote: %s", localFile)
633  			plan.UploadsNeeded = append(plan.UploadsNeeded, localFile)
634  		}
635  	}
636  
637  	logger.Info("FIXED Sync plan created: %d uploads, %d downloads, %d deletions",
638  		len(plan.UploadsNeeded), len(plan.DownloadsNeeded), len(plan.DeletionsNeeded))
639  
640  	return plan
641  }