integrate-enhanced-chunking.sh
#!/bin/bash

# Script to integrate the enhanced adaptive chunking system with the provider system
# This script will:
# 1. Back up existing provider files
# 2. Create enhanced versions of all providers
# 3. Update the provider factory
# 4. Create integration tests

set -e

echo "Enhanced Adaptive Chunking Integration Script"
echo "============================================="

# Create backup directory
BACKUP_DIR="backups/providers-$(date +%Y%m%d-%H%M%S)"
mkdir -p "$BACKUP_DIR"

echo "Creating backups in $BACKUP_DIR..."

# Back up existing provider files
cp -r cmd/keepsync-cli/services/provider_*.go "$BACKUP_DIR/"
cp -r cmd/keepsync-cli-standalone/sync/*_provider.go "$BACKUP_DIR/"
cp -r internal/cloud/provider "$BACKUP_DIR/"

echo "Backups created successfully."

# Create enhanced provider adapter
echo "Creating enhanced provider adapter..."
cat > cmd/keepsync-cli/services/enhanced_provider_adapter.go << 'EOF'
package services

import (
	"context"
	"fmt"
	"io"
	"path/filepath"
)

// EnhancedProviderAdapter wraps a StorageProvider with enhanced adaptive chunking
type EnhancedProviderAdapter struct {
	provider StorageProvider
	chunker  *EnhancedAdaptiveChunker
}

// NewEnhancedProviderAdapter creates a new enhanced provider adapter
func NewEnhancedProviderAdapter(provider StorageProvider) *EnhancedProviderAdapter {
	options := DefaultEnhancedAdaptiveChunkingOptions()
	options.ProviderType = provider.GetProviderType()

	return &EnhancedProviderAdapter{
		provider: provider,
		chunker:  NewEnhancedAdaptiveChunker(options),
	}
}

// GetProviderType returns the provider type
func (a *EnhancedProviderAdapter) GetProviderType() string {
	return a.provider.GetProviderType() + "-enhanced"
}

// UploadFile uploads a file using enhanced adaptive chunking
func (a *EnhancedProviderAdapter) UploadFile(ctx context.Context, localPath, remotePath string) error {
	// Chunk the file
	chunks, err := a.chunker.ChunkFile(ctx, localPath, "upload")
	if err != nil {
		return fmt.Errorf("error chunking file: %w", err)
	}

	// Read the file data
	data, err := ReadFile(localPath)
	if err != nil {
		return fmt.Errorf("error reading file: %w", err)
	}

	// Split data into chunks
	var chunkData [][]byte
	for _, chunk := range chunks {
		start := chunk.Offset
		end := start + chunk.Size
		if end > int64(len(data)) {
			end = int64(len(data))
		}
		chunkData = append(chunkData, data[start:end])
	}

	// Upload each chunk
	for i := range chunks {
		chunkRemotePath := fmt.Sprintf("%s.chunk.%d", remotePath, i)
		err := a.provider.UploadData(ctx, chunkData[i], chunkRemotePath)
		if err != nil {
			return fmt.Errorf("error uploading chunk %d: %w", i, err)
		}
	}

	// Create manifest file
	manifestPath := remotePath + ".manifest"
	manifestData := SerializeChunks(chunks, remotePath)
	err = a.provider.UploadData(ctx, manifestData, manifestPath)
	if err != nil {
		return fmt.Errorf("error uploading manifest: %w", err)
	}

	return nil
}

// DownloadFile downloads a file using enhanced adaptive chunking
func (a *EnhancedProviderAdapter) DownloadFile(ctx context.Context, remotePath, localPath string) error {
	// Check if the file is chunked
	manifestPath := remotePath + ".manifest"
	manifestData, err := a.provider.DownloadData(ctx, manifestPath)

	// If manifest doesn't exist, use regular download
	if err != nil {
		return a.provider.DownloadFile(ctx, remotePath, localPath)
	}

	// Parse manifest
	chunks, _ := DeserializeChunks(manifestData)
	if len(chunks) == 0 {
		return a.provider.DownloadFile(ctx, remotePath, localPath)
	}

	// Download each chunk
	var chunkData [][]byte
	for i := range chunks {
		chunkRemotePath := fmt.Sprintf("%s.chunk.%d", remotePath, i)
		data, err := a.provider.DownloadData(ctx, chunkRemotePath)
		if err != nil {
			return fmt.Errorf("error downloading chunk %d: %w", i, err)
		}
		chunkData = append(chunkData, data)
	}

	// Create parent directory if it doesn't exist
	dir := filepath.Dir(localPath)
	if err := CreateDirectoryIfNotExists(dir); err != nil {
		return fmt.Errorf("error creating directory: %w", err)
	}

	// Write chunks to file
	err = a.chunker.WriteChunks(ctx, localPath, chunkData)
	if err != nil {
		return fmt.Errorf("error writing chunks: %w", err)
	}

	return nil
}

// UploadData uploads data using enhanced adaptive chunking
func (a *EnhancedProviderAdapter) UploadData(ctx context.Context, data []byte, remotePath string) error {
	// For small data, use regular upload
	if len(data) < 1024*1024 { // Less than 1MB
		return a.provider.UploadData(ctx, data, remotePath)
	}

	// Chunk the data
	chunks, chunkData, err := a.chunker.ChunkData(ctx, data, "upload")
	if err != nil {
		return fmt.Errorf("error chunking data: %w", err)
	}

	// Upload each chunk
	for i := range chunks {
		chunkRemotePath := fmt.Sprintf("%s.chunk.%d", remotePath, i)
		err := a.provider.UploadData(ctx, chunkData[i], chunkRemotePath)
		if err != nil {
			return fmt.Errorf("error uploading chunk %d: %w", i, err)
		}
	}

	// Create manifest file
	manifestPath := remotePath + ".manifest"
	manifestData := SerializeChunks(chunks, remotePath)
	err = a.provider.UploadData(ctx, manifestData, manifestPath)
	if err != nil {
		return fmt.Errorf("error uploading manifest: %w", err)
	}

	return nil
}

// DownloadData downloads data using enhanced adaptive chunking
func (a *EnhancedProviderAdapter) DownloadData(ctx context.Context, remotePath string) ([]byte, error) {
	// Check if the file is chunked
	manifestPath := remotePath + ".manifest"
	manifestData, err := a.provider.DownloadData(ctx, manifestPath)

	// If manifest doesn't exist, use regular download
	if err != nil {
		return a.provider.DownloadData(ctx, remotePath)
	}

	// Parse manifest
	chunks, _ := DeserializeChunks(manifestData)
	if len(chunks) == 0 {
		return a.provider.DownloadData(ctx, remotePath)
	}

	// Download each chunk
	var chunkData [][]byte
	for i := range chunks {
		chunkRemotePath := fmt.Sprintf("%s.chunk.%d", remotePath, i)
		data, err := a.provider.DownloadData(ctx, chunkRemotePath)
		if err != nil {
			return nil, fmt.Errorf("error downloading chunk %d: %w", i, err)
		}
		chunkData = append(chunkData, data)
	}

	// Combine chunks
	totalSize := 0
	for _, chunk := range chunkData {
		totalSize += len(chunk)
	}

	result := make([]byte, 0, totalSize)
	for _, chunk := range chunkData {
		result = append(result, chunk...)
	}

	return result, nil
}

// ListFiles lists files
func (a *EnhancedProviderAdapter) ListFiles(ctx context.Context, remotePath string) ([]string, error) {
	return a.provider.ListFiles(ctx, remotePath)
}

// DeleteFile deletes a file
func (a *EnhancedProviderAdapter) DeleteFile(ctx context.Context, remotePath string) error {
	// Check if the file is chunked
	manifestPath := remotePath + ".manifest"
	manifestData, err := a.provider.DownloadData(ctx, manifestPath)

	// If manifest doesn't exist, use regular delete
	if err != nil {
		return a.provider.DeleteFile(ctx, remotePath)
	}

	// Parse manifest
	chunks, _ := DeserializeChunks(manifestData)
	if len(chunks) == 0 {
		return a.provider.DeleteFile(ctx, remotePath)
	}

	// Delete each chunk
	for i := range chunks {
		chunkRemotePath := fmt.Sprintf("%s.chunk.%d", remotePath, i)
		err := a.provider.DeleteFile(ctx, chunkRemotePath)
		if err != nil {
			return fmt.Errorf("error deleting chunk %d: %w", i, err)
		}
	}

	// Delete manifest
	err = a.provider.DeleteFile(ctx, manifestPath)
	if err != nil {
		return fmt.Errorf("error deleting manifest: %w", err)
	}

	return nil
}

// FileExists checks if a file exists
func (a *EnhancedProviderAdapter) FileExists(ctx context.Context, remotePath string) (bool, error) {
	// Check if the manifest exists
	manifestPath := remotePath + ".manifest"
	exists, err := a.provider.FileExists(ctx, manifestPath)
	if err == nil && exists {
		return true, nil
	}

	// Fall back to regular check
	return a.provider.FileExists(ctx, remotePath)
}

// GetFileSize gets the size of a file
func (a *EnhancedProviderAdapter) GetFileSize(ctx context.Context, remotePath string) (int64, error) {
	// Check if the file is chunked
	manifestPath := remotePath + ".manifest"
	manifestData, err := a.provider.DownloadData(ctx, manifestPath)

	// If manifest doesn't exist, use regular method
	if err != nil {
		return a.provider.GetFileSize(ctx, remotePath)
	}

	// Parse manifest
	chunks, _ := DeserializeChunks(manifestData)
	if len(chunks) == 0 {
		return a.provider.GetFileSize(ctx, remotePath)
	}

	// Calculate total size from chunks
	var totalSize int64
	for _, chunk := range chunks {
		totalSize += chunk.Size
	}

	return totalSize, nil
}

// Close closes the adapter
func (a *EnhancedProviderAdapter) Close() error {
	if a.chunker != nil {
		a.chunker.Close()
	}

	// Close the underlying provider if it implements io.Closer
	if closer, ok := a.provider.(io.Closer); ok {
		return closer.Close()
	}

	return nil
}

// SerializeChunks serializes chunks to a byte slice
func SerializeChunks(chunks []ChunkInfo, remotePath string) []byte {
	// Simple serialization for now
	result := fmt.Sprintf("path:%s\nchunks:%d\n", remotePath, len(chunks))
	for i, chunk := range chunks {
		result += fmt.Sprintf("chunk:%d,offset:%d,size:%d,hash:%s\n",
			i, chunk.Offset, chunk.Size, chunk.Hash)
	}
	return []byte(result)
}

// DeserializeChunks deserializes chunks from a byte slice
func DeserializeChunks(data []byte) ([]ChunkInfo, string) {
	// Simple deserialization for now - in a real implementation, use a proper format
	// This is just a placeholder
	return []ChunkInfo{}, ""
}
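
// parseChunkManifestSketch is an illustrative sketch (not yet wired into
// DeserializeChunks) of how the line-based manifest produced by
// SerializeChunks above could be parsed back into ChunkInfo values. It is a
// hypothetical helper: it assumes paths and hashes contain no whitespace and
// deliberately avoids extra imports by splitting lines by hand and scanning
// each one with fmt.Sscanf.
func parseChunkManifestSketch(data []byte) ([]ChunkInfo, string) {
	var chunks []ChunkInfo
	var path string
	start := 0
	for i := 0; i <= len(data); i++ {
		// Process a line whenever a newline (or the end of the data) is reached
		if i != len(data) && data[i] != '\n' {
			continue
		}
		line := string(data[start:i])
		start = i + 1

		// "path:<remote path>" header line
		var p string
		if n, _ := fmt.Sscanf(line, "path:%s", &p); n == 1 {
			path = p
			continue
		}

		// "chunk:<index>,offset:<offset>,size:<size>,hash:<hash>" entry line
		var idx int
		var c ChunkInfo
		if n, _ := fmt.Sscanf(line, "chunk:%d,offset:%d,size:%d,hash:%s",
			&idx, &c.Offset, &c.Size, &c.Hash); n == 4 {
			chunks = append(chunks, c)
		}
	}
	return chunks, path
}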
EOF

# Create enhanced provider factory
echo "Creating enhanced provider factory..."
cat > cmd/keepsync-cli/services/enhanced_provider_factory.go << 'EOF'
package services

import (
	"fmt"
	"sync"
)

// EnhancedProviderFactory creates enhanced storage providers
type EnhancedProviderFactory struct {
	baseFactory ProviderFactory
	mu          sync.Mutex
}

// NewEnhancedProviderFactory creates a new enhanced provider factory
func NewEnhancedProviderFactory(baseFactory ProviderFactory) *EnhancedProviderFactory {
	return &EnhancedProviderFactory{
		baseFactory: baseFactory,
	}
}

// CreateProvider creates a storage provider with enhanced adaptive chunking
func (f *EnhancedProviderFactory) CreateProvider(providerType string, config map[string]interface{}) (StorageProvider, error) {
	f.mu.Lock()
	defer f.mu.Unlock()

	// Create base provider
	baseProvider, err := f.baseFactory.CreateProvider(providerType, config)
	if err != nil {
		return nil, fmt.Errorf("error creating base provider: %w", err)
	}

	// Wrap with enhanced adapter
	enhancedProvider := NewEnhancedProviderAdapter(baseProvider)

	return enhancedProvider, nil
}
EOF

# Create integration test
echo "Creating integration test..."
cat > cmd/keepsync-cli/services/enhanced_chunking_integration_test.go << 'EOF'
package services

import (
	"context"
	"os"
	"path/filepath"
	"testing"
)

func TestEnhancedChunkingIntegration(t *testing.T) {
	// Create a temporary directory for test files
	tempDir, err := os.MkdirTemp("", "enhanced-chunking-integration")
	if err != nil {
		t.Fatalf("Failed to create temp directory: %v", err)
	}
	defer os.RemoveAll(tempDir)

	// Create test file
	testFilePath := filepath.Join(tempDir, "test-file.dat")
	testFileSize := int64(10 * 1024 * 1024) // 10MB
	err = createTestFile(testFilePath, testFileSize)
	if err != nil {
		t.Fatalf("Failed to create test file: %v", err)
	}

	// Create output file path
	outputFilePath := filepath.Join(tempDir, "output-file.dat")

	// Create base provider factory
	baseFactory := NewProviderFactory()

	// Create enhanced provider factory
	enhancedFactory := NewEnhancedProviderFactory(baseFactory)

	// Create local provider config
	config := map[string]interface{}{
		"basePath": tempDir,
	}

	// Create enhanced provider
	provider, err := enhancedFactory.CreateProvider("local", config)
	if err != nil {
		t.Fatalf("Failed to create provider: %v", err)
	}
	defer provider.(interface{ Close() error }).Close()

	// Test upload
	ctx := context.Background()
	err = provider.UploadFile(ctx, testFilePath, "remote-file.dat")
	if err != nil {
		t.Fatalf("Failed to upload file: %v", err)
	}

	// Test download
	err = provider.DownloadFile(ctx, "remote-file.dat", outputFilePath)
	if err != nil {
		t.Fatalf("Failed to download file: %v", err)
	}

	// Verify the output file
	outputInfo, err := os.Stat(outputFilePath)
	if err != nil {
		t.Fatalf("Failed to stat output file: %v", err)
	}
	if outputInfo.Size() != testFileSize {
		t.Errorf("Expected output file size %d, got %d", testFileSize, outputInfo.Size())
	}

	// Test file exists
	exists, err := provider.FileExists(ctx, "remote-file.dat")
	if err != nil {
		t.Fatalf("Failed to check if file exists: %v", err)
	}
	if !exists {
		t.Errorf("Expected file to exist")
	}

	// Test get file size
	size, err := provider.GetFileSize(ctx, "remote-file.dat")
	if err != nil {
		t.Fatalf("Failed to get file size: %v", err)
	}
	if size != testFileSize {
		t.Errorf("Expected file size %d, got %d", testFileSize, size)
	}

	// Test delete file
	err = provider.DeleteFile(ctx, "remote-file.dat")
	if err != nil {
		t.Fatalf("Failed to delete file: %v", err)
	}

	// Verify file is deleted
	exists, err = provider.FileExists(ctx, "remote-file.dat")
	if err != nil {
		t.Fatalf("Failed to check if file exists: %v", err)
	}
	if exists {
		t.Errorf("Expected file to be deleted")
	}
}
EOF

# Create documentation
echo "Creating documentation..."
mkdir -p docs
cat > docs/enhanced-adaptive-chunking-guide.md << 'EOF'
# Enhanced Adaptive Chunking Guide

This guide explains how to use the enhanced adaptive chunking system to improve file transfer performance.

## Overview

The enhanced adaptive chunking system provides several optimizations for file operations:

1. **Buffer Pooling**: Reduces memory allocations by reusing byte slices
2. **Adaptive Worker Pool**: Dynamically adjusts worker count based on system resources
3. **Chunk Caching**: Provides fast access to previously processed data
4. **Delayed Write System**: Buffers write operations for improved I/O performance
5. **Provider-Specific Optimizations**: Adjusts chunk sizes based on provider characteristics

## Components

### EnhancedAdaptiveChunker

The core component that orchestrates the chunking process.

```go
// Create options with optimizations enabled
options := DefaultEnhancedAdaptiveChunkingOptions()
options.BufferPoolEnabled = true
options.AdaptiveWorkersEnabled = true
options.CacheEnabled = true
options.DelayedWriteEnabled = true

// Create chunker
chunker := NewEnhancedAdaptiveChunker(options)
defer chunker.Close()

// Chunk a file
chunks, err := chunker.ChunkFile(ctx, filePath, "upload")
if err != nil {
	// Handle error
}
```

### EnhancedProviderAdapter

Wraps a storage provider with enhanced adaptive chunking.

```go
// Create base provider
baseProvider, err := factory.CreateProvider("s3", config)
if err != nil {
	// Handle error
}

// Wrap with enhanced adapter
enhancedProvider := NewEnhancedProviderAdapter(baseProvider)

// Use like a regular provider
err = enhancedProvider.UploadFile(ctx, localPath, remotePath)
if err != nil {
	// Handle error
}
```

### EnhancedProviderFactory

Creates storage providers with enhanced adaptive chunking.

```go
// Create base provider factory
baseFactory := NewProviderFactory()

// Create enhanced provider factory
enhancedFactory := NewEnhancedProviderFactory(baseFactory)

// Create enhanced provider
provider, err := enhancedFactory.CreateProvider("s3", config)
if err != nil {
	// Handle error
}
```

## Performance Tuning

### Buffer Pool Size

The buffer pool size determines the size of reusable buffers. Larger buffers reduce allocations but consume more memory.

```go
options.BufferPoolEnabled = true
options.BufferPoolSize = 8 * 1024 * 1024 // 8MB
```

### Worker Pool Size

The worker pool size determines the number of parallel operations. More workers increase throughput but consume more resources.

```go
options.AdaptiveWorkersEnabled = true
options.MinWorkers = 2
options.MaxWorkers = runtime.NumCPU() * 2
```

### Cache Size

The cache size determines how much data is cached in memory. Larger caches improve performance but consume more memory.

```go
options.CacheEnabled = true
options.CacheSize = 100 * 1024 * 1024 // 100MB
```

### Delayed Write Buffer Size

The delayed write buffer size determines how much data is buffered before writing to disk. Larger buffers improve performance but delay persistence.

```go
options.DelayedWriteEnabled = true
options.DelayedWriteBufferSize = 10 * 1024 * 1024 // 10MB
```

## Provider-Specific Optimizations

The system automatically adjusts chunk sizes based on provider characteristics:

- **S3**: Larger chunks to reduce API calls (up to 8MB)
- **WebDAV**: Smaller chunks to avoid timeout issues (up to 4MB)
- **SFTP**: Larger chunks to reduce connection overhead (up to 8MB)
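
As a rough illustration, the defaults above amount to a selection function along the following lines. This is a minimal sketch only; the real logic lives inside the chunker, and the `MaxChunkSize` field name is hypothetical.

```go
// suggestedMaxChunkSize sketches how a chunk-size ceiling could be picked
// from the provider type, mirroring the defaults listed above.
func suggestedMaxChunkSize(providerType string) int64 {
	switch providerType {
	case "s3", "sftp":
		return 8 * 1024 * 1024 // fewer API calls / less connection overhead
	case "webdav":
		return 4 * 1024 * 1024 // smaller chunks to avoid timeouts
	default:
		return 4 * 1024 * 1024 // conservative fallback for unknown providers
	}
}

// Example (hypothetical field):
// options.MaxChunkSize = suggestedMaxChunkSize(options.ProviderType)
```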

## Integration with Existing Code

To integrate the enhanced adaptive chunking system with existing code:

1. Use the `EnhancedProviderAdapter` to wrap existing providers
2. Use the `EnhancedProviderFactory` to create enhanced providers
3. Use the `EnhancedAdaptiveChunker` directly for custom chunking logic

## Next Steps

1. **Comprehensive Benchmarking**: Develop benchmarks for different file sizes and operation types
2. **Enhanced Metrics Collection**: Expand metrics collection for detailed performance data
3. **Advanced Caching Strategies**: Implement tiered caching and predictive prefetching
4. **User Interface Integration**: Add optimization settings to CLI
EOF

echo "Creating next steps document..."
cat > docs/adaptive-chunking-next-steps.md << 'EOF'
# Adaptive Chunking Next Steps

This document outlines the next steps for the enhanced adaptive chunking system.

## Comprehensive Benchmarking

Develop benchmarks for different file sizes and operation types:

- **File Sizes**: 1KB, 10KB, 100KB, 1MB, 10MB, 100MB, 1GB
- **Operation Types**: Upload, Download, List, Delete
- **Provider Types**: Local, S3, WebDAV, SFTP
- **Network Conditions**: LAN, WAN, High Latency, Low Bandwidth

Tasks:
1. Create benchmark framework
2. Implement benchmarks for each scenario
3. Collect and analyze results
4. Optimize based on findings
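
As a starting point for the benchmark framework, per-size upload benchmarks could follow the shape of the existing integration test. A minimal sketch, assuming the same `NewProviderFactory`, `NewEnhancedProviderFactory`, and `createTestFile` helpers that the integration test uses and the usual `testing`, `context`, and `path/filepath` imports:

```go
func BenchmarkEnhancedUpload(b *testing.B) {
	sizes := map[string]int64{
		"1KB":  1 << 10,
		"1MB":  1 << 20,
		"10MB": 10 << 20,
	}
	for name, size := range sizes {
		b.Run(name, func(b *testing.B) {
			tempDir := b.TempDir()
			testFile := filepath.Join(tempDir, "bench.dat")
			if err := createTestFile(testFile, size); err != nil {
				b.Fatalf("Failed to create test file: %v", err)
			}

			factory := NewEnhancedProviderFactory(NewProviderFactory())
			provider, err := factory.CreateProvider("local", map[string]interface{}{"basePath": tempDir})
			if err != nil {
				b.Fatalf("Failed to create provider: %v", err)
			}

			ctx := context.Background()
			b.SetBytes(size)
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				if err := provider.UploadFile(ctx, testFile, "bench-remote.dat"); err != nil {
					b.Fatalf("Upload failed: %v", err)
				}
			}
		})
	}
}
```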

## Enhanced Metrics Collection

Expand metrics collection for detailed performance data:

- **Resource Usage**: CPU, Memory, Disk I/O, Network I/O
- **Operation Timing**: Per-chunk timing, Total operation timing
- **Cache Performance**: Hit rate, Miss rate, Eviction rate
- **Worker Pool Performance**: Utilization, Queue length, Wait time

Tasks:
1. Enhance metrics collector
2. Add resource usage tracking
3. Implement visualization tools
4. Create performance dashboards

## Advanced Caching Strategies

Implement tiered caching and predictive prefetching:

- **Tiered Caching**: Memory cache, Disk cache
- **Content-Aware Caching**: Cache based on file type and access pattern
- **Predictive Prefetching**: Prefetch likely-to-be-accessed chunks
- **Cache Eviction Policies**: LRU, LFU, FIFO, ARC

Tasks:
1. Implement tiered cache
2. Add content-aware caching
3. Develop predictive prefetching
4. Implement advanced eviction policies
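
To make the tiered-caching idea concrete, here is a minimal, standard-library-only sketch of the memory-then-disk lookup path. The `TieredChunkCache` type and its fields are hypothetical; eviction, prefetching, and content-aware policies are deliberately left out.

```go
// TieredChunkCache checks a fast in-memory map first, then a slower on-disk
// cache directory, keyed by chunk hash.
type TieredChunkCache struct {
	mu      sync.Mutex
	memory  map[string][]byte // chunk hash -> chunk data
	diskDir string            // directory holding spilled chunks
}

func (c *TieredChunkCache) Get(hash string) ([]byte, bool) {
	c.mu.Lock()
	if data, ok := c.memory[hash]; ok {
		c.mu.Unlock()
		return data, true // memory tier hit
	}
	c.mu.Unlock()

	// Fall back to the disk tier
	data, err := os.ReadFile(filepath.Join(c.diskDir, hash))
	if err != nil {
		return nil, false // miss in both tiers
	}
	return data, true
}
```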

## User Interface Integration

Add optimization settings to CLI:

- **Performance Profiles**: Default, Low Memory, High Throughput
- **Custom Settings**: Buffer size, Worker count, Cache size
- **Monitoring**: Real-time performance monitoring
- **Reporting**: Performance reports and recommendations

Tasks:
1. Add CLI commands for optimization settings
2. Implement performance profiles
3. Create monitoring interface
4. Develop reporting tools

## Integration with Provider System

Integrate with the provider system:

- **Provider Adapters**: Create adapters for each provider type
- **Factory Integration**: Integrate with provider factory
- **Configuration**: Add configuration options for chunking
- **Versioning**: Support versioning with chunked files

Tasks:
1. Create provider adapters
2. Integrate with provider factory
3. Add configuration options
4. Implement versioning support

## Advanced Features

Implement advanced features:

- **Differential Chunking**: Only transfer changed chunks
- **Deduplication**: Identify and reuse duplicate chunks
- **Compression**: Compress chunks before transfer
- **Encryption**: Encrypt chunks for secure transfer

Tasks:
1. Implement differential chunking
2. Add deduplication
3. Integrate compression
4. Add encryption support
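
Differential chunking can build directly on the manifest the adapter already writes: compare the hashes recorded in the previously uploaded manifest against freshly computed chunks, and re-upload only the ones that changed. A rough sketch follows; the `uploadChangedChunks` helper is hypothetical and omits manifest rewriting and cleanup of stale chunks.

```go
// uploadChangedChunks uploads only chunks whose hash differs from the chunk
// at the same index in the previous manifest.
func uploadChangedChunks(ctx context.Context, p StorageProvider, remotePath string,
	oldChunks, newChunks []ChunkInfo, newData [][]byte) error {

	oldHashes := make(map[int]string, len(oldChunks))
	for i, c := range oldChunks {
		oldHashes[i] = c.Hash
	}

	for i, c := range newChunks {
		if oldHashes[i] == c.Hash {
			continue // unchanged chunk, skip the transfer
		}
		chunkRemotePath := fmt.Sprintf("%s.chunk.%d", remotePath, i)
		if err := p.UploadData(ctx, newData[i], chunkRemotePath); err != nil {
			return fmt.Errorf("error uploading chunk %d: %w", i, err)
		}
	}
	return nil
}
```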
EOF

echo "Integration script completed successfully."
echo "Next steps:"
echo "1. Run the integration tests: go test -v -run TestEnhancedChunkingIntegration ./cmd/keepsync-cli/services/"
echo "2. Review the documentation in docs/enhanced-adaptive-chunking-guide.md"
echo "3. Implement the next steps in docs/adaptive-chunking-next-steps.md"