// main.rs
1 use std::fs; 2 use std::path::PathBuf; 3 4 use super::{ 5 authors, 6 http::ViaHTTP, 7 torrents::ViaTorrent, 8 scraping::core::ViaScraper, 9 }; 10 11 use crate::setup::paths::{make_fundamental_directories, Directories}; 12 13 14 pub fn get_author_root(author_name: &str) -> PathBuf { 15 16 let author_data_root: PathBuf = Directories::get().data.join(author_name); 17 if !author_data_root.exists() { 18 _ = fs::create_dir(&author_data_root); 19 } 20 21 author_data_root 22 } 23 24 25 pub fn retrieve_author(nickname: &str) -> Author<'_> { 26 27 let authors = authors::prepare_sources(); 28 let mut container: Vec<Author> = authors.into_iter().filter( |author| author.name.to_lowercase().contains(nickname) ).collect(); 29 container.pop().unwrap() // The pop function requires a mutabe reference to the target 30 } 31 32 33 #[derive(Default, Clone)] 34 pub struct Author <'a> { 35 pub name: String, 36 pub nickname: &'a str, 37 pub books_via_http: Option<Vec<ViaHTTP>>, 38 pub books_via_scraper: Option<Vec<ViaScraper<'a>>>, 39 pub books_via_torrent: Option<Vec<ViaTorrent>>, 40 } 41 42 43 impl Author <'_>{ 44 45 async fn download_via_http(&self) { 46 47 let http_books: Vec<ViaHTTP> = self.books_via_http.clone().unwrap(); 48 49 for book in http_books { 50 let file_name: String = book.get_file_name(); 51 let author_root: PathBuf = get_author_root(&self.name); 52 let file_path = author_root.join(file_name); 53 book.download(&file_path).await; 54 } 55 } 56 57 async fn download_via_scraper(&self) { 58 let books_to_scrape: &Vec<ViaScraper> = &self.books_via_scraper.clone().unwrap(); 59 60 for book in books_to_scrape { 61 book.download(&self.name).await; 62 } 63 } 64 65 async fn download_via_torrent(&self) { 66 let books_to_torrent: &Vec<ViaTorrent> = &self.books_via_torrent.clone().unwrap(); 67 68 for book in books_to_torrent { 69 let download_path: PathBuf = get_author_root(&self.name); 70 71 if book.must_torrent(&download_path, &self.name) { 72 log::warn!("Torrenting neccessary for 
{}", &self.name); 73 book.download(&download_path).await; 74 book.extract_files(download_path, &self.name); 75 } else { 76 log::info!("Torrenting is not necessary for {}", &self.name) 77 } 78 } 79 } 80 81 pub async fn download_books(&self) { 82 make_fundamental_directories(); 83 let http_books: &Option<Vec<ViaHTTP>> = &self.books_via_http; 84 let books_to_scrape: &Option<Vec<ViaScraper>> = &self.books_via_scraper; 85 let books_to_torrent: &Option<Vec<ViaTorrent>> = &self.books_via_torrent; 86 87 match (http_books, books_to_scrape, books_to_torrent) { 88 89 (None, None, None) => { 90 log::error!("{} has no books that can be acquired by HTTP, scraping, or torrenting", &self.name) 91 }, 92 93 (Some(_http_books), None, None) => { 94 self.download_via_http().await; 95 }, 96 97 (None, Some(_books_to_scrape), None) => { 98 self.download_via_scraper().await; 99 }, 100 101 (None, None, Some(_books_to_torrent)) => { 102 self.download_via_torrent().await; 103 }, 104 105 (Some(_http_books), Some(_books_to_scrape), None) => { 106 self.download_via_http().await; 107 self.download_via_scraper().await; 108 }, 109 110 (None, Some(_books_to_scrape), Some(_books_to_torrent)) => { 111 self.download_via_scraper().await; 112 self.download_via_torrent().await; 113 }, 114 115 (Some(_http_books), None, Some(_books_to_torrent)) => { 116 self.download_via_http().await; 117 self.download_via_scraper().await; 118 }, 119 120 (Some(_http_books), Some(_books_to_scrape), Some(_books_to_torrent)) => { 121 self.download_via_http().await; 122 self.download_via_scraper().await; 123 self.download_via_torrent().await; 124 } 125 } 126 } 127 } 128