use async_std::fs::File;
use async_std::io::prelude::*;
use async_std::task;
use kuchiki::{traits::*, NodeRef};
use url::Url;

use crate::moz_readability::{MetaData, Readability};

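/// A resource is stored as a (location, optional MIME type) pair. The location
/// starts out as the image's source URL and is replaced with the local file
/// name once `download_images` has fetched it.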
pub type ResourceInfo = (String, Option<String>);

pub struct Extractor {
    article: Option<NodeRef>,
    pub img_urls: Vec<ResourceInfo>,
    readability: Readability,
}

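// Typical usage (a sketch; the actual call sites live elsewhere in the crate):
// build an `Extractor` with `from_html`, call `extract_content` with the article's
// URL, optionally call `download_images` to localise the <img> sources, then read
// the result back through `article()` and `metadata()`.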
impl Extractor {
    /// Create a new instance of an HTML extractor given an HTML string
    pub fn from_html(html_str: &str) -> Self {
        Extractor {
            article: None,
            img_urls: Vec::new(),
            readability: Readability::new(html_str),
        }
    }

    /// Locates and extracts the HTML in a document which is determined to be
    /// the source of the content
    pub fn extract_content(&mut self, url: &str) {
        self.readability.parse(url);
        if let Some(article_node_ref) = &self.readability.article_node {
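            // Wrap the extracted article node in a minimal XHTML shell; the
            // template declares the XHTML namespace together with the EPUB
            // (idpf.org/2007/ops) namespace.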
let template = r#"
|
|
|
|
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops">
|
|
|
|
<head>
|
|
|
|
</head>
|
|
|
|
<body>
|
|
|
|
</body>
|
|
|
|
</html>
|
|
|
|
"#;
|
|
|
|
let doc = kuchiki::parse_html().one(template);
|
|
|
|
let body = doc.select_first("body").unwrap();
|
|
|
|
body.as_node().append(article_node_ref.clone());
|
|
|
|
self.article = Some(doc);
|
|
|
|
}
|
2020-05-02 12:51:53 +01:00
|
|
|
}
|
2020-05-01 14:17:59 +01:00
|
|
|
|
2020-05-02 16:33:45 +01:00
|
|
|
/// Traverses the DOM tree of the content and retrieves the IMG URLs
|
|
|
|
fn extract_img_urls(&mut self) {
|
2020-10-22 10:12:30 +01:00
|
|
|
if let Some(content_ref) = &self.readability.article_node {
|
|
|
|
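            // Record the src of every <img> element, skipping empty values.
            // Nothing is downloaded here; that happens in `download_images`.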
            for img_ref in content_ref.select("img").unwrap() {
                img_ref.as_node().as_element().map(|img_elem| {
                    img_elem.attributes.borrow().get("src").map(|img_url| {
                        if !img_url.is_empty() {
                            self.img_urls.push((img_url.to_string(), None))
                        }
                    })
                });
            }
        }
    }

    pub async fn download_images(&mut self, article_origin: &Url) -> async_std::io::Result<()> {
        let mut async_download_tasks = Vec::with_capacity(self.img_urls.len());
        self.extract_img_urls();
        println!("Downloading images...");
        for img_url in &self.img_urls {
            let img_url = img_url.0.clone();
            let abs_url = get_absolute_url(&img_url, article_origin);

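            // Each image is fetched on its own async task; the response body is
            // written to a file in the system temp directory, named after the
            // MD5 hash of the absolute URL plus an extension derived from the
            // Content-Type header.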
            async_download_tasks.push(task::spawn(async move {
                let mut img_response = surf::get(&abs_url).await.expect("Unable to retrieve file");
                let img_content: Vec<u8> = img_response.body_bytes().await.unwrap();
                let img_mime = img_response
                    .content_type()
                    .map(|mime| mime.essence().to_string());
                let img_ext = img_response
                    .content_type()
                    .map(|mime| map_mime_subtype_to_ext(mime.subtype()).to_string())
                    .unwrap();
                let mut img_path = std::env::temp_dir();
                img_path.push(format!("{}.{}", hash_url(&abs_url), &img_ext));
                let mut img_file = File::create(&img_path)
                    .await
                    .expect("Unable to create file");
                img_file
                    .write_all(&img_content)
                    .await
                    .expect("Unable to save to file");

                (
                    img_url,
                    img_path
                        .file_name()
                        .map(|os_str_name| {
                            os_str_name
                                .to_str()
                                .expect("Unable to get image file name")
                                .to_string()
                        })
                        .unwrap(),
                    img_mime,
                )
            }));
        }

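        // The URL list is rebuilt below: as each download task completes, its
        // entry is re-added as (local file name, MIME type) and the matching
        // <img> node's src attribute is pointed at the local file.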
        self.img_urls.clear();

        for async_task in async_download_tasks {
            let (img_url, img_path, img_mime) = async_task.await;
            // Update the image sources
            let img_ref = self
                .readability
                .article_node
                .as_mut()
                .expect("Unable to get mutable ref")
                .select_first(&format!("img[src='{}']", img_url))
                .expect("Image node does not exist");
            let mut img_node = img_ref.attributes.borrow_mut();
            *img_node.get_mut("src").unwrap() = img_path.clone();
            self.img_urls.push((img_path, img_mime));
        }
        Ok(())
    }

    pub fn article(&self) -> Option<&NodeRef> {
        self.article.as_ref()
    }

    pub fn metadata(&self) -> &MetaData {
        &self.readability.metadata
    }
}

/// Utility for hashing URLs. This is used to help store files locally with unique values
fn hash_url(url: &str) -> String {
    format!("{:x}", md5::compute(url.as_bytes()))
}

/// Handles getting the extension from a given MIME subtype.
fn map_mime_subtype_to_ext(subtype: &str) -> &str {
    if subtype == "svg+xml" {
        return "svg";
    } else if subtype == "x-icon" {
        "ico"
    } else {
        subtype
    }
}

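/// Resolves `url` against the URL of the article: absolute URLs are returned
/// unchanged, root-relative URLs (starting with '/') are joined onto the scheme
/// and host of `request_url`, and any other value is joined onto `request_url`
/// itself.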
fn get_absolute_url(url: &str, request_url: &Url) -> String {
    if Url::parse(url).is_ok() {
        url.to_owned()
    } else if url.starts_with("/") {
        Url::parse(&format!(
            "{}://{}",
            request_url.scheme(),
            request_url.host_str().unwrap()
        ))
        .unwrap()
        .join(url)
        .unwrap()
        .into_string()
    } else {
        request_url.join(url).unwrap().into_string()
    }
}

#[cfg(test)]
mod test {
    use super::*;
    const TEST_HTML: &'static str = r#"
        <!doctype html>
        <html lang="en">
            <head>
                <meta charset="utf-8">
                <meta name="description" content="A sample document">
                <meta name="keywords" content="test,Rust">
                <meta name="author" content="Paperoni">
                <title>Testing Paperoni</title>
            </head>
            <body>
                <header>
                    <!-- Unimportant information -->
                    <h1>Testing Paperoni</h1>
                </header>
                <article>
                    <h1>Starting out</h1>
                    <p>Some Lorem Ipsum text here</p>
                    <p>Observe this picture</p>
                    <img src="./img.jpg" alt="Random image">
                </article>
                <footer>
                    <p>Made in HTML</p>
                </footer>
            </body>
        </html>
        "#;

    #[test]
    fn test_extract_img_urls() {
        let mut extractor = Extractor::from_html(TEST_HTML);
        extractor.extract_content("http://example.com/");
        extractor.extract_img_urls();

        assert!(extractor.img_urls.len() > 0);
        assert_eq!(
            vec![("http://example.com/img.jpg".to_string(), None)],
            extractor.img_urls
        );
    }

    #[test]
    fn test_map_mime_type_to_ext() {
        let mime_subtypes = vec![
            "apng", "bmp", "gif", "x-icon", "jpeg", "png", "svg+xml", "tiff", "webp",
        ];
        let exts = mime_subtypes
            .into_iter()
            .map(|mime_type| map_mime_subtype_to_ext(mime_type))
            .collect::<Vec<_>>();
        assert_eq!(
            vec!["apng", "bmp", "gif", "ico", "jpeg", "png", "svg", "tiff", "webp"],
            exts
        );
    }

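    // A sanity-check sketch for `get_absolute_url`; the expected values follow
    // the `url` crate's join rules for a hypothetical base of
    // "https://example.com/blog/post".
    #[test]
    fn test_get_absolute_url() {
        let base = Url::parse("https://example.com/blog/post").unwrap();
        // Absolute URLs pass through unchanged
        assert_eq!(
            "https://cdn.example.com/img.png",
            get_absolute_url("https://cdn.example.com/img.png", &base)
        );
        // Root-relative URLs resolve against the scheme and host only
        assert_eq!(
            "https://example.com/img.png",
            get_absolute_url("/img.png", &base)
        );
        // Other relative URLs are joined onto the base URL
        assert_eq!(
            "https://example.com/blog/img.png",
            get_absolute_url("img.png", &base)
        );
    }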
}