2020-11-24 11:54:23 +00:00
|
|
|
use std::collections::HashMap;
|
|
|
|
|
2020-10-22 11:55:02 +01:00
|
|
|
use kuchiki::{traits::*, NodeRef};
|
2020-05-01 14:17:59 +01:00
|
|
|
|
2021-04-17 10:04:06 +01:00
|
|
|
use crate::errors::PaperoniError;
|
2020-10-22 11:55:02 +01:00
|
|
|
use crate::moz_readability::{MetaData, Readability};
|
2020-10-22 10:12:30 +01:00
|
|
|
|
2020-05-05 10:24:11 +01:00
|
|
|
/// A resource URL paired with an optional second string.
/// NOTE(review): the `Option<String>`'s meaning is not visible in this file —
/// presumably a resolved/local path filled in after download; confirm against callers.
pub type ResourceInfo = (String, Option<String>);
|
|
|
|
|
2020-11-24 11:54:23 +00:00
|
|
|
lazy_static! {
    // Matches any single character that needs escaping in XHTML text or
    // attribute values; capture group 1 is used by `serialize_to_xhtml` to
    // look up the replacement entity, so the set of characters here must stay
    // in sync with the escape map built there.
    static ref ESC_SEQ_REGEX: regex::Regex = regex::Regex::new(r#"(&|<|>|'|")"#).unwrap();
}
|
|
|
|
|
2020-05-01 14:17:59 +01:00
|
|
|
/// Extracts the readable article content (and its image URLs) from an HTML
/// document using the `moz_readability` port.
pub struct Extractor {
    // The extracted article wrapped in an XHTML skeleton; `None` until
    // `extract_content` has run successfully and found an article node.
    article: Option<NodeRef>,
    // Image URLs collected from the article by `extract_img_urls`.
    // NOTE(review): the Option half of each entry is never set here (always
    // `None`) — presumably populated later by a download step; confirm.
    pub img_urls: Vec<ResourceInfo>,
    // Readability parser seeded with the raw HTML string.
    readability: Readability,
    // Source URL of the document, passed to `Readability::parse`.
    pub url: String,
}
|
|
|
|
|
|
|
|
impl Extractor {
|
|
|
|
/// Create a new instance of an HTML extractor given an HTML string
|
2021-04-20 19:06:54 +01:00
|
|
|
pub fn from_html(html_str: &str, url: &str) -> Self {
|
2020-05-01 14:17:59 +01:00
|
|
|
Extractor {
|
2020-10-22 11:55:02 +01:00
|
|
|
article: None,
|
2020-05-02 16:33:45 +01:00
|
|
|
img_urls: Vec::new(),
|
2020-10-22 10:12:30 +01:00
|
|
|
readability: Readability::new(html_str),
|
2021-04-20 19:06:54 +01:00
|
|
|
url: url.to_string(),
|
2020-05-01 14:17:59 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-02 12:51:53 +01:00
|
|
|
/// Locates and extracts the HTML in a document which is determined to be
|
|
|
|
/// the source of the content
|
2021-04-21 17:07:08 +01:00
|
|
|
pub fn extract_content(&mut self) -> Result<(), PaperoniError> {
|
|
|
|
self.readability.parse(&self.url)?;
|
2020-10-22 11:55:02 +01:00
|
|
|
if let Some(article_node_ref) = &self.readability.article_node {
|
|
|
|
let template = r#"
|
|
|
|
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops">
|
|
|
|
<head>
|
2021-06-09 06:04:50 +01:00
|
|
|
<link rel="stylesheet" href="stylesheet.css" type="text/css"></link>
|
2020-10-22 11:55:02 +01:00
|
|
|
</head>
|
|
|
|
<body>
|
|
|
|
</body>
|
|
|
|
</html>
|
|
|
|
"#;
|
|
|
|
let doc = kuchiki::parse_html().one(template);
|
|
|
|
let body = doc.select_first("body").unwrap();
|
|
|
|
body.as_node().append(article_node_ref.clone());
|
|
|
|
self.article = Some(doc);
|
|
|
|
}
|
2021-04-21 17:07:08 +01:00
|
|
|
Ok(())
|
2020-05-02 12:51:53 +01:00
|
|
|
}
|
2020-05-01 14:17:59 +01:00
|
|
|
|
2020-05-02 16:33:45 +01:00
|
|
|
/// Traverses the DOM tree of the content and retrieves the IMG URLs
|
2021-02-06 09:59:03 +00:00
|
|
|
pub fn extract_img_urls(&mut self) {
|
|
|
|
if let Some(content_ref) = &self.article {
|
2020-10-22 10:12:30 +01:00
|
|
|
for img_ref in content_ref.select("img").unwrap() {
|
2020-05-02 16:33:45 +01:00
|
|
|
img_ref.as_node().as_element().map(|img_elem| {
|
|
|
|
img_elem.attributes.borrow().get("src").map(|img_url| {
|
2020-12-24 09:16:30 +00:00
|
|
|
if !(img_url.is_empty() || img_url.starts_with("data:image")) {
|
2020-05-05 10:24:11 +01:00
|
|
|
self.img_urls.push((img_url.to_string(), None))
|
2020-05-02 16:33:45 +01:00
|
|
|
}
|
|
|
|
})
|
|
|
|
});
|
2020-05-01 14:17:59 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-05-02 16:33:45 +01:00
|
|
|
|
2021-04-21 17:07:08 +01:00
|
|
|
/// Returns the extracted article [NodeRef]. It should only be called *AFTER* calling parse
|
|
|
|
pub fn article(&self) -> &NodeRef {
|
|
|
|
self.article.as_ref().expect(
|
|
|
|
"Article node doesn't exist. This may be because the document has not been parsed",
|
|
|
|
)
|
2020-10-22 11:55:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn metadata(&self) -> &MetaData {
|
|
|
|
&self.readability.metadata
|
2020-10-22 10:12:30 +01:00
|
|
|
}
|
2020-05-01 14:17:59 +01:00
|
|
|
}
|
|
|
|
|
2020-11-24 11:54:23 +00:00
|
|
|
/// Serializes a NodeRef to a string that is XHTML compatible
|
|
|
|
/// The only DOM nodes serialized are Text and Element nodes
|
|
|
|
pub fn serialize_to_xhtml<W: std::io::Write>(
|
|
|
|
node_ref: &NodeRef,
|
|
|
|
mut w: &mut W,
|
2021-04-17 10:04:06 +01:00
|
|
|
) -> Result<(), PaperoniError> {
|
2020-11-24 11:54:23 +00:00
|
|
|
let mut escape_map = HashMap::new();
|
|
|
|
escape_map.insert("<", "<");
|
|
|
|
escape_map.insert(">", ">");
|
|
|
|
escape_map.insert("&", "&");
|
2020-12-24 09:16:30 +00:00
|
|
|
escape_map.insert("\"", """);
|
|
|
|
escape_map.insert("'", "'");
|
2020-11-24 11:54:23 +00:00
|
|
|
for edge in node_ref.traverse_inclusive() {
|
|
|
|
match edge {
|
|
|
|
kuchiki::iter::NodeEdge::Start(n) => match n.data() {
|
|
|
|
kuchiki::NodeData::Text(rc_text) => {
|
|
|
|
let text = rc_text.borrow();
|
|
|
|
let esc_text = ESC_SEQ_REGEX
|
|
|
|
.replace_all(&text, |captures: ®ex::Captures| escape_map[&captures[1]]);
|
|
|
|
write!(&mut w, "{}", esc_text)?;
|
|
|
|
}
|
|
|
|
kuchiki::NodeData::Element(elem_data) => {
|
|
|
|
let attrs = elem_data.attributes.borrow();
|
|
|
|
let attrs_str = attrs
|
|
|
|
.map
|
|
|
|
.iter()
|
2021-06-09 05:26:52 +01:00
|
|
|
.filter(|(k, _)| !k.local.contains("\""))
|
2020-11-24 11:54:23 +00:00
|
|
|
.map(|(k, v)| {
|
|
|
|
format!(
|
|
|
|
"{}=\"{}\"",
|
|
|
|
k.local,
|
|
|
|
ESC_SEQ_REGEX
|
|
|
|
.replace_all(&v.value, |captures: ®ex::Captures| {
|
|
|
|
escape_map[&captures[1]]
|
|
|
|
})
|
|
|
|
)
|
|
|
|
})
|
|
|
|
.fold("".to_string(), |acc, val| acc + " " + &val);
|
|
|
|
write!(&mut w, "<{}{}>", &elem_data.name.local, attrs_str)?;
|
|
|
|
}
|
|
|
|
_ => (),
|
|
|
|
},
|
|
|
|
kuchiki::iter::NodeEdge::End(n) => match n.data() {
|
|
|
|
kuchiki::NodeData::Element(elem_data) => {
|
|
|
|
write!(&mut w, "</{}>", &elem_data.name.local)?;
|
|
|
|
}
|
|
|
|
_ => (),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2020-05-01 14:17:59 +01:00
|
|
|
#[cfg(test)]
mod test {
    use super::*;

    // Minimal fixture: the <article> element holds the "real" content and
    // contains one ordinary image plus one inline data: URI image, which
    // `extract_img_urls` is expected to skip.
    const TEST_HTML: &'static str = r#"
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="description" content="A sample document">
<meta name="keywords" content="test,Rust">
<meta name="author" content="Paperoni">
<title>Testing Paperoni</title>
</head>
<body>
<header>
<!-- Unimportant information -->
<h1>Testing Paperoni</h1>
</header>
<article>
<h1>Starting out</h1>
<p>Some Lorem Ipsum text here</p>
<p>Observe this picture</p>
<img src="./img.jpg" alt="Random image">
<img src="data:image/png;base64,lJGWEIUQOIQWIDYVIVEDYFOUYQFWD">
</article>
<footer>
<p>Made in HTML</p>
</footer>
</body>
</html>
"#;

    #[test]
    fn test_extract_img_urls() {
        let mut extractor = Extractor::from_html(TEST_HTML, "http://example.com/");
        extractor
            .extract_content()
            .expect("Article extraction failed unexpectedly");
        extractor.extract_img_urls();

        // Only the relative image is collected — and, per the assertion
        // below, it arrives already resolved against the base URL, so the
        // resolution presumably happens during readability parsing.
        // The data: URI image must be excluded.
        assert!(extractor.img_urls.len() > 0);
        assert_eq!(
            vec![("http://example.com/img.jpg".to_string(), None)],
            extractor.img_urls
        );
    }
}
|