Merge pull request #1 from hipstermojo/readability

Add Readability port
Kenneth Gitere 2020-10-22 19:24:31 +03:00 committed by GitHub
commit 566c3427be
10 changed files with 4249 additions and 237 deletions

Cargo.lock (generated; 17 lines changed)

@@ -704,9 +704,9 @@ dependencies = [
 [[package]]
 name = "kuchiki"
-version = "0.8.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1beeffc5ae5ab0def2cb85e26063a8e6b4f579b0adec3805bf87472086948956"
+checksum = "1ea8e9c6e031377cff82ee3001dc8026cdf431ed4e2e6b51f98ab8c73484a358"
 dependencies = [
  "cssparser",
  "html5ever 0.25.1",
@@ -1010,12 +1010,15 @@ dependencies = [
 [[package]]
 name = "paperoni"
-version = "0.1.0"
+version = "0.1.0-alpha1"
 dependencies = [
  "async-std",
  "epub-builder",
+ "html5ever 0.25.1",
  "kuchiki",
+ "lazy_static 1.4.0",
  "md5",
+ "regex",
  "structopt",
  "surf",
  "url",
@@ -1392,9 +1395,9 @@ checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
 [[package]]
 name = "regex"
-version = "1.3.7"
+version = "1.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a6020f034922e3194c711b82a627453881bc4682166cabb07134a10c26ba7692"
+checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -1404,9 +1407,9 @@ dependencies = [
 [[package]]
 name = "regex-syntax"
-version = "0.6.17"
+version = "0.6.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
+checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
 [[package]]
 name = "remove_dir_all"

Cargo.toml

@@ -1,6 +1,9 @@
 [package]
+description = "A web article downloader"
+homepage = "https://github.com/hipstermojo/paperoni"
+repository = "https://github.com/hipstermojo/paperoni"
 name = "paperoni"
-version = "0.1.0"
+version = "0.1.0-alpha1"
 authors = ["Kenneth Gitere <gitere81@gmail.com>"]
 edition = "2018"
 license = "MIT"
@@ -10,8 +13,11 @@ license = "MIT"
 [dependencies]
 async-std = "1.5.0"
 epub-builder = "0.4.5"
-kuchiki = "0.8.0"
+html5ever = "0.25.1"
+kuchiki = "0.8.1"
+lazy_static = "1.3.9"
 md5 = "0.7.0"
+regex = "1.3.9"
 surf = "1.0.3"
 structopt = { version = "0.3" }
 url = "2.1.1"

README.md (new file, 53 lines)

@@ -0,0 +1,53 @@
<p align="center"><img src="./paperoni-dark.png" width="400"></p>
<p align="center"><i>Salami not included</i></p>
Paperoni is a web article downloader written in Rust. The downloaded articles are then exported as EPUB files.
> This project is in an alpha release so it is pretty unstable.
## Usage
```sh
paperoni https://en.wikipedia.org/wiki/Pepperoni
```
Paperoni also supports passing multiple links as arguments. If you are on a Unix-like OS, you can simply do something like this:
```sh
cat links.txt | xargs paperoni
```
## How it works
The URL passed to Paperoni is fetched and the returned HTML response is passed to the extractor.
This extractor retrieves a possible article using a port of the [Mozilla Readability algorithm](https://github.com/mozilla/readability). This article is then saved in an EPUB.
> The port of the algorithm is itself still unstable, so it is not yet compatible with every website that Readability can extract.
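Concretely, the flow looks roughly like this inside the crate. This is a simplified, illustrative version of what `src/main.rs` does; `Extractor` is the internal type from `src/extractor.rs`, not a public API:

```rust
use async_std::task;
use epub_builder::{EpubBuilder, EpubContent, ZipLibrary};

use crate::extractor::Extractor;

fn save_article(url: &str) {
    task::block_on(async {
        // 1. Fetch the page HTML.
        let html = surf::get(url).recv_string().await.expect("fetch failed");
        // 2. Run the Readability port over it.
        let mut extractor = Extractor::from_html(&html);
        extractor.extract_content(url);
        // 3. Serialize the extracted article and wrap it in an EPUB.
        if let Some(article) = extractor.article() {
            let mut xhtml = Vec::new();
            article.serialize(&mut xhtml).expect("serialize failed");
            let mut out = std::fs::File::create("article.epub").unwrap();
            let mut epub = EpubBuilder::new(ZipLibrary::new().unwrap()).unwrap();
            epub.add_content(EpubContent::new("article.xhtml", xhtml.as_slice()))
                .unwrap();
            epub.generate(&mut out).unwrap();
        }
    });
}
```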
## How it (currently) doesn't work
This program is still in alpha so a number of things currently break:
- Links with redirects will crash the program as it has no redirect logic.
- Websites that only run with JavaScript cannot be extracted.
- Website articles that cannot be extracted by Readability cannot be extracted by Paperoni either.
## Running locally
### Precompiled binaries
Check the [releases](https://github.com/hipstermojo/paperoni/releases) page for precompiled binaries. Currently there are only builds for Debian and Arch.
### Building from source
This project uses `async/.await`, so it needs Rust 1.39 or later to compile. Preferably use the latest version of Rust.
```sh
git clone https://github.com/hipstermojo/paperoni.git
cd paperoni
## You can build and install paperoni locally
cargo install --path .
## or use it from within the project
cargo run -- # pass your url here
```

paperoni-dark.png (new binary file, 7.9 KiB)

src/cli.rs

@@ -9,5 +9,5 @@ use structopt::StructOpt;
 pub struct Opts {
     // #[structopt(conflicts_with("links"))]
     /// Url of a web article
-    pub url: Option<String>,
+    pub urls: Vec<String>,
 }
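With the field changed to `Vec<String>`, structopt collects every positional argument, so `paperoni <url> <url> ...` yields the whole list. A minimal standalone sketch of that behavior (the `main` here is hypothetical, not part of the commit):

```rust
use structopt::StructOpt;

#[derive(StructOpt)]
pub struct Opts {
    /// Url of a web article
    pub urls: Vec<String>,
}

fn main() {
    // `paperoni https://a.example https://b.example` -> opts.urls holds both entries.
    let opts = Opts::from_args();
    for url in &opts.urls {
        println!("queued: {}", url);
    }
}
```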

src/extractor.rs

@@ -1,90 +1,53 @@
 use async_std::fs::File;
 use async_std::io::prelude::*;
 use async_std::task;
-use kuchiki::{traits::*, ElementData, NodeDataRef, NodeRef};
+use kuchiki::{traits::*, NodeRef};
 use url::Url;
+
+use crate::moz_readability::{MetaData, Readability};
 
 pub type ResourceInfo = (String, Option<String>);
 
 pub struct Extractor {
-    pub root_node: NodeRef,
-    pub content: Option<NodeDataRef<ElementData>>,
+    article: Option<NodeRef>,
     pub img_urls: Vec<ResourceInfo>,
+    readability: Readability,
 }
 
 impl Extractor {
     /// Create a new instance of an HTML extractor given an HTML string
     pub fn from_html(html_str: &str) -> Self {
         Extractor {
-            content: None,
+            article: None,
             img_urls: Vec::new(),
-            root_node: kuchiki::parse_html().one(html_str),
+            readability: Readability::new(html_str),
         }
     }
 
-    /// Extract the value of an attribute
-    fn extract_attr_val<T: Fn(&str) -> U, U>(
-        &self,
-        css_selector: &str,
-        attr_target: &str,
-        mapper: T,
-    ) -> Option<U> {
-        self.root_node
-            .select_first(css_selector)
-            .ok()
-            .and_then(|data| data.attributes.borrow().get(attr_target).map(mapper))
-    }
-
-    /// Extract the text of a DOM node given its CSS selector
-    fn extract_inner_text(&self, css_selector: &str) -> Option<String> {
-        let node_ref = self.root_node.select_first(css_selector).ok()?;
-        extract_text_from_node(node_ref.as_node())
-    }
-
     /// Locates and extracts the HTML in a document which is determined to be
     /// the source of the content
-    pub fn extract_content(&mut self) {
-        // Extract the useful parts of the head section
-        let author: Option<String> =
-            self.extract_attr_val("meta[name='author']", "content", |author| {
-                author.to_string()
-            });
-
-        let description =
-            self.extract_attr_val("meta[name='description']", "content", |description| {
-                description.to_string()
-            });
-
-        let tags = self.extract_attr_val("meta[name='keywords']", "content", |tags| {
-            tags.split(",")
-                .map(|tag| tag.trim().to_string())
-                .collect::<Vec<String>>()
-        });
-        let title = self.extract_inner_text("title").unwrap_or("".to_string());
-        let lang = self
-            .extract_attr_val("html", "lang", |lang| lang.to_string())
-            .unwrap_or("en".to_string());
-
-        let meta_attrs = MetaAttr::new(author, description, lang, tags, title);
-
-        // Extract the article
-        let article_ref = self.root_node.select_first("article").unwrap();
-
-        for node_ref in article_ref.as_node().descendants() {
-            match node_ref.data() {
-                kuchiki::NodeData::Element(..) | kuchiki::NodeData::Text(..) => (),
-                _ => node_ref.detach(),
-            }
-        }
-        self.content = Some(article_ref);
+    pub fn extract_content(&mut self, url: &str) {
+        self.readability.parse(url);
+        if let Some(article_node_ref) = &self.readability.article_node {
+            let template = r#"
+            <html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops">
+                <head>
+                </head>
+                <body>
+                </body>
+            </html>
+            "#;
+            let doc = kuchiki::parse_html().one(template);
+            let body = doc.select_first("body").unwrap();
+            body.as_node().append(article_node_ref.clone());
+            self.article = Some(doc);
+        }
     }
 
     /// Traverses the DOM tree of the content and retrieves the IMG URLs
     fn extract_img_urls(&mut self) {
-        if let Some(content_ref) = &self.content {
-            for img_ref in content_ref.as_node().select("img").unwrap() {
+        if let Some(content_ref) = &self.readability.article_node {
+            for img_ref in content_ref.select("img").unwrap() {
                 img_ref.as_node().as_element().map(|img_elem| {
                     img_elem.attributes.borrow().get("src").map(|img_url| {
                         if !img_url.is_empty() {
@@ -133,10 +96,10 @@ impl Extractor {
             let (img_url, img_path, img_mime) = async_task.await;
             // Update the image sources
             let img_ref = self
-                .content
+                .readability
+                .article_node
                 .as_mut()
                 .expect("Unable to get mutable ref")
-                .as_node()
                 .select_first(&format!("img[src='{}']", img_url))
                 .expect("Image node does not exist");
             let mut img_node = img_ref.attributes.borrow_mut();
@@ -145,11 +108,14 @@ impl Extractor {
         }
         Ok(())
     }
-}
 
-fn extract_text_from_node(node: &NodeRef) -> Option<String> {
-    node.first_child()
-        .map(|child_ref| child_ref.text_contents())
+    pub fn article(&self) -> Option<&NodeRef> {
+        self.article.as_ref()
+    }
+
+    pub fn metadata(&self) -> &MetaData {
+        &self.readability.metadata
+    }
 }
 
 /// Utility for hashing URLs. This is used to help store files locally with unique values
@@ -192,33 +158,6 @@ fn get_absolute_url(url: &str, request_url: &Url) -> String {
     }
 }
 
-#[derive(Debug)]
-pub struct MetaAttr {
-    author: Option<String>,
-    description: Option<String>,
-    language: String,
-    tags: Option<Vec<String>>,
-    title: String,
-}
-
-impl MetaAttr {
-    pub fn new(
-        author: Option<String>,
-        description: Option<String>,
-        language: String,
-        tags: Option<Vec<String>>,
-        title: String,
-    ) -> Self {
-        MetaAttr {
-            author,
-            description,
-            language,
-            tags,
-            title,
-        }
-    }
-}
-
 #[cfg(test)]
 mod test {
     use super::*;
@@ -250,86 +189,17 @@ mod test {
         </html>
     "#;
 
-    #[test]
-    fn test_extract_attr_val() {
-        let extractor = Extractor::from_html(TEST_HTML);
-        let ext_author =
-            extractor.extract_attr_val("meta[name='author']", "content", |val| val.to_string());
-        assert!(ext_author.is_some());
-        assert_eq!("Paperoni", &ext_author.unwrap());
-
-        let ext_author =
-            extractor.extract_attr_val("meta[name='invalid-name']", "content", |val| {
-                val.to_string()
-            });
-        assert!(ext_author.is_none());
-
-        let lang_attr = extractor.extract_attr_val("html", "lang", |lang| lang.to_string());
-        assert!(lang_attr.is_some());
-        assert_eq!("en".to_string(), lang_attr.unwrap());
-    }
-
-    #[test]
-    fn test_extract_inner_text() {
-        let extractor = Extractor::from_html(TEST_HTML);
-        let title_text = extractor.extract_inner_text("title");
-        assert!(title_text.is_some());
-        assert_eq!("Testing Paperoni".to_string(), title_text.unwrap());
-
-        let title_text = extractor.extract_inner_text("titln");
-        assert!(title_text.is_none());
-    }
-
-    #[test]
-    fn test_extract_text() {
-        let extractor = Extractor::from_html(TEST_HTML);
-        let h1_node = extractor.root_node.select_first("h1").unwrap();
-        let h1_text = extract_text_from_node(h1_node.as_node());
-        assert!(h1_text.is_some());
-        assert_eq!("Testing Paperoni".to_string(), h1_text.unwrap());
-    }
-
-    #[test]
-    fn test_extract_content() {
-        let extracted_html: String = r#"
-            <article>
-                <h1>Starting out</h1>
-                <p>Some Lorem Ipsum text here</p>
-                <p>Observe this picture</p>
-                <img alt="Random image" src="./img.jpg">
-            </article>
-        "#
-        .lines()
-        .map(|line| line.trim())
-        .collect();
-
-        let mut extractor = Extractor::from_html(
-            &TEST_HTML
-                .lines()
-                .map(|line| line.trim())
-                .collect::<String>(),
-        );
-        extractor.extract_content();
-        let mut output = Vec::new();
-        assert!(extractor.content.is_some());
-        extractor
-            .content
-            .unwrap()
-            .as_node()
-            .serialize(&mut output)
-            .expect("Unable to serialize output HTML");
-        let output = std::str::from_utf8(&output).unwrap();
-        assert_eq!(extracted_html, output);
-    }
-
     #[test]
     fn test_extract_img_urls() {
         let mut extractor = Extractor::from_html(TEST_HTML);
-        extractor.extract_content();
+        extractor.extract_content("http://example.com/");
         extractor.extract_img_urls();
 
         assert!(extractor.img_urls.len() > 0);
-        assert_eq!(vec![("./img.jpg".to_string(), None)], extractor.img_urls);
+        assert_eq!(
+            vec![("http://example.com/img.jpg".to_string(), None)],
+            extractor.img_urls
+        );
     }
 
     #[test]
@@ -354,24 +224,4 @@ mod test {
             exts
         );
     }
-
-    #[test]
-    fn test_get_absolute_url() {
-        let absolute_url = "https://example.image.com/images/1.jpg";
-        let relative_url = "../../images/2.jpg";
-        let relative_from_host_url = "/images/3.jpg";
-        let host_url = Url::parse("https://example.image.com/blog/how-to-test-resolvers/").unwrap();
-        let abs_url = get_absolute_url(&absolute_url, &host_url);
-        assert_eq!("https://example.image.com/images/1.jpg", abs_url);
-        let abs_url = get_absolute_url(&relative_url, &host_url);
-        assert_eq!("https://example.image.com/images/2.jpg", abs_url);
-        let relative_url = "2-1.jpg";
-        let abs_url = get_absolute_url(&relative_url, &host_url);
-        assert_eq!(
-            "https://example.image.com/blog/how-to-test-resolvers/2-1.jpg",
-            abs_url
-        );
-        let abs_url = get_absolute_url(&relative_from_host_url, &host_url);
-        assert_eq!("https://example.image.com/images/3.jpg", abs_url);
-    }
 }
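Note that the updated `test_extract_img_urls` now expects an absolute image URL: relative `src` values are resolved against the article's URL during extraction. A small illustration of that resolution using the `url` crate; the `get_absolute_url` helper itself is not shown in these hunks, so treat this as an assumption about its behavior:

```rust
use url::Url;

fn main() {
    // The test feeds extract_content the base "http://example.com/",
    // so a relative "./img.jpg" should come out absolute.
    let base = Url::parse("http://example.com/").unwrap();
    assert_eq!("http://example.com/img.jpg", base.join("./img.jpg").unwrap().as_str());

    // Paths are resolved relative to the base URL's directory as well.
    let base = Url::parse("https://example.image.com/blog/how-to-test-resolvers/").unwrap();
    assert_eq!(
        "https://example.image.com/images/2.jpg",
        base.join("../../images/2.jpg").unwrap().as_str()
    );
}
```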

src/main.rs

@@ -1,3 +1,6 @@
+#[macro_use]
+extern crate lazy_static;
+
 use std::fs::File;
 
 use async_std::{fs::create_dir, fs::remove_dir_all, task};
@@ -7,60 +10,81 @@ use url::Url;
 mod cli;
 mod extractor;
+mod moz_readability;
 
 use extractor::Extractor;
 
 fn main() {
     let opt = cli::Opts::from_args();
-    if let Some(url) = opt.url {
+    if !opt.urls.is_empty() {
         println!("Downloading single article");
-        download(url)
+        download(opt.urls);
     }
 }
 
-async fn fetch_url(url: &str) -> String {
+type HTMLResource = (String, String);
+
+async fn fetch_url(url: &str) -> HTMLResource {
     let client = surf::Client::new();
     println!("Fetching...");
     // TODO: Add middleware for following redirects
-    client
-        .get(url)
-        .recv_string()
-        .await
-        .expect("Unable to fetch URL")
+    (
+        url.to_string(),
+        client
+            .get(url)
+            .recv_string()
+            .await
+            .expect("Unable to fetch URL"),
+    )
 }
 
-fn download(url: String) {
+fn download(urls: Vec<String>) {
+    let mut async_url_tasks = Vec::with_capacity(urls.len());
+    for url in urls {
+        async_url_tasks.push(task::spawn(async move { fetch_url(&url).await }));
+    }
     task::block_on(async {
-        let html = fetch_url(&url).await;
-        let mut extractor = Extractor::from_html(&html);
-        println!("Extracting");
-        extractor.extract_content();
-        create_dir("res/")
-            .await
-            .expect("Unable to create res/ output folder");
-        extractor
-            .download_images(&Url::parse(&url).unwrap())
-            .await
-            .expect("Unable to download images");
-        let mut out_file = File::create("out.epub").unwrap();
-        let mut html_buf = Vec::new();
-        extractor
-            .content
-            .unwrap()
-            .as_node()
-            .serialize(&mut html_buf)
-            .expect("Unable to serialize");
-        let html_buf = std::str::from_utf8(&html_buf).unwrap();
-        let mut epub = EpubBuilder::new(ZipLibrary::new().unwrap()).unwrap();
-        epub.add_content(EpubContent::new("code.xhtml", html_buf.as_bytes()))
-            .unwrap();
-        for img in extractor.img_urls {
-            let file_path = format!("{}", &img.0);
-            let img_buf = File::open(file_path).expect("Can't read file");
-            epub.add_resource(img.0, img_buf, img.1.unwrap()).unwrap();
-        }
-        epub.generate(&mut out_file).unwrap();
-        println!("Cleaning up");
-        remove_dir_all("res/").await.unwrap();
+        for url_task in async_url_tasks {
+            let (url, html) = url_task.await;
+            println!("Extracting");
+            let mut extractor = Extractor::from_html(&html);
+            extractor.extract_content(&url);
+            if extractor.article().is_some() {
+                create_dir("res/")
+                    .await
+                    .expect("Unable to create res/ output folder");
+                extractor
+                    .download_images(&Url::parse(&url).unwrap())
+                    .await
+                    .expect("Unable to download images");
+                let mut out_file =
+                    File::create(format!("{}.epub", extractor.metadata().title())).unwrap();
+                let mut html_buf = Vec::new();
+                extractor
+                    .article()
+                    .unwrap()
+                    .serialize(&mut html_buf)
+                    .expect("Unable to serialize");
+                let html_buf = std::str::from_utf8(&html_buf).unwrap();
+                let html_buf = moz_readability::regexes::REPLACE_SELF_CLOSING_REGEX
+                    .replace_all(html_buf, "$tag/>");
+                let mut epub = EpubBuilder::new(ZipLibrary::new().unwrap()).unwrap();
+                if let Some(author) = extractor.metadata().byline() {
+                    epub.metadata("author", author).unwrap();
+                }
+                epub.metadata("title", extractor.metadata().title())
+                    .unwrap();
+                epub.add_content(EpubContent::new("code.xhtml", html_buf.as_bytes()))
+                    .unwrap();
+                for img in extractor.img_urls {
+                    let file_path = format!("{}", &img.0);
+                    let img_buf = File::open(file_path).expect("Can't read file");
+                    epub.add_resource(img.0, img_buf, img.1.unwrap()).unwrap();
+                }
+                epub.generate(&mut out_file).unwrap();
+                println!("Cleaning up");
+                remove_dir_all("res/").await.unwrap();
+            }
+        }
     })
 }
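One detail worth calling out: before the serialized HTML goes into the EPUB, `REPLACE_SELF_CLOSING_REGEX` rewrites void elements (`<img>`, `<br>`, and so on) into self-closing form, since EPUB content must be valid XHTML. A standalone demonstration using the regex and the `$tag/>` replacement exactly as they appear in this commit:

```rust
use regex::Regex;

fn main() {
    // Copied from src/moz_readability/regexes.rs below.
    let re = Regex::new(
        r#"(?P<tag><(?:area|base|br|col|embed|hr|img|input|link|meta|param|source|track|wbr)(?: [a-z\-]+=["'][\sa-zA-Z0-9\./\-_#]+["']|[a-z\-]+)*)>"#,
    )
    .unwrap();

    let html = r#"<p>Observe this picture</p><img src="./img.jpg">"#;
    // "<img ...>" becomes "<img .../>" so the chapter parses as XHTML.
    assert_eq!(
        r#"<p>Observe this picture</p><img src="./img.jpg"/>"#,
        re.replace_all(html, "$tag/>")
    );
}
```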

src/moz_readability/mod.rs (new file, 3912 lines; diff suppressed because it is too large)

src/moz_readability/regexes.rs (new file, 139 lines)

@@ -0,0 +1,139 @@
/// This module contains regular expressions frequently used by moz_readability
/// All regexes that only test if a `&str` matches the regex are preceded by the
/// word "is_match". All other regexes are publicly accessible.
use regex::Regex;
pub fn is_match_byline(match_str: &str) -> bool {
    lazy_static! {
        static ref BYLINE_REGEX: Regex =
            Regex::new(r"(?i)byline|author|dateline|writtenby|p-author").unwrap();
    }
    BYLINE_REGEX.is_match(match_str)
}

pub fn is_match_positive(match_str: &str) -> bool {
    lazy_static! {
        static ref POSITIVE_REGEX: Regex = Regex::new(r"(?i)article|body|content|entry|hentry|h-entry|main|page|pagination|post|text|blog|story").unwrap();
    }
    POSITIVE_REGEX.is_match(match_str)
}

pub fn is_match_negative(match_str: &str) -> bool {
    lazy_static! {
        static ref NEGATIVE_REGEX: Regex = Regex::new(r"(?i)hidden|^hid$| hid$| hid |^hid |banner|combx|comment|com-|contact|foot|footer|footnote|gdpr|masthead|media|meta|outbrain|promo|related|scroll|share|shoutbox|sidebar|skyscraper|sponsor|shopping|tags|tool|widget").unwrap();
    }
    NEGATIVE_REGEX.is_match(match_str)
}

pub fn is_match_videos(match_str: &str) -> bool {
    lazy_static! {
        static ref VIDEOS_REGEX: Regex = Regex::new(r"(?i)//(www\.)?((dailymotion|youtube|youtube-nocookie|player\.vimeo|v\.qq)\.com|(archive|upload\.wikimedia)\.org|player\.twitch\.tv)").unwrap();
    }
    VIDEOS_REGEX.is_match(match_str)
}

pub fn is_match_unlikely(match_str: &str) -> bool {
    lazy_static! {
        static ref UNLIKELY_REGEX: Regex = Regex::new(r"(?i)-ad-|ai2html|banner|breadcrumbs|combx|comment|community|cover-wrap|disqus|extra|footer|gdpr|header|legends|menu|related|remark|replies|rss|shoutbox|sidebar|skyscraper|social|sponsor|supplemental|ad-break|agegate|pagination|pager|popup|yom-remote").unwrap();
    }
    UNLIKELY_REGEX.is_match(match_str)
}

pub fn is_match_ok_maybe(match_str: &str) -> bool {
    lazy_static! {
        static ref OK_MAYBE_REGEX: Regex =
            Regex::new(r"(?i)and|article|body|column|content|main|shadow").unwrap();
    }
    OK_MAYBE_REGEX.is_match(match_str)
}

pub fn is_match_node_content(match_str: &str) -> bool {
    lazy_static! {
        static ref NODE_CONTENT_REGEX: Regex = Regex::new(r"\.( |$)").unwrap();
    }
    NODE_CONTENT_REGEX.is_match(match_str)
}

pub fn is_match_share_elems(match_str: &str) -> bool {
    lazy_static! {
        static ref SHARE_ELEMS_REGEX: Regex =
            Regex::new(r"(?i)(\b|_)(share|sharedaddy)(\b|_)").unwrap();
    }
    SHARE_ELEMS_REGEX.is_match(match_str)
}

pub fn is_match_has_content(match_str: &str) -> bool {
    lazy_static! {
        static ref HAS_CONTENT_REGEX: Regex = Regex::new(r"\S$").unwrap();
    }
    HAS_CONTENT_REGEX.is_match(match_str)
}

pub fn is_match_img_ext(match_str: &str) -> bool {
    lazy_static! {
        static ref IMG_EXT_REGEX: Regex = Regex::new(r"(?i)\.(jpg|jpeg|png|webp)").unwrap();
    }
    IMG_EXT_REGEX.is_match(match_str)
}

pub fn is_match_srcset(match_str: &str) -> bool {
    lazy_static! {
        static ref SRCSET_REGEX: Regex = Regex::new(r"\.(jpg|jpeg|png|webp)\s+\d").unwrap();
    }
    SRCSET_REGEX.is_match(match_str)
}

pub fn is_match_src_regex(match_str: &str) -> bool {
    lazy_static! {
        static ref SRC_REGEX: Regex = Regex::new(r"^\s*\S+\.(jpg|jpeg|png|webp)\S*\s*$").unwrap();
    }
    SRC_REGEX.is_match(match_str)
}

pub fn is_match_name_pattern(match_str: &str) -> bool {
    lazy_static! {
        static ref NAME_PATTERN_REGEX: Regex = Regex::new(r"(?i)\s*(?:(dc|dcterm|og|twitter|weibo:(article|webpage))\s*[\.:]\s*)?(author|creator|description|title|site_name)\s*$").unwrap();
    }
    NAME_PATTERN_REGEX.is_match(match_str)
}

pub fn is_match_title_separator(match_str: &str) -> bool {
    lazy_static! {
        static ref TITLE_SEPARATOR_REGEX: Regex = Regex::new(r" [\|\-\\/>»] ").unwrap();
    }
    TITLE_SEPARATOR_REGEX.is_match(match_str)
}

pub fn is_match_has_title_separator(match_str: &str) -> bool {
    lazy_static! {
        static ref HAS_TITLE_SEPARATOR_REGEX: Regex = Regex::new(r" [\\/>»] ").unwrap();
    }
    HAS_TITLE_SEPARATOR_REGEX.is_match(match_str)
}

lazy_static! {
    pub static ref NORMALIZE_REGEX: Regex = Regex::new(r"\s{2,}").unwrap();
    pub static ref B64_DATA_URL_REGEX: Regex =
        Regex::new(r"(?i)^data:\s*([^\s;,]+)\s*;\s*base64\s*").unwrap();
    pub static ref BASE64_REGEX: Regex = Regex::new(r"(?i)base64\s*").unwrap();
    pub static ref PROPERTY_REGEX: Regex = Regex::new(
        r"(?i)\s*(dc|dcterm|og|twitter)\s*:\s*(author|creator|description|title|site_name)\s*"
    )
    .unwrap();
    pub static ref SRCSET_CAPTURE_REGEX: Regex =
        Regex::new(r"(\S+)(\s+[\d.]+[xw])?(\s*(?:,|$))").unwrap();
    pub static ref REPLACE_WHITESPACE_REGEX: Regex = Regex::new(r"\s").unwrap();
    pub static ref REPLACE_DOT_REGEX: Regex = Regex::new(r"\.").unwrap();
    pub static ref REPLACE_HTML_ESCAPE_REGEX: Regex =
        Regex::new("&(quot|amp|apos|lt|gt);").unwrap();
    pub static ref REPLACE_HEX_REGEX: Regex =
        Regex::new(r"(?i)&#(?:x([0-9a-z]{1,4})|([0-9]{1,4}));").unwrap();
    pub static ref REPLACE_START_SEPARATOR_REGEX: Regex =
        Regex::new(r"(?i)(?P<start>.*)[\|\-\\/>»] .*").unwrap();
    pub static ref REPLACE_END_SEPARATOR_REGEX: Regex =
        Regex::new(r"(?i)[^\|\-\\/>»]*[\|\-\\/>»](?P<end>.*)").unwrap();
    pub static ref REPLACE_MULTI_SEPARATOR_REGEX: Regex = Regex::new(r"[\|\-\\/>»]+").unwrap();
    pub static ref REPLACE_SELF_CLOSING_REGEX: Regex = Regex::new(
        r#"(?P<tag><(?:area|base|br|col|embed|hr|img|input|link|meta|param|source|track|wbr)(?: [a-z\-]+=["'][\sa-zA-Z0-9\./\-_#]+["']|[a-z\-]+)*)>"#
    )
    .unwrap();
}
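A short usage sketch of the matchers above (illustrative only; the inputs are hypothetical class/id strings of the kind the Readability port presumably tests while scoring nodes):

```rust
fn example() {
    assert!(is_match_byline("article-author"));   // contains "author"
    assert!(is_match_positive("main-content"));   // contains "main" and "content"
    assert!(is_match_negative("sidebar-widget")); // contains "sidebar" and "widget"
    assert!(!is_match_videos("https://example.com/clip")); // not a recognised embed host
    assert!(is_match_title_separator("Some Article | Some Site"));
}
```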

test_html/simple.html (new file, 25 lines)

@@ -0,0 +1,25 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <title>Sample Document</title>
  </head>
  <body>
    <h1>Some text in h1</h1>
    <img src="inexistent.png">
    <div class="invalid-elems">
      <!-- This div contains invalid elements -->
      <h1>Imagine some lorem ipsum</h1>
      <img>
    </div>
    <!-- Test that the no-script content is copied over -->
    <img src="lazy-load.png">
    <noscript>
      <div class="parent">
        <img src="eager-load.png" id="lazy-load">
      </div>
    </noscript>
  </body>
</html>