srctree

Robin Linden — commit 238fd544 (parent 65eda1b4)
html2: Delete the simple_page test

Loading files from disk for testing was useful when starting out on the spec-compliant tokenizer, but now just complicates things.
.gitattributes added: 5, removed: 31, total 0
@@ -1 +1 @@
* text=auto eol=lf
* text=auto
 
html2/BUILD added: 5, removed: 31, total 0
@@ -16,10 +16,6 @@ cc_library(
],
)
 
data_files = {
"tokenizer": ["test/simple_page.html"],
}
 
dependencies = {
"tree_constructor": ["//dom2"],
}
@@ -28,10 +24,6 @@ dependencies = {
name = src[:-4],
size = "small",
srcs = [src],
data = data_files.get(
src[:-9],
[],
),
deps = dependencies.get(
src[:-9],
[],
 
html2/test/simple_page.html → /dev/null (deleted) added: 5, removed: 31, total 0
@@ -1,3 +0,0 @@
<!DOCTYPE html>
<html>
</html>
 
html2/tokenizer_test.cpp added: 5, removed: 31, total 0
@@ -61,21 +61,6 @@ void expect_text(TokenizerOutput &output, std::string_view text) {
} // namespace
 
int main() {
etest::test("simple_page", [] {
std::ifstream page{"html2/test/simple_page.html", std::ios::binary};
require(page.is_open());
std::string page_str{std::istreambuf_iterator<char>{page}, std::istreambuf_iterator<char>{}};
auto tokens = run_tokenizer(page_str);
 
expect_token(tokens, DoctypeToken{.name = "html"s});
expect_token(tokens, CharacterToken{'\n'});
expect_token(tokens, StartTagToken{.tag_name = "html"s});
expect_token(tokens, CharacterToken{'\n'});
expect_token(tokens, EndTagToken{.tag_name = "html"s});
expect_token(tokens, CharacterToken{'\n'});
expect_token(tokens, EndOfFileToken{});
});
 
etest::test("script, empty", [] {
auto tokens = run_tokenizer("<script></script>");