Avoid redundant read/parse of test file in micromark helper test cases.

This commit is contained in:
David Anson 2023-03-20 22:05:05 -07:00
parent 8f93a77b87
commit fdec335de4

View file

@ -6,38 +6,42 @@ import { newLineRe } from "../helpers/helpers.js";
import { filterByPredicate, filterByTypes, getMicromarkEvents, parse } import { filterByPredicate, filterByTypes, getMicromarkEvents, parse }
from "../helpers/micromark.cjs"; from "../helpers/micromark.cjs";
// Contents of the shared Markdown fixture, read once and normalized to "\n"
// line endings so every test case reuses the same text.
// fs.readFile (promises API) already returns a Promise, so chain on it
// directly instead of wrapping it in `new Promise((resolve, reject) => …)`
// — the explicit-Promise-construction anti-pattern.
const testContent = fs
  .readFile("./test/every-markdown-syntax.md", "utf8")
  .then((content) => content.split(newLineRe).join("\n"));
// Micromark token tree for the fixture, parsed once and shared by tests.
// `testContent.then(parse)` already yields a Promise; wrapping it in a
// `new Promise((resolve, reject) => …)` adapter was redundant.
const testTokens = testContent.then(parse);
// Snapshot the full token tree that parse() produces for the shared fixture.
// (Reconstructed from a garbled side-by-side diff: old/new columns were
// fused onto single lines; this is the post-commit version.)
test("parse", async(t) => {
  t.plan(1);
  t.snapshot(await testTokens, "Unexpected tokens");
});
// The token types returned by filterByPredicate with a match-all predicate
// should mirror the "enter" event types reported by getMicromarkEvents for
// the same normalized content.
// (Reconstructed from a garbled side-by-side diff: old/new columns were
// fused onto single lines; this is the post-commit version.)
test("getMicromarkEvents/filterByPredicate", async(t) => {
  t.plan(1);
  const content = await testContent;
  const events = getMicromarkEvents(content);
  // Each event is a [ kind, token, … ] tuple; keep only "enter" events.
  const eventTypes = events
    .filter((event) => event[0] === "enter")
    .map((event) => event[1].type);
  const tokens = parse(content);
  const filtered = filterByPredicate(tokens, () => true);
  const tokenTypes = filtered.map((token) => token.type);
  t.deepEqual(tokenTypes, eventTypes);
});
// filterByTypes should return only tokens whose type is one of the
// requested "…Text" types.
// (Reconstructed from a garbled side-by-side diff: old/new columns were
// fused onto single lines; this is the post-commit version.)
test("filterByTypes", async(t) => {
  t.plan(6);
  const filtered = filterByTypes(
    await testTokens,
    [ "atxHeadingText", "codeText", "htmlText", "setextHeadingText" ]
  );
  for (const token of filtered) {
    t.true(token.type.endsWith("Text"));
  }
});