// @ts-check

import fs from "node:fs/promises";
import test from "ava";
import { newLineRe } from "../helpers/helpers.js";
import { filterByPredicate, filterByTypes, getMicromarkEvents, parse }
  from "../helpers/micromark.cjs";

// Parsing the kitchen-sink Markdown fixture should yield a stable token
// tree; any change to the parser output shows up as a snapshot diff.
test("parse", async (t) => {
  t.plan(1);
  const raw = await fs.readFile("./test/every-markdown-syntax.md", "utf8");
  // Normalize all line-ending flavors to "\n" so snapshots are platform-stable
  const normalized = raw.split(newLineRe).join("\n");
  t.snapshot(parse(normalized), "Unexpected tokens");
});
// The flattened token tree from parse/filterByPredicate should contain one
// token per micromark "enter" event, in identical order — i.e. the tree is
// a faithful re-nesting of the raw event stream.
test("getMicromarkEvents/filterByPredicate", async (t) => {
  t.plan(1);
  const raw = await fs.readFile("./test/every-markdown-syntax.md", "utf8");
  // Normalize all line-ending flavors to "\n"
  const normalized = raw.split(newLineRe).join("\n");
  // Collect the type of every "enter" event from the raw event stream
  const enterEventTypes = [];
  for (const [ kind, token ] of getMicromarkEvents(normalized)) {
    if (kind === "enter") {
      enterEventTypes.push(token.type);
    }
  }
  // An always-true predicate flattens the entire parsed token tree
  const allTokens = filterByPredicate(parse(normalized), () => true);
  t.deepEqual(allTokens.map((token) => token.type), enterEventTypes);
});
// filterByTypes should return only tokens whose type is in the requested
// list; the fixture contains exactly six such "*Text" tokens (hence plan(6)).
test("filterByTypes", async (t) => {
  t.plan(6);
  const raw = await fs.readFile("./test/every-markdown-syntax.md", "utf8");
  // Normalize all line-ending flavors to "\n"
  const normalized = raw.split(newLineRe).join("\n");
  const textTypes =
    [ "atxHeadingText", "codeText", "htmlText", "setextHeadingText" ];
  const matches = filterByTypes(parse(normalized), textTypes);
  for (const match of matches) {
    t.true(match.type.endsWith("Text"));
  }
});