import test from 'ava'
import parser from '../lib/parser'
import lexer from '../lib/lexer'

function ps(index) {
return { index, line: 0, column: index }
}

const lexerOptions = { childlessTags: [] }
const parserOptions = {
voidTags: [],
closingTags: [],
closingTagAncestorBreakers: {},
}

test('parser() should return nodes', (t) => {
{
// 'p' needs to be an auto-closing tag in this case: the expected output
// below asserts that the second <p> implicitly closes the first.
const parserOptions = {
voidTags: [],
closingTags: ['p'],
closingTagAncestorBreakers: {},
}
const str = '<p>This is one<p>This is two'
const tokens = lexer(str, lexerOptions)
const nodes = parser(tokens, parserOptions)
t.deepEqual(nodes, [
{
type: 'element',
tagName: 'p',
attributes: [],
children: [
{
type: 'text',
content: 'This is one',
position: {
start: ps(3),
end: ps(14),
},
},
],
position: {
start: ps(0),
end: ps(14),
},
},
{
type: 'element',
tagName: 'p',
attributes: [],
children: [
{
type: 'text',
content: 'This is two',
position: {
start: ps(17),
end: ps(28),
},
},
],
position: {
start: ps(14),
end: ps(str.length),
},
},
])
}
{
const parserOptions = {
voidTags: [],
closingTags: ['p', 'span'],
closingTagAncestorBreakers: {},
}
// The markup for this case did not survive, only its text did. The string
// below is an assumption (an unclosed <span> between two implicitly closed
// <p>s), and the positions asserted here are derived from that assumption.
const str = '<p>This is one <span>okay<p>This is two'
const tokens = lexer(str, lexerOptions)
const nodes = parser(tokens, parserOptions)
t.deepEqual(nodes, [
{
type: 'element',
tagName: 'p',
attributes: [],
children: [
{
type: 'text',
content: 'This is one ',
position: {
start: ps(3),
end: ps(15),
},
},
{
type: 'element',
tagName: 'span',
attributes: [],
children: [
{
type: 'text',
content: 'okay',
position: {
start: ps(21),
end: ps(25),
},
},
],
position: {
start: ps(15),
end: ps(25),
},
},
],
position: {
start: ps(0),
end: ps(25),
},
},
{
type: 'element',
tagName: 'p',
attributes: [],
children: [
{
type: 'text',
content: 'This is two',
position: {
start: ps(28),
end: ps(39),
},
},
],
position: {
start: ps(25),
end: ps(str.length),
},
},
])
}
/*
A new <li> can be encountered within a nested <ul> before that <ul> is
closed, so we should not rewind the stack in those cases.
This edge-case also applies to <table>s.
*/
{
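// The nested <ul> acts as the ancestor breaker for <li> here, so the inner
// <li> is nested inside it rather than closing the outer <li>.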
const str = '<ul><li>abc<ul><li>def</li></ul></li></ul>'
const tokens = lexer(str, lexerOptions)
const nodes = parser(tokens, {
voidTags: [],
closingTags: ['li'],
closingTagAncestorBreakers: {
li: ['ul'],
},
})
t.deepEqual(nodes, [
{
type: 'element',
tagName: 'ul',
attributes: [],
position: {
start: ps(0),
end: ps(42),
},
children: [
{
type: 'element',
tagName: 'li',
attributes: [],
position: {
start: ps(4),
end: ps(37),
},
children: [
{
type: 'text',
content: 'abc',
position: {
start: ps(8),
end: ps(11),
},
},
{
type: 'element',
tagName: 'ul',
attributes: [],
position: {
start: ps(11),
end: ps(32),
},
children: [
{
type: 'element',
tagName: 'li',
attributes: [],
position: {
start: ps(15),
end: ps(27),
},
children: [
{
type: 'text',
content: 'def',
position: {
start: ps(19),
end: ps(22),
},
},
],
},
],
},
],
},
],
},
])
}
{
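// Same breaker, but with a <span> between the nested <ul> and the inner <li>:
// the stack is still not rewound, so the <li> ends up inside the <span>.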
const str = '<ul><li>abc<ul><span><li>def</li></span></ul></li></ul>'
const tokens = lexer(str, lexerOptions)
const nodes = parser(tokens, {
voidTags: [],
closingTags: ['li'],
closingTagAncestorBreakers: {
li: ['ul'],
},
})
t.deepEqual(nodes, [
{
type: 'element',
tagName: 'ul',
attributes: [],
position: {
start: ps(0),
end: ps(55),
},
children: [
{
type: 'element',
tagName: 'li',
attributes: [],
position: {
start: ps(4),
end: ps(50),
},
children: [
{
type: 'text',
content: 'abc',
position: {
start: ps(8),
end: ps(11),
},
},
{
type: 'element',
tagName: 'ul',
attributes: [],
position: {
start: ps(11),
end: ps(45),
},
children: [
{
type: 'element',
tagName: 'span',
attributes: [],
position: {
start: ps(15),
end: ps(40),
},
children: [
{
type: 'element',
tagName: 'li',
attributes: [],
position: {
start: ps(21),
end: ps(33),
},
children: [
{
type: 'text',
content: 'def',
position: {
start: ps(25),
end: ps(28),
},
},
],
},
],
},
],
},
],
},
],
},
])
}
{
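// Two <li> siblings inside the nested <ul>: the second <li> ("ghi") closes the
// first ("def") because an open <li> is reached before the <ul> breaker, while
// the outer <li> is left open.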
const str = '<ul><li>abc<ul><li>def<li>ghi</li></ul></li></ul>'
const tokens = lexer(str, lexerOptions)
const nodes = parser(tokens, {
voidTags: [],
closingTags: ['li'],
closingTagAncestorBreakers: {
li: ['ul'],
},
})
t.deepEqual(nodes, [
{
type: 'element',
tagName: 'ul',
attributes: [],
position: {
start: ps(0),
end: ps(49),
},
children: [
{
type: 'element',
tagName: 'li',
attributes: [],
position: {
start: ps(4),
end: ps(44),
},
children: [
{
type: 'text',
content: 'abc',
position: {
start: ps(8),
end: ps(11),
},
},
{
type: 'element',
tagName: 'ul',
attributes: [],
position: {
start: ps(11),
end: ps(39),
},
children: [
{
type: 'element',
tagName: 'li',
attributes: [],
position: {
start: ps(15),
end: ps(22),
},
children: [
{
type: 'text',
content: 'def',
position: {
start: ps(19),
end: ps(22),
},
},
],
},
{
type: 'element',
tagName: 'li',
attributes: [],
position: {
start: ps(22),
end: ps(34),
},
children: [
{
type: 'text',
content: 'ghi',
position: {
start: ps(26),
end: ps(29),
},
},
],
},
],
},
],
},
],
},
])
}
})
test('parser() should handle nested tables', (t) => {
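// Only 'tbody' is auto-closing here, and its breaker is <table>, so the inner
// <tbody> does not close the outer one and the nested table keeps its own
// tbody/tr/td structure.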
const str =
'<table><tbody><tr><td><table><tbody><tr><td></td></tr></tbody></table></td></tr></tbody></table>'
const tokens = lexer(str, lexerOptions)
const nodes = parser(tokens, {
voidTags: [],
closingTags: ['tbody'],
closingTagAncestorBreakers: {
tbody: ['table'],
tr: ['table'],
td: ['table'],
},
})
t.deepEqual(nodes, [
{
type: 'element',
tagName: 'table',
attributes: [],
position: {
start: ps(0),
end: ps(96),
},
children: [
{
type: 'element',
tagName: 'tbody',
attributes: [],
position: {
start: ps(7),
end: ps(88),
},
children: [
{
type: 'element',
tagName: 'tr',
attributes: [],
position: {
start: ps(14),
end: ps(80),
},
children: [
{
type: 'element',
tagName: 'td',
attributes: [],
position: {
start: ps(18),
end: ps(75),
},
children: [
{
type: 'element',
tagName: 'table',
attributes: [],
position: {
start: ps(22),
end: ps(70),
},
children: [
{
type: 'element',
tagName: 'tbody',
attributes: [],
position: {
start: ps(29),
end: ps(62),
},
children: [
{
type: 'element',
tagName: 'tr',
attributes: [],
position: {
start: ps(36),
end: ps(54),
},
children: [
{
type: 'element',
tagName: 'td',
attributes: [],
position: {
start: ps(40),
end: ps(49),
},
children: [],
},
],
},
],
},
],
},
],
},
],
},
],
},
],
},
])
})
test('parser() should ignore unnecessary closing tags', (t) => {
/*
In this case the </b> bit is unnecessary and should
not be represented in the output nor interfere with the stack.
(The original tag name was lost; </b> is assumed here, and any unmatched
closing tag of the same length fits the offsets asserted below.)
*/
const str = '</b>x'
const tokens = lexer(str, lexerOptions)
const nodes = parser(tokens, parserOptions)
t.deepEqual(nodes, [
{
type: 'text',
content: 'x',
position: {
start: ps(4),
end: ps(str.length),
},
},
])
})